diff --git "a/6405.jsonl" "b/6405.jsonl" new file mode 100644--- /dev/null +++ "b/6405.jsonl" @@ -0,0 +1,693 @@ +{"seq_id":"25576405","text":"import boto3, json\nimport json\n\nstack_name = 'AmaNerdBookReview'\ntemplate_file_location = \"./ec2_script/cloudformation.json\"\n\nwith open(template_file_location, 'r') as content_file:\n content = json.load(content_file)\n\ncontent = json.dumps(content)\n\ncloud_formation_client = boto3.client('cloudformation')\n# print(\"Creating {}\".format(stack_name))\nresponse = cloud_formation_client.create_stack(\n StackName=stack_name,\n TemplateBody=content\n)\n\nprint(response)","sub_path":"scripts/ec2_script/createEC2.py","file_name":"createEC2.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"637707389","text":"#!/usr/bin/env python\n\n\"\"\"Changes spacing from single to double space for a given file.\nOutputs the new file as filename_double_spaced.txt or\nfilename_single_spaced.txt as needed.\n\nSysargs: filename\n\"\"\"\nimport sys\nimport re\nimport os\n\n\nclass Spacing(object):\n def __init__(self, filename):\n self.filename = filename\n self.basename = os.path.basename(filename) # only the base part of the filename\n\n def open_file(self):\n \"\"\"Opens a file object to be used across the class\"\"\"\n with open(self.filename, 'r') as f:\n file_contents = f.read()\n return file_contents\n\n def single_to_double(self):\n \"\"\"Converts from single to double space after a period\"\"\"\n try:\n file_contents = self.open_file()\n with open('textedit/texts/double_space_%s' % self.basename, 'w') as output:\n output.write(file_contents.replace(r'\\.\\s{1}', r'\\.\\s{2}'))\n except AssertionError:\n print(\"Single space not found\")\n\n def double_to_single(self):\n \"\"\"Converts from double to single space after a period\"\"\"\n try:\n file_contents = self.open_file()\n with open('textedit/texts/single_space_%s' % self.basename, 'w') as output:\n output.write(file_contents.replace(r'\\.\\s{2}', r'\\.\\s{1}'))\n except AssertionError:\n print(\"Double space not found\")\n\n def spacing_check(self):\n \"\"\"simple check to see if file is single or double spaced and refereces it to the opposite\"\"\"\n if len(re.findall(r'\\.\\s{2}', self.filename)) > 1:\n self.double_to_single()\n else:\n self.single_to_double()\n\n\nif __name__ == '__main__':\n filename = sys.argv[1]\n sp = Spacing(filename)\n Spacing.spacing_check(sp)\n","sub_path":"learn_pypkg/learn_pypkg/edit/spacing.py","file_name":"spacing.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"265226244","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom collections import OrderedDict\nimport numpy as np\nfrom torch.autograd import Variable\nclass BoxCar(nn.Module):\n \n def __init__(self, ch=3, dim1=299, dim2=299, k=100000, use_gpu=True):\n super(BoxCar, self).__init__()\n f1 = torch.from_numpy(np.arange(dim1)).view(1, 1, 1, dim1, -1)\n f2 = torch.from_numpy(np.arange(dim2)).view(1, 1, 1, 1, -1)\n z1 = torch.zeros(ch*dim1).long().view(1, 1, ch, dim1, -1)\n z2 = torch.zeros(ch*dim2).long().view(1, 1, ch, 1, -1)\n\n if use_gpu:\n f1 = Variable(f1.cuda())\n f2 = Variable(f2.cuda())\n z1 = Variable(z1.cuda())\n z2 = Variable(z2.cuda())\n else:\n f1 = Variable(f1)\n f2 = Variable(f2)\n z1 = Variable(z1)\n z2 = Variable(z2)\n\n 
f1.requires_grad = False\n f2.requires_grad = False\n z1.requires_grad = False\n z2.requires_grad = False\n\n\n self.f1 = f1 + z1\n self.f2 = f2 + z2\n self.k = k\n self.ch = ch\n self.dim1 = dim1\n self.dim2 = dim2\n\n def img_set_zero(x, f):\n \"\"\"\n \n \"\"\"\n def logistic(self, x):\n return 1.0/(1 + torch.exp(-self.k * x))\n \n def forward(self, x, m):\n '''\n x - s x ch x dim1 x dim2\n m - s x g x 4\n returns - s x g x ch x dim1 x dim2\n '''\n \n s = x.size(0)\n g = m.size(1)\n M = []\n for i in range(g):\n m1 = self.logistic((self.f1 - m[:, i, 0].contiguous().view(s, 1, 1, 1, -1)).float())\n m2 = self.logistic((self.f1 - m[:, i, 2].contiguous().view(s, 1, 1, 1, -1)).float())\n m3 = self.logistic((self.f2 - m[:, i, 1].contiguous().view(s, 1, 1, 1, -1)).float())\n m4 = self.logistic((self.f2 - m[:, i, 3].contiguous().view(s, 1, 1, 1, -1)).float())\n v = (m1-m2)*(m3-m4)\n M.append(v)\n\n M = torch.cat(M, 1)\n return x.view(s, 1, self.ch, self.dim1, -1) * M\n\n\nclass Upsampler(nn.Module):\n def __init__(self, set_zero=False, target_dim=(299,299), mode='bilinear'):\n\n super(Upsampler, self).__init__()\n self.h = target_dim[0]\n self.w = target_dim[1]\n self.set_zero = set_zero\n\n self.upsampler = torch.nn.Upsample(size=target_dim, mode=mode)\n \n def img_set_zero(self, x, tl_x, tl_y, br_x, br_y):\n \"\"\"\n x: (3, 299, 299)\n \"\"\"\n \n if tl_x > 0:\n x[:, :tl_x, :] = 0\n x[:, br_x:, :] = 0\n if tl_y > 0:\n x[:, :, :tl_y] = 0\n x[:, :, br_y:] = 0\n \n return x\n\n def img_crop(self, x, tl_x, tl_y, br_x, br_y, border_width=3, target_size=(299,299)):\n \"\"\"\n Takes tensor of dimension x: (3, 299, 299) and\n f: (s, 4) containing tl_x, tl_y, br_x, br_y in that\n order. Returns upsampled crops\n \"\"\"\n # note that the following step is not \n # a part of the network, taking values\n # out of the tensor here\n \n tl_x, tl_y, br_x, br_y = int(tl_x.data[0]), int(tl_y.data[0]), int(br_x.data[0]), int(br_y.data[0])\n if self.set_zero:\n x = self.img_set_zero(x, tl_x, tl_y, br_x, br_y)\n\n # Add border to cropping to preserve gradient on boundaries\n tl_x = max(tl_x - border_width, 0)\n tl_y = max(tl_y - border_width, 0)\n br_x = min(br_x + border_width, x.size(1)-1)\n br_y = min(br_y + border_width, x.size(2)-1)\n \n cropped = x[:,tl_x:br_x,tl_y:br_y].contiguous()\n cropped = cropped.view(1, 3, cropped.size(1), cropped.size(2))\n upped = self.upsampler(cropped).view(3, 299, 299)\n return upped\n\n def img_crops(self, x, f):\n \"\"\"\n x: (3, 299, 299)\n f: (g, 4) tl_x, tl_y, br_x, br_y\n returns cropped and upsampled same as x.size\n \"\"\"\n out = []\n for i in range(f.size(0)):\n out.append(self.img_crop(x.clone(), f[i][0], f[i][1], f[i][2], f[i][3]))\n out = torch.stack(out, 0)\n return out\n\n def imgs_crops(self, x, f):\n \"\"\"\n x: (s, g, 3, 299, 299)\n f: (s, g, 4) tl_x, tl_y, br_x, br_y\n returns cropped and upsampled same as x.size\n \"\"\"\n out = []\n for i,x_i in enumerate(torch.unbind(x)):\n out.append(self.img_crops(x_i, f[i]))\n out = torch.stack(out, 0)\n return out\n\n def forward(self, x, f):\n return self.imgs_crops(x, f)\n\n","sub_path":"densenet_model/utils_slow_bc.py","file_name":"utils_slow_bc.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"272486348","text":"\n# Import data\nimport pandas as pd\nimport pickle\n\nwith open('pickle/processed_df.p', mode='rb') as picklefile:\n df = pickle.load(picklefile)\n\n# Feature set\nfeatures = df[df[\"test\"] == 
0].iloc[:,0:93]\nlabels = df[df[\"test\"] == 0].iloc[:,94]\n\n## Models\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n# Random forest\nrf_model = RandomForestClassifier(n_estimators=10, criterion='gini', \n max_depth=None, min_samples_split=2, \n min_samples_leaf=1, max_features='auto', \n max_leaf_nodes=None, bootstrap=True, \n oob_score=False, n_jobs=1, \n random_state=None, verbose=0, \n min_density=None, compute_importances=None)\n\n#rf_model_fitted = rf_model.fit(X_train, y_train)\n\n# Gradient boosted trees\ngb_model = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, \n n_estimators=300, subsample=0.75, \n min_samples_split=20, min_samples_leaf=5, \n max_depth=6, init=None, random_state=None, \n max_features='auto', verbose=1, \n max_leaf_nodes=None, warm_start=False)\n\n#gb_model_fitted = gb_model.fit(X_train, y_train)\n\n# Model validation\nfrom sklearn import cross_validation\nimport numpy as np\n\n# # Random forest classifier\n# rf_score = cross_validation.cross_val_score(rf_model, features, labels, scoring='log_loss', cv=3)\n# print \"Random forest CV score\", np.mean(rf_score)\n\n# # Gradient boosted classifier\n# gb_score = cross_validation.cross_val_score(gb_model, features_samp, labels_samp, scoring='log_loss', cv=5)\n# print \"Gradient boosting CV score\", np.mean(gb_score)\n\n# Gradient boosted classifier - submission\ngb_fitted = gb_model.fit(features, labels)\n\n# Predict test values\nfeatures_test = df[df[\"test\"] == 1].iloc[:,0:93]\ngb_pred = gb_fitted.predict_proba(features_test)\n\n## Make submission\nimport csv\n\nfile_h = 'submission/submission.csv'\n\nwith open(file_h, 'wb') as csv_file:\n csv_w = csv.writer(csv_file)\n csv_w.writerow(['id', 'Class_1', 'Class_2', 'Class_3', \n 'Class_4', 'Class_5', 'Class_6',\n 'Class_7', 'Class_8', 'Class_9'])\n \n for i, pred_vals in enumerate(gb_pred):\n csv_w.writerow([i+1] + list(pred_vals))\n\n","sub_path":"Otto/rf_and_gb_classifiers.py","file_name":"rf_and_gb_classifiers.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"490110739","text":"from flask import Flask, abort, request\nimport json\n\nfrom files_commands import get_all_files, add_file, remove_file\n\napp = Flask(__name__)\napi_url = '/v1.0'\n\n@app.route(api_url+'/files',methods=['POST'])\ndef create_files():\n content = request.get_json(silent=True)\n filename = content['filename']\n content = content['content']\n grep_process2 = open(filename+'.txt','a')\n grep_process2.write(content+'\\n')\n grep_process2.close()\n return \"el archivo ha sido creado\",201\n\n\n\n\n\n@app.route(api_url+'/files',methods=['GET'])\ndef read_user():\n list = {}\n list[\"files\"] = get_all_files()\n return json.dumps(list), 200\n\n@app.route(api_url+'/users',methods=['PUT'])\ndef update_user():\n return \"not found\", 404\n\n@app.route(api_url+'/files',methods=['DELETE'])\ndef delete_user():\n error = False\n for filename in get_all_files():\n if not remove_file(filename):\n error = True\n\n if error:\n return 'some users were not deleted', 400\n else:\n return 'all users were deleted', 200\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=8080,debug='True')\n","sub_path":"ejercicios/punto1/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"176525126","text":"# male, 
female\r\n# Save as numpy arrays\r\n# Code it with fit_generator\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\r\nnp.random.seed(26)\r\n\r\n\r\n# # Set the generation options for the images\r\n# # 1. Augmentation\r\ntrain_datagen = ImageDataGenerator(rescale=1./255,\r\n horizontal_flip=True, \r\n vertical_flip=True,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n rotation_range=5,\r\n zoom_range=1.2,\r\n shear_range=0.7,\r\n fill_mode=\"nearest\")\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n\r\n\r\n\r\n# flow or flow_from_directory\r\n# use flow for files, flow_from_directory for folders\r\n# Point to where the actual data lives and load the images.\r\ntrain_generator = train_datagen.flow_from_directory(\r\n \"./data/data2\",\r\n target_size=(150,150),\r\n batch_size=5, # 5 images at a time\r\n class_mode=\"binary\"\r\n # ,save_to_dir=\"./data/data1_2/train\"\r\n)\r\ntest_generator = test_datagen.flow_from_directory(\r\n \"./data/data2\",\r\n target_size=(150,150),\r\n batch_size=5, # 5 images at a time\r\n class_mode=\"binary\"\r\n # ,save_to_dir=\"./data/data1_2/test\"\r\n)\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(10,(4,4),input_shape=(150,150,3))) \r\nmodel.add(Conv2D(10,(3,3))) \r\nmodel.add(Conv2D(10,(3,3))) \r\nmodel.add(Conv2D(10,(2,2))) \r\nmodel.add(MaxPooling2D(pool_size=2)) \r\nmodel.add(Flatten()) \r\nmodel.add(Dense(10,activation='relu')) \r\nmodel.add(Dense(1,activation=\"sigmoid\")) \r\nmodel.summary()\r\n\r\n\r\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"acc\"])\r\nhist = model.fit_generator(\r\n train_generator,\r\n steps_per_epoch = 20,\r\n epochs = 100,\r\n validation_data = test_generator,\r\n validation_steps = 4\r\n)\r\n\r\nloss = hist.history[\"loss\"]\r\nval_loss = hist.history[\"val_loss\"]\r\nacc = hist.history[\"acc\"]\r\nval_acc = hist.history[\"val_acc\"]\r\n\r\n\r\n# Visualization\r\nimport matplotlib.pyplot as plt\r\nplt.figure(figsize=(10,6)) # check what the unit is\r\nplt.subplot(2,1,1) # first of 2 rows x 1 column\r\nplt.plot(loss,marker='.',c='red',label='loss')\r\nplt.plot(val_loss,marker='.',c='blue',label='val_loss')\r\nplt.grid() # draw grid lines.\r\n\r\nplt.title('loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(loc='upper right')\r\n\r\nplt.subplot(2,1,2) # second of 2 rows x 1 column\r\nplt.plot(acc,marker='.',c='red')\r\nplt.plot(val_acc,marker='.',c='blue')\r\nplt.grid() # draw grid lines.\r\n\r\nplt.title('accuracy')\r\nplt.ylabel('acc')\r\nplt.xlabel('epoch')\r\nplt.legend(['acc','val_acc']) # if the legend location is not specified, it is placed in an empty spot automatically.\r\n\r\nplt.show()\r\n\r\n\r\nprint(\"===============================================\")\r\n\r\nn_batches = len(train_generator)\r\nprint(n_batches) # 348\r\n\r\ntrain_generator = train_datagen.flow_from_directory(\r\n \"./data/data2\",\r\n target_size=(150,150),\r\n batch_size=5*n_batches, # 5 images at a time\r\n class_mode=\"binary\"\r\n # ,save_to_dir=\"./data/data1_2/train\"\r\n)\r\ntest_generator = test_datagen.flow_from_directory(\r\n \"./data/data2\",\r\n target_size=(150,150),\r\n batch_size=5*n_batches, # 5 images at a time\r\n class_mode=\"binary\"\r\n # ,save_to_dir=\"./data/data1_2/test\"\r\n)\r\n\r\n# (x_train, y_train), (x_test, y_test) = 
cifar10.load_data()\r\nnp.save(\"./data/keras64_imageDataGenerator1_x_train\",arr=train_generator[0][0])\r\nnp.save(\"./data/keras64_imageDataGenerator1_y_train\",arr=train_generator[0][1])\r\nnp.save(\"./data/keras64_imageDataGenerator1_x_test\",arr=test_generator[0][0])\r\nnp.save(\"./data/keras64_imageDataGenerator1_y_test\",arr=test_generator[0][1])\r\n\r\n\r\n\r\n\r\n# x_train = np.load(\"./data/keras63_imageDataGenerator2_x_train.npy\")\r\n# x_test = np.load(\"./data/keras63_imageDataGenerator2_y_train.npy\")\r\n# y_train = np.load(\"./data/keras63_imageDataGenerator2_x_test.npy\")\r\n# y_test = np.load(\"./data/keras63_imageDataGenerator2_y_test.npy\")\r\n# print(x_train.shape)\r\n# print(x_test.shape)\r\n# print(y_train.shape)\r\n# print(y_test.shape)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Study/keras/keras64_imageDataGenerator1_1126.py","file_name":"keras64_imageDataGenerator1_1126.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"330711290","text":"#testing sql connection\n\nimport pyodbc\n\n# conn = pyodbc.connect('Driver={SQL Server};'\n# 'Server=ROBUN028\\\\RZC;'\n# 'Database=TINMAR_ERP_NEW_LOCAL;'\n# 'Trusted_Connection=yes;')\n#\n\nconn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=ROBUN028\\\\RZC;DATABASE=TINMAR_ERP_NEW_LOCAL;UID=sa;PWD=vasilica#25')\n\ncursor = conn.cursor()\ncursor.execute('SELECT top 10 * FROM Document')\n\nfor row in cursor:\n print(row)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"495152166","text":"#Решение к заданию \"Шифр Вижинера\"\n#This method works for encoding of ASCII.\n\nwhile True:\n language = input(\"1)Русский\\n2)English\\nSelect language:\")\n\n#Russian\n if language == '2' or language == 'English':\n message = input(\"Enter message: \")\n key = input(\"Enter key: \")\n\n message = message.lower()\n\n key *= len(message)//len(key) + 1 #Fit the key to the size of the original message\n res = \"\" # результат\n\n for i, j in enumerate(message):\n gg = (ord(j) + ord(key[i])) # the conversion of a symbol into a number and the summation of these numbers.\n res += chr(gg%26+ord('A')) # filling the variable with an encrypted message and translating from number to symbol.\n\n print(\"Encrypted message: \"+ str(res))\n#English\n elif language == '1' or language == 'Русский':\n message = input(\"Ведите ваше сообщение: \")\n key = input(\"Введите слово-ключ: \")\n\n message = message.lower()\n\n key *= len(message) // len(key) + 1 # Подгон ключа под размер исходного сообщения\n res = \"\" # результат\n\n for i, j in enumerate(message):\n gg = (ord(j) + ord(key[i])) # Перевод символа в номер и суммирование этих номеров\n res += chr(gg % 26 + ord('А')) # заполнение переменной зашифрованным сообщением и перевод из номера в символ\n\n print(\"Зашифрованное сообщение: \" + str(res))\n# Проверка на ошибку ввода\n else:\n print('Command not found!')\n\n if input(\"* * * * *\\nRefresh(y/n)?\") == 'n':\n break\n\n\n\n","sub_path":"Vigenere_cipher.py","file_name":"Vigenere_cipher.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"290375493","text":"loading_quotes = [\n ('wat do? 
JAVASCRIPT Google Maps, handles multiple', 'Chris Shaw'),\n ('No action is required on your end :)', 'Ryan Greenland'),\n ('hows it going today big dawg?', 'Michael King'),\n ('Kool Aid Man', 'Jerod Badger'),\n ('ayeeee nice', 'Josh Martinez'),\n ('I bet there\\'s something on the site that\\'s causing it to bug out', 'Joseph Moore'),\n ('I don\\'t know if I know that Pokémon...', 'Joseph Moore'),\n ('HAHAHAHAHAHAHAHAHAHAHAHAHAH', 'Chris Shaw'),\n ('you already know the password right?', 'Chris Shaw'),\n ('you have clearance Clarence', 'Chris Shaw'),\n ('you are a modern day hero', 'Chris Shaw'),\n ('iiiiii don\\'t know? maybe it\\'s in readthe.guide?', 'Chris Shaw'),\n ('STAY OFF MY TURF.', 'Jerod Badger'),\n ('I\\'ve got a meeting in nine minutes.', 'Jerod Badger'),\n ('Weird. But, okay.', 'Jerod Badger'),\n ('Dopest dope.', 'Jerod Badger'),\n ('MUST DESTROY', 'Jerod Badger'),\n ('if you want to mess with those you totes can', 'Michael King'),\n ('they all look very quick and easy', 'Michael King'),\n ('forever immortalized', 'Michael King'),\n ('even after compressing- i got the same as the before score', 'Ryan Greenland'),\n ('I think it\\'s a salmon-crested cockatoo', 'Joseph Moore'),\n ('I\\'ve seen this once before but I can\\'t remember the solution.', 'Joseph Moore'),\n ('bruh. why you breaking things', 'Joseph Moore'),\n ('Yay verily.', 'Joseph Moore'),\n ('lolol alright. I haven\\'t even looked at them', 'Joshua Martinez'),\n ('noice', 'Joshua Martinez'),\n ('gad dang beaurocracy', 'Joey Puopolo'),\n ('can you send me a link to compressinator v2?', 'Alvin Go')\n]","sub_path":"loadingquotes.py","file_name":"loadingquotes.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"15212149","text":"'''\nThis module defines asynchronous tasks being used by celery\n'''\nimport logging\nfrom requests.exceptions import ConnectionError\nfrom celery import task\nimport requests\nfrom vendors import load_provider\nfrom vendors.sms import app_settings\nfrom dvutils.utils import queue_wrapper\n\nLOGGER = logging.getLogger(__name__)\n\n\n\n@task()\ndef send_bulk_sms(wbn_list=[]):\n return None\n if wbn_list:\n # cyclic import\n from package.models import find_by_wbn\n from .sms import sms_dispatch\n fail_wbns = []\n for wbn in wbn_list:\n p = find_by_wbn(wbn)\n if p:\n try:\n sms_dispatch(p)\n except ConnectionError:\n fail_wbns.append(p.wbn)\n if fail_wbns and not (set(wbn_list) == set(fail_wbns)):\n send_bulk_sms.apply_async(fail_wbns, countdown=5*60)\n\n\n@task()\ndef send_ndr_bulk_sms(wbn_list=[]):\n return None\n if wbn_list:\n # cyclic import\n from package.models import find_by_wbn, connection\n from .sms import sms_ndr\n sms_waybills = connection.Package.find({\n 'wbn': {'$in': wbn_list},\n 'cs.ss': {'$in': ['Returned', 'Scheduled', 'Pending']}\n }).distinct('wbn')\n\n fail_wbns = []\n for wbn in sms_waybills:\n p = find_by_wbn(wbn)\n if p:\n try:\n sms_ndr(p)\n except ConnectionError:\n fail_wbns.append(p.wbn)\n if fail_wbns and not (set(sms_waybills) == set(fail_wbns)):\n send_ndr_bulk_sms.apply_async(fail_wbns, countdown=5*60)\n\n\n@task(queue=queue_wrapper('vendors.sms.tasks.send_sms'))\ndef send_sms(\n phone, provider=app_settings.SMS_VENDOR_DEFAULT, msg_type=None,\n **kwargs):\n '''\n Function to send SMS\n '''\n provider = app_settings.SMS_VENDOR.get(msg_type, None)\n if not provider:\n return\n module = load_provider('sms', provider)\n provider = module.SMS(\n phone, msg_type, 
**kwargs)\n url, auth = provider.get_credentials()\n params = provider.get_params()\n method = module.get_method()\n\n if method == 'GET':\n request = requests.get\n elif method == 'POST':\n request = requests.post\n\n if auth:\n response = request(url, auth=auth, params=params)\n else:\n response = request(url, params=params)\n\n if response.status_code == requests.codes.ok:\n return module.is_safe_response(response)\n else:\n LOGGER.warning(\n 'Invalid response from provider.'\n ' Status Code {}. Please enable debug mode'\n 'to inspect response'.format(response.status_code))\n","sub_path":"vendors/sms/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"61452415","text":"from random import randrange\n\nprint (\"INTEGER DIVISIONS\")\nwhile 1:\n\tnumerator = randrange(5) ; denominator = randrange(5)\n\twhile denominator == 0:\n\t\tdenominator = randrange(5)\n\ttry:\n\t\tanswer = int(input(str(numerator) + \"/\" + str(denominator) + \" = \"))\n\t\tif answer == numerator//denominator:\n\t\t\tprint(\"CORRECT\")\n\t\telse:\n\t\t\tprint(\"INCORRECT\")\t\n\texcept ValueError:\n\t\tprint(\"Please enter Integers Only!\")","sub_path":"HW5/Integer.py","file_name":"Integer.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"188903905","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport collections\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\nfrom caffe2.python import core, dyndep, workspace\nfrom caffe2.quantization.server import utils as dnnlowp_utils\nfrom caffe2.quantization.server.dnnlowp_test_utils import (\n avoid_vpmaddubsw_overflow_fc,\n check_quantized_results_close,\n run_conv_or_fc,\n)\nfrom hypothesis import given\n\n\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\n\n\nclass DNNLowPFullyConnectedOpTest(hu.HypothesisTestCase):\n # correctness test with no quantization error in inputs\n @given(\n input_channels=st.sampled_from([3, 4, 5, 8, 16, 32]),\n output_channels=st.integers(2, 16),\n batch_size=st.integers(0, 16),\n in_quantized=st.booleans(),\n out_quantized=st.booleans(),\n weight_quantized=st.booleans(),\n prepack_weight=st.booleans(),\n preserve_activation_sparsity=st.booleans(),\n preserve_weight_sparsity=st.booleans(),\n fuse_relu=st.booleans(),\n output_packed_bias=st.booleans(),\n use_input_qparam=st.booleans(),\n **hu.gcs_cpu_only\n )\n def test_dnnlowp_fully_connected_int(\n self,\n input_channels,\n output_channels,\n batch_size,\n in_quantized,\n out_quantized,\n weight_quantized,\n prepack_weight,\n preserve_activation_sparsity,\n preserve_weight_sparsity,\n fuse_relu,\n output_packed_bias,\n use_input_qparam,\n gc,\n dc,\n ):\n # X and W have scale 1, so exactly represented after quantization\n X_min = 0 if preserve_activation_sparsity else -77\n X_max = X_min + 255\n X = np.round(\n np.random.rand(batch_size, input_channels) * (X_max - X_min) + X_min\n )\n X = X.astype(np.float32)\n # input channels 0 and 1 are all X_min to avoid overflow from vpmaddubsw\n # when multiplied with W_min and W_max\n X[:, 0] = X_min\n if batch_size != 0:\n X[0, 1] = X_max\n\n if preserve_weight_sparsity:\n W_min = -128\n W_max = 100\n else:\n W_min = -100\n W_max = 
W_min + 255\n W = np.round(\n np.random.rand(output_channels, input_channels) * (W_max - W_min) + W_min\n )\n W = W.astype(np.float32)\n W[0, 0] = W_min\n W[1, 0] = W_max\n\n # Make sure we won't have overflows from vpmaddubsw instruction used in\n # fbgemm\n avoid_vpmaddubsw_overflow_fc(\n batch_size,\n input_channels,\n output_channels,\n X,\n X_min,\n X_max,\n W,\n W_min,\n W_max,\n )\n\n b = np.random.randn(output_channels).astype(np.float32)\n\n Output = collections.namedtuple(\"Output\", [\"Y\", \"op_type\", \"engine\"])\n outputs = []\n\n op_engine_list = [(\"FC\", \"\")]\n if fuse_relu:\n op_engine_list += [(\"Int8FCRelu\", \"DNNLOWP\")]\n else:\n op_engine_list += [\n (\"FC\", \"DNNLOWP\"),\n (\"FC\", \"DNNLOWP_16\"),\n (\"Int8FC\", \"DNNLOWP\"),\n ]\n\n for op_type, engine in op_engine_list:\n init_net = core.Net(\"test_init_net\")\n net = core.Net(\"test_net\")\n\n do_quantize = \"DNNLOWP\" in engine and in_quantized\n do_dequantize = \"DNNLOWP\" in engine and out_quantized\n do_quantize_weight = (\n engine == \"DNNLOWP\" and weight_quantized and len(outputs) > 0\n )\n do_prepack_weight = engine == \"DNNLOWP\" and prepack_weight\n\n if do_quantize:\n quantize = core.CreateOperator(\n \"Quantize\",\n [\"X\"],\n [\"X_q\"],\n preserve_activation_sparsity=preserve_activation_sparsity,\n engine=engine,\n device_option=gc,\n )\n net.Proto().op.extend([quantize])\n\n X_min = 0 if X.size == 0 else X.min()\n X_max = 0 if X.size == 0 else X.max()\n x_q_param = dnnlowp_utils.choose_quantization_params(\n X_min, X_max, preserve_activation_sparsity\n )\n w_q_param = None\n if do_quantize_weight:\n (\n int8_given_tensor_fill,\n w_q_param,\n ) = dnnlowp_utils.create_int8_given_tensor_fill(\n W, \"W_q\", preserve_weight_sparsity\n )\n init_net.Proto().op.extend([int8_given_tensor_fill])\n\n # Bias\n int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(\n b, \"b_q\", x_q_param, w_q_param\n )\n init_net.Proto().op.extend([int8_bias_tensor_fill])\n\n if do_prepack_weight:\n inputs = [\"W_q\" if do_quantize_weight else \"W\"]\n if do_dequantize:\n inputs += [\"b_q\" if do_quantize_weight else \"b\"]\n pack = core.CreateOperator(\n \"Int8FCPackWeight\",\n inputs,\n [\"W_packed\", \"B_q32\"]\n if do_dequantize and output_packed_bias\n else [\"W_packed\"],\n preserve_weight_sparsity=preserve_weight_sparsity,\n in_scale=x_q_param.scale,\n engine=engine,\n )\n init_net.Proto().op.extend([pack])\n\n if use_input_qparam and do_dequantize and op_type != \"FC\":\n fc = core.CreateOperator(\n op_type,\n [\n \"X_q\" if do_quantize else \"X\",\n \"W_packed\"\n if do_prepack_weight\n else (\"W_q\" if do_quantize_weight else \"W\"),\n \"b_q\" if do_quantize_weight else \"b\",\n \"scale\",\n \"zero_point\",\n ],\n [\"Y_q\" if do_dequantize else \"Y\"],\n dequantize_output=not do_dequantize,\n preserve_activation_sparsity=preserve_activation_sparsity,\n preserve_weight_sparsity=preserve_weight_sparsity,\n engine=engine,\n device_option=gc,\n )\n else:\n fc = core.CreateOperator(\n op_type,\n [\n \"X_q\" if do_quantize else \"X\",\n \"W_packed\"\n if do_prepack_weight\n else (\"W_q\" if do_quantize_weight else \"W\"),\n \"b_q\" if do_quantize_weight else \"b\",\n ],\n [\"Y_q\" if do_dequantize else \"Y\"],\n dequantize_output=not do_dequantize,\n preserve_activation_sparsity=preserve_activation_sparsity,\n preserve_weight_sparsity=preserve_weight_sparsity,\n engine=engine,\n device_option=gc,\n )\n if do_quantize_weight or do_prepack_weight:\n # When quantized weight is provided, we can't rescale 
the\n # output dynamically by looking at the range of output of each\n # batch, so here we provide the range of output observed from\n # fp32 reference implementation\n dnnlowp_utils.add_quantization_param_args(\n fc, outputs[0][0], preserve_activation_sparsity\n )\n\n net.Proto().op.extend([fc])\n if fuse_relu and \"DNNLOWP\" not in engine:\n net.Relu([\"Y\"], \"Y\")\n\n if do_dequantize:\n dequantize = core.CreateOperator(\n \"Dequantize\", [\"Y_q\"], [\"Y\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([dequantize])\n\n if use_input_qparam and do_dequantize and op_type != \"FC\":\n ref_output = outputs[0][0]\n ref_output_min = 0 if ref_output.size == 0 else ref_output.min()\n ref_output_max = 0 if ref_output.size == 0 else ref_output.max()\n q_param = dnnlowp_utils.choose_quantization_params(\n ref_output_min, ref_output_max, preserve_activation_sparsity\n )\n run_conv_or_fc(\n self,\n init_net,\n net,\n X,\n W,\n b,\n op_type,\n engine,\n None,\n gc,\n outputs,\n np.array([q_param.scale]).astype(np.float32),\n np.array([q_param.zero_point]).astype(np.int32),\n )\n else:\n run_conv_or_fc(\n self, init_net, net, X, W, b, op_type, engine, None, gc, outputs\n )\n\n if output_packed_bias and do_prepack_weight and do_dequantize:\n bias_int32 = self.ws.blobs[\"B_q32\"].fetch()\n if do_quantize_weight:\n np.testing.assert_equal(\n bias_int32[0], np.round(b / (x_q_param.scale * w_q_param.scale))\n )\n np.testing.assert_equal(bias_int32[0].dtype, np.int32)\n\n check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)\n","sub_path":"caffe2/quantization/server/fully_connected_dnnlowp_op_test.py","file_name":"fully_connected_dnnlowp_op_test.py","file_ext":"py","file_size_in_byte":9777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"562084916","text":"import ast\nimport getpass\nimport glob\nimport logging\nimport os\nimport sys\nimport urllib\n\nimport ee\nimport requests\nimport retrying\nfrom requests_toolbelt.multipart import encoder\nfrom bs4 import BeautifulSoup\n\nimport helper_functions\nimport metadata_loader\n\n\ndef upload(user, source_path, destination_path=None, metadata_path=None, collection_name=None, multipart_upload=False,\n nodata_value=None):\n \"\"\"\n Uploads content of a given directory to GEE. 
The function first uploads an asset to Google Cloud Storage (GCS)\n and then uses ee.data.startIngestion to put it into GEE, Due to GCS intermediate step, users is asked for\n Google's account name and password.\n\n In case any exception happens during the upload, the function will repeat the call a given number of times, after\n which the error will be propagated further.\n\n :param user: name of a Google account\n :param source_path: path to a directory\n :param destination_path: where to upload (absolute path)\n :param metadata_path: (optional) path to file with metadata\n :param collection_name: (optional) name to be given for the uploaded collection\n :return:\n \"\"\"\n\n metadata = metadata_loader.load_metadata_from_csv(metadata_path) if metadata_path else None\n\n password = getpass.getpass()\n google_session = __get_google_auth_session(user, password)\n\n absolute_directory_path_for_upload = __get_absolute_path_for_upload(collection_name, destination_path)\n helper_functions.create_image_collection(absolute_directory_path_for_upload)\n\n path = os.path.join(os.path.expanduser(source_path), '*.tif')\n all_images_paths = glob.glob(path)\n no_images = len(all_images_paths)\n\n images_for_upload_path = __find_remaining_assets_for_upload(all_images_paths, absolute_directory_path_for_upload)\n\n for current_image_no, image_path in enumerate(images_for_upload_path):\n logging.info('Processing image %d out of %d: %s', current_image_no+1, no_images, image_path)\n filename = helper_functions.get_filename_from_path(path=image_path)\n\n asset_full_path = absolute_directory_path_for_upload + '/' + filename\n\n if metadata and not filename in metadata:\n logging.warning(\"No metadata exists for image %s: it will not be ingested\", filename)\n with open('assets_missing_metadata.log', 'a') as missing_metadata_file:\n missing_metadata_file.write(image_path + '\\n')\n continue\n\n properties = metadata[filename] if metadata else None\n\n try:\n r = __upload_to_gcs_and_start_ingestion_task(current_image_no, asset_full_path, google_session, image_path,\n properties, multipart_upload, nodata_value)\n except Exception as e:\n logging.exception('Upload of %s has failed.', filename)\n\n\ndef __find_remaining_assets_for_upload(path_to_local_assets, path_remote):\n local_assets = [helper_functions.get_filename_from_path(path) for path in path_to_local_assets]\n if helper_functions.collection_exist(path_remote):\n remote_assets = helper_functions.get_asset_names_from_collection(path_remote)\n if len(remote_assets) > 0:\n assets_left_for_upload = set(local_assets) - set(remote_assets)\n if len(assets_left_for_upload) == 0:\n logging.warning('Collection already exists and contains all assets provided for upload. Exiting ...')\n sys.exit(1)\n\n logging.info('Collection already exists. 
%d assets left for upload to %s.', len(assets_left_for_upload), path_remote)\n assets_left_for_upload_full_path = [path for path in path_to_local_assets\n if helper_functions.get_filename_from_path(path) in assets_left_for_upload]\n return assets_left_for_upload_full_path\n\n return path_to_local_assets\n\n\ndef __get_absolute_path_for_upload(collection_name, destination_path):\n if destination_path: # user has provided an absolute path\n return destination_path\n if collection_name.startswith('users') or collection_name.startswith('/users'): # absolute path\n return collection_name\n else: # relative path\n root_path_in_gee = ee.data.getAssetRoots()[0]['id']\n absolute_path = root_path_in_gee + '/' + collection_name\n return absolute_path\n\n\n@retrying.retry(wait_exponential_multiplier=1000, wait_exponential_max=4000, stop_max_attempt_number=5)\ndef __upload_to_gcs_and_start_ingestion_task(current_image_no, asset_full_path, google_session, image_path, properties,\n multipart_upload, nodata_value):\n if multipart_upload:\n asset_request = __upload_large_file(session=google_session,\n file_path=image_path,\n asset_name=asset_full_path,\n properties=properties,\n nodata=nodata_value)\n else:\n asset_request = __upload_file(session=google_session,\n file_path=image_path,\n asset_name=asset_full_path,\n properties=properties,\n nodata=nodata_value)\n task_id = ee.data.newTaskId(1)[0]\n r = ee.data.startIngestion(task_id, asset_request)\n __periodic_wait(current_image=current_image_no, period=50)\n return r\n\n\ndef __validate_metadata(path_for_upload, metadata_path):\n validation_result = metadata_loader.validate_metadata_from_csv(metadata_path)\n keys_in_metadata = {result.keys for result in validation_result}\n images_paths = glob.glob(os.path.join(path_for_upload, '*.tif*'))\n keys_in_data = {helper_functions.get_filename_from_path(path) for path in images_paths}\n missing_keys = keys_in_data - keys_in_metadata\n\n if missing_keys:\n logging.warning('%d images does not have a corresponding key in metadata', len(missing_keys))\n print('\\n'.join(e for e in missing_keys))\n else:\n logging.info('All images have metadata available')\n\n if not validation_result.success:\n print('Validation finished with errors. 
Type \"y\" to continue, default NO: ')\n choice = raw_input().lower()\n if choice not in ['y', 'yes']:\n logging.info('Application will terminate')\n exit(1)\n\n\ndef __extract_metadata_for_image(filename, metadata):\n if filename in metadata:\n return metadata[filename]\n else:\n logging.warning('Metadata for %s not found', filename)\n return None\n\n\ndef __get_google_auth_session(username, password):\n google_accounts_url = 'https://accounts.google.com'\n authentication_url = 'https://accounts.google.com/ServiceLoginAuth'\n\n session = requests.session()\n\n login_html = session.get(google_accounts_url)\n soup_login = BeautifulSoup(login_html.content, 'html.parser').find('form').find_all('input')\n payload = {}\n for u in soup_login:\n if u.has_attr('value'):\n payload[u['name']] = u['value']\n\n payload['Email'] = username\n payload['Passwd'] = password\n\n auto = login_html.headers.get('X-Auto-Login')\n follow_up = urllib.unquote(urllib.unquote(auto)).split('continue=')[-1]\n galx = login_html.cookies['GALX']\n\n payload['continue'] = follow_up\n payload['GALX'] = galx\n\n session.post(authentication_url, data=payload)\n return session\n\n\ndef __get_upload_url(session):\n r = session.get('https://ee-api.appspot.com/assets/upload/geturl?')\n d = ast.literal_eval(r.text)\n return d['url']\n\n\ndef __upload_large_file(session, file_path, asset_name, properties=None, nodata=None):\n upload_url = __get_upload_url(session)\n with open(file_path, 'rb') as f:\n form = encoder.MultipartEncoder({\n \"documents\": (file_path, f, \"application/octet-stream\"),\n \"composite\": \"NONE\",\n })\n headers = {\"Prefer\": \"respond-async\", \"Content-Type\": form.content_type}\n resp = session.post(upload_url, headers=headers, data=form)\n gsid = resp.json()[0]\n asset_data = {\"id\": asset_name,\n \"tilesets\": [\n {\"sources\": [\n {\"primaryPath\": gsid,\n \"additionalPaths\": []\n }\n ]}\n ],\n \"bands\": [],\n \"properties\": properties,\n \"missingData\": {\"value\": nodata}\n }\n return asset_data\n\n\ndef __upload_file(session, file_path, asset_name, properties=None, nodata=None):\n with open(file_path, 'rb') as f:\n files = {'file': f}\n upload_url = __get_upload_url(session)\n upload = session.post(upload_url, files=files)\n gsid = upload.json()[0]\n asset_data = {\"id\": asset_name,\n \"tilesets\": [\n {\"sources\": [\n {\"primaryPath\": gsid,\n \"additionalPaths\": []\n }\n ]}\n ],\n \"bands\": [],\n \"properties\": properties,\n \"missingData\": {\"value\": nodata}\n }\n return asset_data\n\n\ndef __periodic_wait(current_image, period):\n if (current_image + 1) % period == 0:\n # Time to check how many tasks are running!\n logging.info('Periodic check for number of running tasks is due')\n helper_functions.wait_for_tasks_to_complete()","sub_path":"geebam/batch_uploader.py","file_name":"batch_uploader.py","file_ext":"py","file_size_in_byte":9716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"511615382","text":"from PIL import Image\n\ndef pixelate(img):\n pixels = img.load()\n colors = []\n current_color = pixels[0,0]\n pix_size = 25\n\n for x in range(0, img.size[0], pix_size):\n for y in range(0, img.size[1], pix_size):\n red_avg = 0\n green_avg = 0\n blue_avg = 0\n current = pixels[x,y]\n\n bx = x + pix_size\n by = y + pix_size\n if by >= img.size[1]:\n by = img.size[1]\n if bx >=img.size[0]:\n bx = img.size[0]\n for m in range(x, bx):\n for n in range(y, by):\n red_avg += pixels[m,n][0]\n green_avg += pixels[m,n][1]\n blue_avg 
+= pixels[m,n][2]\n red_avg = red_avg // (pix_size * pix_size)\n green_avg = green_avg // (pix_size * pix_size)\n blue_avg = blue_avg // (pix_size * pix_size)\n for h in range(x,bx):\n for k in range(y,by):\n pixels[h,k] = (red_avg, green_avg, blue_avg)\n\n return img\n\nimg = Image.open('sanfran.jpg')\nnew = pixelate(img)\nnew.save(\"pixelatedImageOfSanFran.jpg\")\n","sub_path":"2_Handcraft_Iteration_Games/10_levelofdifficulty.py","file_name":"10_levelofdifficulty.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"113806101","text":"# Copyright 2016 IBM Corp. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport requests.packages.urllib3\nimport zhmcclient\n\nfrom nova.test import TestCase\nfrom nova_dpm.tests.unit.virt.dpm import test_utils as utils\nfrom nova_dpm.virt.dpm import host\n\n\nclass HostTestCase(TestCase):\n\n def setUp(self):\n super(HostTestCase, self).setUp()\n requests.packages.urllib3.disable_warnings()\n self.session = utils.create_session_1()\n self.client = zhmcclient.Client(self.session)\n self.cpc = self.client.cpcs.find(**{\"name\": \"cpc_2\"})\n self.flags(host=utils.HOST)\n self.flags(group=\"dpm\", max_processors=3)\n self.flags(group=\"dpm\", max_memory=2048)\n self.host = host.Host(self.cpc, self.client)\n\n def test_host_properties(self):\n\n host_properties = self.host.properties\n self.assertEqual(utils.HOST,\n host_properties['hypervisor_hostname'])\n self.assertEqual('cpc_2', host_properties['cpc_name'])\n self.assertEqual(3, host_properties['vcpus'])\n self.assertEqual(2048, host_properties['memory_mb'])\n self.assertEqual(utils.MAX_PROC_USED, host_properties['vcpus_used'])\n self.assertEqual(utils.TOTAL_MEM_USED,\n host_properties['memory_mb_used'])\n self.assertEqual(2013001, host_properties['hypervisor_version'])\n self.assertEqual('PRSM', host_properties['hypervisor_type'])\n cpu_info = host_properties['cpu_info']\n cpu_info_dict = json.loads(cpu_info)\n self.assertEqual('s390x', cpu_info_dict['arch'])\n self.assertEqual('IBM', cpu_info_dict['vendor'])\n\n def test_get_proc_used(self):\n proc_used = self.host._get_proc_used()\n self.assertEqual(utils.MAX_PROC_USED, proc_used)\n\n def test_mem_used(self):\n memory_used = self.host._get_mem_used()\n self.assertEqual(utils.TOTAL_MEM_USED, memory_used)\n\n def test_get_version_in_int(self):\n version = self.host._get_version_in_int()\n self.assertEqual(2013001, version)\n","sub_path":"nova_dpm/tests/unit/virt/dpm/test_host.py","file_name":"test_host.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"412279111","text":"'''\nSee APi doc: https://github.com/ssllabs/ssllabs-scan/blob/stable/ssllabs-api-docs.md\n'''\nfrom datetime import datetime\nimport json\nimport os\nimport requests\nimport time\nimport ssllabsscan.main as m\n\nAPI_URL = 
\"https://api.ssllabs.com/api/v2/analyze\"\n\nCHAIN_ISSUES = {\n \"0\": \"none\",\n \"1\": \"unused\",\n \"2\": \"incomplete chain\",\n \"4\": \"chain contains unrelated or duplicate certs\",\n \"8\": \"chain order is incorrect\",\n \"16\": \"contains a self-signed root certificate\",\n \"32\": \"chain can't be validated\"\n}\n\n# Forward secrecy protects past sessions against future compromises of secret keys or passwords.\nFORWARD_SECRECY = {\n \"0\": \"No WEAK\",\n \"1\": \"With some browsers WEAK\",\n \"2\": \"With modern browsers\",\n \"3\": \"Yes, with modern browsers\",\n \"4\": \"Yes (with most browsers) ROBUST\"\n}\n\nPROTOCOLS = [\n \"TLS 1.3\", \"TLS 1.2\", \"TLS 1.1\", \"TLS 1.0\", \"SSL 3.0 INSECURE\", \"SSL 2.0 INSECURE\"\n]\n\nVULNERABLES = [\n \"Vuln Beast\", \"Vuln Drown\", \"Vuln Heartbleed\", \"Vuln FREAK\",\n \"Vuln openSsl Ccs\", \"Vuln openSSL LuckyMinus20\", \"Vuln POODLE\", \"Vuln POODLE TLS\"\n]\n\nSUMMARY_COL_NAMES = [\n \"Host\", \"Grade\", \"Hidden Grade\", \"Owner\", \"HasWarnings\", \"Cert Issuer\", \"Cert Expiry\", \"Chain issues\", \n \"Perfect Forward Secrecy\", \"Heartbeat ext\", \"Hostname\", \"Protocol\", \"Server signature\", \"HTTP Status Code\", \n \"Signature algorithm\"\n] + VULNERABLES + PROTOCOLS\n\n\nclass SSLLabsClient():\n def __init__(self, check_progress_interval_secs=15):\n self.__check_progress_interval_secs = check_progress_interval_secs\n\n '''\n Write scanned results to server's own json file\n '''\n def analyze(self, host, summary_csv_file, owner):\n data = self.start_new_scan(host=host)\n # Replaces / with _ \n host = host.replace('/', '_')\n # Check if 'json_data' directory exists before writing to it\n if os.path.exists(os.path.join(m.PATH, 'json_data')):\n json_file = os.path.join(os.path.join(m.PATH, \"json_data\"), f\"{host}.json\")\n else:\n os.makedirs(os.path.join(m.PATH, 'json_data'))\n p = os.path.join(m.PATH, 'json_data')\n json_file = os.path.join(p, f\"{host}.json\")\n # Dump JSON\n with open(json_file, \"w\") as outfile:\n json.dump(data, outfile, indent=2)\n print('JSON dumped successfully.')\n # write the summary to file\n self.append_summary_csv(summary_csv_file, host, data, owner)\n\n '''\n Run a SSLLABS scan on a server\n '''\n def start_new_scan(self, host, publish=\"off\", startNew=\"on\", all=\"done\", ignoreMismatch=\"on\"):\n path = API_URL\n payload = {\n \"host\": host,\n \"publish\": publish,\n \"startNew\": startNew,\n \"all\": all,\n \"ignoreMismatch\": ignoreMismatch\n }\n results = self.request_api(path, payload)\n payload.pop(\"startNew\")\n while results[\"status\"] != \"READY\" and results[\"status\"] != \"ERROR\":\n time.sleep(self.__check_progress_interval_secs)\n results = self.request_api(path, payload)\n return results\n\n '''\n Takes in bit value representing number of flags in a host's chain issues\n Unpacks the bit values and returns list of issues\n '''\n def get_chain_issues(self, val):\n result = []\n val = int(val)\n # If host has 0 issues\n if val == 0:\n result = CHAIN_ISSUES[str(0)]\n return result\n if val & (1 << 0):\n result.append(CHAIN_ISSUES[str(1)])\n if val & (1 << 1):\n result.append(CHAIN_ISSUES[str(2)])\n if val & (1 << 2):\n result.append(CHAIN_ISSUES[str(4)])\n if val & (1 << 3):\n result.append(CHAIN_ISSUES[str(8)])\n if val & (1 << 4):\n result.append(CHAIN_ISSUES[str(16)])\n if val & (1 << 5):\n result.append(CHAIN_ISSUES[str(32)])\n result = ' AND '.join(result)\n return result\n\n '''\n Access API\n '''\n @staticmethod\n def request_api(url, payload):\n response = 
requests.get(url, params=payload)\n return response.json()\n\n '''\n Converts epoch time to readable time format\n '''\n @staticmethod\n def prepare_datetime(epoch_time):\n # SSL Labs returns an 13-digit epoch time that contains milliseconds, Python only expects 10 digits (seconds)\n return datetime.utcfromtimestamp(float(str(epoch_time)[:10])).strftime(\"%Y-%m-%d\")\n\n '''\n Summarize all json data into html file\n '''\n def append_summary_csv(self, summary_file, host, data, owner):\n # write the summary to file\n with open(os.path.join(m.PATH, summary_file), \"a\") as outfile:\n proto = data['protocol']\n # Only parse through first ['endpoint'] as some sites have multiple hostnames.\n # Some servers don't report certain fields if it cannot detect it\n try:\n server_sig = data[\"endpoints\"][0][\"details\"][\"serverSignature\"]\n except:\n server_sig = \"N/A\"\n try:\n chain_issues = self.get_chain_issues(str(data[\"endpoints\"][0][\"details\"][\"chain\"][\"issues\"]))\n except:\n chain_issues = \"N/A\"\n try:\n server_name = data[\"endpoints\"][0][\"serverName\"]\n except:\n server_name = \"N/A\"\n # see SUMMARY_COL_NAMES\n summary = [\n host,\n data[\"endpoints\"][0][\"grade\"],\n data[\"endpoints\"][0]['gradeTrustIgnored'],\n owner,\n data[\"endpoints\"][0][\"hasWarnings\"],\n data[\"endpoints\"][0][\"details\"][\"cert\"][\"issuerLabel\"],\n self.prepare_datetime(data[\"endpoints\"][0][\"details\"][\"cert\"][\"notAfter\"]),\n chain_issues,\n FORWARD_SECRECY[str(data[\"endpoints\"][0][\"details\"][\"forwardSecrecy\"])],\n data[\"endpoints\"][0][\"details\"][\"heartbeat\"],\n server_name,\n proto,\n server_sig,\n data[\"endpoints\"][0][\"details\"][\"httpStatusCode\"],\n data[\"endpoints\"][0][\"details\"][\"cert\"][\"sigAlg\"],\n data[\"endpoints\"][0][\"details\"][\"vulnBeast\"],\n data[\"endpoints\"][0][\"details\"][\"drownVulnerable\"],\n data[\"endpoints\"][0][\"details\"][\"heartbleed\"],\n data[\"endpoints\"][0][\"details\"][\"freak\"],\n False if data[\"endpoints\"][0][\"details\"][\"openSslCcs\"] == 1 else True,\n False if data[\"endpoints\"][0][\"details\"][\"openSSLLuckyMinus20\"] == 1 else True,\n data[\"endpoints\"][0][\"details\"][\"poodle\"],\n False if data[\"endpoints\"][0][\"details\"][\"poodleTls\"] == 1 else True,\n ]\n for protocol in PROTOCOLS:\n found = False\n for p in data[\"endpoints\"][0][\"details\"][\"protocols\"]:\n if protocol.startswith(f\"{p['name']} {p['version']}\"):\n found = True\n break\n summary += [\"Yes\" if found is True else \"No\"]\n outfile.write(\",\".join(str(s) for s in summary) + \"\\n\")","sub_path":"ssllabsscan/ssllabs_client.py","file_name":"ssllabs_client.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"268715577","text":"import os\r\nimport datetime\r\nimport shutil\r\n\r\npath = input(\"What path would you like to clear: \")\r\ndays = int(input(\"What is the day limit: \"))\r\n\r\nexist = os.path.exists(path)\r\n\r\nif(exist == False):\r\n print(\"Please provide a valid path\")\r\n path = input(\"What path would you like to clear: \")\r\n\r\nif(os.path.isfile(path)):\r\n print(\"Please provide path of a directory\")\r\n path = input(\"What path would you like to clear: \")\r\n\r\n\r\nfor root, dirs, files in os.walk(path, topdown=False):\r\n for file in files:\r\n fullPath = os.path.join(root, file)\r\n presentTime = datetime.datetime.now()\r\n file_cre_time = datetime.datetime.fromtimestamp(os.path.getctime(fullPath))\r\n no_of_days 
= (presentTime - file_cre_time).days\r\n if(no_of_days >= days):\r\n os.remove(fullPath)\r\n print(\"Your computer is now clean without any unwanted files!!\")\r\n for i in dirs:\r\n fol_path = os.path.join(root, i)\r\n if len(os.listdir(fol_path)) == 0:\r\n shutil.rmtree(fol_path)\r\n print(\"Your computer is now clean without any unwanted files!!\")","sub_path":"removeFiles.py","file_name":"removeFiles.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"452212198","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def delNodes(self, root: TreeNode, to_delete: List[int]) -> List[TreeNode]:\n to_delete_set = set(to_delete)\n stack = [(root, None, None)]\n forest = ([root], [])[root.val in to_delete_set]\n \n while stack:\n node, parent, direction = stack.pop()\n \n if node.val in to_delete_set:\n if parent:\n if direction == \"left\":\n parent.left = None\n else:\n parent.right = None\n if node.left and node.left.val not in to_delete_set:\n forest.append(node.left)\n if node.right and node.right.val not in to_delete_set:\n forest.append(node.right)\n \n if node.left:\n stack.append((node.left, node, \"left\"))\n if node.right:\n stack.append((node.right, node, \"right\"))\n \n return forest\n","sub_path":"Take2Camp/DeleteNodesAndReturnForest.py","file_name":"DeleteNodesAndReturnForest.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"282494179","text":"import sublime, sublime_plugin, sys\nfrom . import *\n\nclass GoldOpenEnvProjCommand(sublime_plugin.ApplicationCommand):\n\n def run(self):\n gold_helpers.LogAndStatusMessage(\"--> \" + __name__ + \": \" + type(self).__name__ + \".\" + sys._getframe().f_code.co_name)\n sublime.active_window().run_command(\"open_window\")\n sublime.active_window().run_command(\"prompt_open_project\")\n projectFilename = sublime.active_window().project_file_name()\n projectPath = sublime.active_window().project_data()['wyde-root']\n\n if projectFilename != None and projectPath != None:\n # projectPath = projectFilename[0:projectFilename.rfind('\\\\')]\n gold_environnement.InitializeEnvironnement(projectPath)\n\n gold_helpers.LogAndStatusMessage(\"<-- \" + __name__ + \": \" + type(self).__name__ + \".\" + sys._getframe().f_code.co_name)\n","sub_path":"POC/v0_0_POC_via_Dll/gold_open_env_proj.py","file_name":"gold_open_env_proj.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"626671362","text":"# convert the latitude/longitude coordinates to the HERE Tile ID\nimport math\nimport unittest\nfrom baseconvert import BaseConverter\n\nTILE_LEVEL = 12\n\n\ndef check_digits_num(bin_start):\n '''Check and correct binary length.'''\n if len(bin_start) == 13:\n bin_start = bin_start[:1] + '' + bin_start[1 + 1:]\n elif len(bin_start) == 14:\n bin_start = bin_start[:0] + '' + bin_start[1 + 1:]\n return bin_start\n\n\ndef to_base_4(long_bin):\n '''Convert from binary value to a base 4 integer.'''\n base4v = BaseConverter(input_base=2, output_base=4, string=True)\n return base4v(long_bin)\n\n\ndef to_base_10(test, short_bin):\n '''Convert from base 4 to base 10.'''\n base4v = BaseConverter(input_base=4, output_base=10, string=True)\n 
converted_partition = int(base4v(short_bin))\n print(\"The coordinates\", test, \"correspond to the Partition ID\", converted_partition)\n return converted_partition\n\n\ndef convert_tile_id(y_coordinata, x_coordinata):\n '''Convert from the latitude/longitude coordinates to Partition ID.'''\n delta_for_tile = 360 / (2 ** TILE_LEVEL)\n x_tile = math.floor(((180 + x_coordinata) / delta_for_tile))\n a_x_bin = check_digits_num(bin(x_tile))\n y_tile = math.floor((90 + y_coordinata) / delta_for_tile)\n a_y_bin = check_digits_num(bin(y_tile))\n num_base_4 = to_base_4(\"\".join([\"%s%s\" % (k, v) for k, v in zip(a_y_bin, a_x_bin)]))\n num_base_10 = num_base_4.rjust(13, '0')\n partition_id = to_base_10((x_coordinata, y_coordinata), num_base_10)\n return partition_id\n\n\nclass PartitionByCoordsTestCase(unittest.TestCase):\n def test_fra(self):\n expected = 23595506\n lat = 48.97789222693959\n lon = 2.470016718535561\n self.assertEqual(convert_tile_id(lat, lon), expected)\n\n def test_partition_in_usa(self):\n expected = 20797560\n lat = 48.402208\n lon = -4.493086\n self.assertEqual(convert_tile_id(lat, lon), expected)\n\n def test_partition_in_afr(self):\n expected = 21550951\n lat = -31.879325\n lon = 22.101703\n self.assertEqual(convert_tile_id(lat, lon), expected)\n\n def test_partition_in_mex(self):\n expected = 18464000\n lat = -25.279134\n lon = -57.617324\n self.assertEqual(convert_tile_id(lat, lon), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"my_progect/coord_by_partition_id.py","file_name":"coord_by_partition_id.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"221813489","text":"#\n# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice, this\n# list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nfrom MDSplus import mdsExceptions, Device, Data, Window, Range, Dimension, TreePath, Int32, Float32, Float64\nfrom threading import Thread, Condition\nfrom ctypes import CDLL, c_int, c_short, c_long, byref, Structure, c_void_p, c_char_p\nfrom time import sleep\n\n\nclass CAENDT5724(Device):\n \"\"\"CAEN DT5724 4 Channels 14 Bit 100MS/S Digitizer\"\"\"\n parts = [\n {'path': ':BOARD_ID', 'type': 'numeric', 'value': 0},\n {'path': ':COMMENT', 'type': 'text'},\n {'path': ':TRIG_MODE', 'type': 'text', 'value': 'OVER THRESHOLD'},\n {'path': ':TRIG_SOFT', 'type': 'text', 'value': 'ENABLED'},\n {'path': ':TRIG_EXT', 'type': 'text', 'value': 'ENABLED'},\n {'path': ':TRIG_SOURCE', 'type': 'numeric'},\n {'path': ':CLOCK_MODE', 'type': 'text', 'value': '250 MHz'},\n {'path': ':CLOCK_SOURCE', 'type': 'numeric'},\n {'path': ':NUM_SEGMENTS', 'type': 'numeric', 'value': 1024},\n {'path': ':USE_TIME', 'type': 'text', 'value': 'YES'},\n {'path': ':PTS', 'type': 'numeric', 'value': 1024},\n {'path': ':START_IDX', 'type': 'numeric', 'value': 0},\n {'path': ':END_IDX', 'type': 'numeric', 'value': 1024},\n {'path': ':START_TIME', 'type': 'numeric', 'value': 0},\n {'path': ':END_TIME', 'type': 'numeric', 'value': 1E-6},\n {'path': ':ACQ_MODE', 'type': 'text', 'value': 'TRANSIENT RECORDER'},\n {'path': ':IRQ_EVENTS', 'type': 'numeric', 'value': 0},\n ]\n for i in range(0, 4):\n parts.extend([\n {'path': '.CHANNEL_%d' % (i+1), 'type': 'structure'},\n {'path': '.CHANNEL_%d:STATE' % (\n i+1), 'type': 'text', 'value': 'ENABLED'},\n {'path': '.CHANNEL_%d:TRIG_STATE' % (\n i+1), 'type': 'text', 'value': 'DISABLED'},\n {'path': '.CHANNEL_%d:OFFSET' % (\n i+1), 'type': 'numeric', 'value': 0},\n {'path': '.CHANNEL_%d:DAC_OFFSET' % (\n i+1), 'type': 'numeric', 'value': 0},\n {'path': '.CHANNEL_%d:THRESH_LEVEL' % (\n i+1), 'type': 'numeric', 'value': 0},\n {'path': '.CHANNEL_%d:THRESH_SAMPL' % (\n i+1), 'type': 'numeric', 'value': 0},\n {'path': '.CHANNEL_%d:DATA' % (i+1), 'type': 'signal'},\n {'path': '.CHANNEL_%d:SEG_RAW' % (i+1), 'type': 'signal'},\n ])\n del(i)\n parts.extend([\n {'path': ':INIT_ACTION', 'type': 'action',\n 'valueExpr': \"Action(Dispatch('PXI_SERVER','INIT',50,None),Method(None,'init',head))\",\n 'options': ('no_write_shot',)},\n {'path': ':START_ACTION', 'type': 'action',\n 'valueExpr': \"Action(Dispatch('PXI_SERVER','STORE',50,None),Method(None,'start_store',head))\",\n 'options': ('no_write_shot',)},\n {'path': ':STOP_ACTION', 'type': 'action',\n 'valueExpr': \"Action(Dispatch('PXI_SERVER','STORE',50,None),Method(None,'stop_store',head))\",\n 'options': ('no_write_shot',)},\n {'path': ':NUM_CHANNELS', 'type': 'numeric', 'value': 0},\n ])\n\n cvV1718 = 0 # CAEN V1718 USB-VME bridge\n cvV2718 = 1 # V2718 PCI-VME bridge with optical link\n cvA2818 = 2 # PCI board with optical link\n cvA2719 = 3 # Optical link piggy-back\n cvA32_S_DATA = 0x0D # A32 supervisory data access\n\n cvD32 = 0x04 # D32\n cvD64 = 0x08\n\n MEM_512kS = 524288\n MEM_4MS = 4194304\n InternalFrequency = 
100E6\n\n HANDLE_RESTORE = 1\n HANDLE_OPEN = 2\n\n caenLib = None\n caenInterfaceLib = None\n\n caenHandles = {}\n caenCvs = {}\n caenReadCvs = {}\n caenWorkers = {}\n caenNids = {}\n\n# Support Class for IRQ Wait\n class IRQWait(Thread):\n def configure(self, handle, cv, readCv):\n self.handle = handle\n self.cv = cv\n self.readCv = readCv\n\n def run(self):\n while 0 == 0:\n self.readCv.acquire()\n self.readCv.wait()\n self.readCv.release()\n #print 'waiting IRQ'\n CAENDT5724.caenLib.CAENVME_IRQWait(\n self.handle, c_long(0x01), c_long(1000000))\n #print 'IRQ Received'\n self.cv.acquire()\n self.cv.notify()\n self.cv.release()\n # end class IRQWait\n\n# Support class for continuous store\n class AsynchStore(Thread):\n\n cvV1718 = 0 # CAEN V1718 USB-VME bridge\n cvV2718 = 1 # V2718 PCI-VME bridge with optical link\n cvA2818 = 2 # PCI board with optical link\n cvA2719 = 3 # Optical link piggy-back\n cvA32_S_DATA = 0x0D # A32 supervisory data access\n cvD32 = 0x04 # D32\n cvD64 = 0x08\n\n # def configure(self, handle, acqMode, startIdx, endIdx, pts, actChans, nActChans, dt, trigTime, triggerSourceNid, segmentSamples, segmentSize, chanMask, nid, device, cv, readCv, useCounter, irqEvents):\n def configure(self, handle, acqMode, startIdx, endIdx, pts, actChans, nActChans, dt, triggerSourceNid, segmentSamples, segmentSize, chanMask, nid, device, cv, readCv, useCounter, irqEvents):\n\n self.handle = handle\n self.startIdx = startIdx\n self.endIdx = endIdx\n self.acqMode = acqMode\n self.pts = pts\n self.actChans = actChans\n self.nActChans = nActChans\n self.dt = dt\n \"\"\"\n self.trigTime = trigTime\n \"\"\"\n self.segmentSamples = segmentSamples\n self.segmentSize = segmentSize\n self.chanMask = chanMask\n self.nid = nid\n self.device = device\n self.cv = cv\n self.readCv = readCv\n self.useCounter = useCounter\n self.irqEvents = irqEvents\n self.triggerSourceNid = triggerSourceNid\n self.saveList = c_void_p(0)\n\n def run(self):\n\n class DT5720Data(Structure):\n _fields_ = [(\"eventSize\", c_int), (\"boardGroup\", c_int), (\"counter\", c_int), (\n \"time\", c_int), (\"data\", c_short * (self.segmentSamples * self.nActChans))]\n\n treePtr = c_void_p(0)\n status = CAENDT5724.caenInterfaceLib.openTree(c_char_p(\n self.device.getTree().name), c_int(self.device.getTree().shot), byref(treePtr))\n\n CAENDT5724.caenInterfaceLib.startSave(byref(self.saveList))\n\n vmeAddress = 0\n\n #currStartIdx = self.segmentSamples - self.pts + self.startIdx\n #currEndIdx = self.segmentSamples - self.pts + self.endIdx\n #currChanSamples = currEndIdx - currStartIdx\n numChannels = self.device.num_channels.data()\n clockNid = self.device.clock_source.getNid()\n triggNid = self.device.trig_source.getNid()\n numTrigger = 0\n\n channels = []\n chanNid = []\n\n if self.acqMode == \"TRANSIENT RECORDER\":\n numTrigger = len(self.device.trig_source.getData())\n else:\n # continuous\n numTrigger = -1\n\n for chan in range(0, numChannels):\n channels.append([])\n chanNid.append(\n getattr(self.device, 'channel_%d_seg_raw' % (chan+1)).getNid())\n\n chanNid_c = (c_int * len(chanNid))(*chanNid)\n\n #currSegmentIdx = 0\n segmentCounter = 0\n self.dtArray = []\n\n while not self.stopReq:\n self.readCv.acquire()\n self.readCv.notify()\n self.readCv.release()\n self.cv.acquire()\n #print 'WAIT CONDITION'\n self.cv.wait()\n self.cv.release()\n #print 'CONDITION ISSUED'\n\n # Read number of buffers\n actSegments = c_int(0)\n status = CAENDT5724.caenLib.CAENVME_ReadCycle(self.handle, c_int(\n vmeAddress + 0x812C), byref(actSegments), 
c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n                if status != 0:\n                    print ('Error reading number of acquired segments')\n                    continue\n\n                segmentCounter = CAENDT5724.caenInterfaceLib.readAndSaveSegments(self.handle, c_int(vmeAddress), c_int(numChannels), c_int(self.nActChans), c_int(self.segmentSamples), c_int(self.segmentSize),\n                                                                                 c_int(self.startIdx), c_int(self.endIdx), c_int(self.pts), c_int(\n                    self.useCounter), c_int(self.chanMask), c_int(segmentCounter),\n                    c_int(numTrigger), chanNid_c, clockNid, triggNid, treePtr, self.saveList)\n\n                if self.acqMode == \"TRANSIENT RECORDER\" and segmentCounter == numTrigger:\n                    print('Transient Recorder acquisition completed!!!!')\n                    break\n\n                if self.stopReq:\n                    print('ASYNCH STORE EXITED!!!!')\n                    break\n                status = CAENDT5724.caenLib.CAENVME_IRQEnable(\n                    self.handle, c_int(0x01))\n            # endwhile self.stopReq == 0:\n\n        def stop(self):\n\n            self.stopReq = True\n            self.cv.acquire()\n            self.cv.notify()\n            self.cv.release()\n\n            # need to wait a while\n            sleep(0.5)\n\n            CAENDT5724.caenInterfaceLib.stopSave(self.saveList)\n            self.saveList = c_void_p(0)\n\n    # end class AsynchStore\n\n    def saveInfo(self):\n\n        # CAENDT5724.caenNids\n        CAENDT5724.caenHandles[self.getNid()] = self.handle\n        CAENDT5724.caenCvs[self.getNid()] = self.cv\n        CAENDT5724.caenReadCvs[self.getNid()] = self.readCv\n        # If worker is running stop it\n        # Worker is saved by saveWorker\n        try:\n            CAENDT5724.caenWorkers[self.getNid()].stop()\n            CAENDT5724.caenWorkers[self.getNid()].stopReq = True\n        except:\n            pass\n\n    def restoreInfo(self):\n        #global caenHandles\n        #global caenCvs\n        #global caenWorkers\n        #global nids\n\n        if CAENDT5724.caenLib is None:\n            try:\n                CAENDT5724.caenLib = CDLL(\"libCAENVME.so\")\n            except:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Cannot open DT5724 Device - Error loading library libCAENVME.so')\n                raise mdsExceptions.DevPY_INTERFACE_LIBRARY_NOT_FOUND\n\n        if CAENDT5724.caenInterfaceLib is None:\n            try:\n                CAENDT5724.caenInterfaceLib = CDLL(\"libCaenInterface.so\")\n            except:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Cannot open DT5724 Device - Error loading library libCaenInterface.so')\n                raise mdsExceptions.DevPY_INTERFACE_LIBRARY_NOT_FOUND\n\n        try:\n            #idx = caenNids.index(self.getNid())\n            self.handle = CAENDT5724.caenHandles[self.getNid()]\n            self.cv = CAENDT5724.caenCvs[self.getNid()]\n            self.readCv = CAENDT5724.caenReadCvs[self.getNid()]\n            #self.worker = CAENDT5724.caenWorkers[self.getNid()]\n            return self.HANDLE_RESTORE\n        except:\n            try:\n                boardId = self.board_id.data()\n            except:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Cannot open DT5724 Device - invalid board id')\n                raise mdsExceptions.DevBAD_NAME\n            self.handle = c_long(0)\n            print ('HANDLE NOT FOUND, INITIALIZING CAEN MODULE')\n            #status = caenLib.CAENVME_Init(c_int(self.cvV2718), c_int(0), c_int(boardId), byref(self.handle))\n            # Device VMEDevice (V3718 card) is 0, BOARD_ID is instead VMELink from 0 to 3 for the V3718 4 link card\n            status = CAENDT5724.caenLib.CAENVME_Init(\n                c_int(self.cvV2718), c_int(boardId), c_int(0), byref(self.handle))\n            if status != 0:\n                print ('Error initializing CAENVME')\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Cannot open DT5724 Device')\n                raise mdsExceptions.DevCANNOT_LOAD_SETTINGS\n\n            self.cv = Condition()\n            self.readCv = Condition()\n            IRQw = self.IRQWait()\n            IRQw.daemon = True\n            IRQw.configure(self.handle, self.cv, self.readCv)\n            IRQw.start()\n            return self.HANDLE_OPEN\n\n\n# Worker Management\n    def saveWorker(self):\n        CAENDT5724.caenWorkers[self.getNid()] = self.worker\n\n    
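# Editor's note (illustrative, not part of the original driver): handles, condition\n    # variables and workers are memoised per MDSplus node id, so cached state could be\n    # fetched as, e.g., CAENDT5724.caenWorkers.get(self.getNid())  # hypothetical one-liner\n    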
def restoreWorker(self):\n        try:\n            if self.getNid() in CAENDT5724.caenWorkers.keys():\n                self.worker = CAENDT5724.caenWorkers[self.getNid()]\n        except:\n            print('Cannot restore worker!!')\n\n\n################################# INIT ###############################\n    def init(self):\n        self.restoreInfo()\n\n        vmeAddress = 0\n\n        # Module Reset\n        data = c_int(0)\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0xEF24), byref(data), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error resetting DT5724 Device')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        # give some time\n        sleep(0.1)\n\n        # Module type\n        devType = c_int(0)\n        status = CAENDT5724.caenLib.CAENVME_ReadCycle(self.handle, c_int(\n            vmeAddress + 0x8140), byref(devType), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error reading board info')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        if (devType.value & 0x000000FF) != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Invalid board type. Device must be DT5724 model')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        if (devType.value & 0x0000FF00) >> 8 == 0x01:\n            self.chanMemory = self.MEM_512kS\n        else:\n            self.chanMemory = self.MEM_4MS\n\n        numChannels = devType.value >> 16\n        print ('DevType code: ', devType.value)\n        print ('NUM CHANNELS: ', numChannels)\n        print ('Channel Memory: ', self.chanMemory)\n        self.num_channels.putData(numChannels)\n\n        \"\"\"\n        print \"write decimation factor. Not Yet implemented\"\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8044), byref(c_int(0x2)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing decimation' )\n            raise mdsExceptions.DevCOMM_ERROR\n        \"\"\"\n\n        # Number of segments\n        segmentDict = {1: 0, 2: 1, 4: 2, 8: 3, 16: 4,\n                       32: 5, 64: 6, 128: 7, 256: 8, 512: 9, 1024: 10}\n        try:\n            nSegments = self.num_segments.data()\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Invalid Number of Segments')\n            raise mdsExceptions.DevBAD_PARAMETER\n        segmentCode = segmentDict[nSegments]\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x800c), byref(c_int(segmentCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        #print \"Buffer Organization 0x800C \", segmentCode\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error writing number of segments')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        # Global Channel Configuration\n        trigModeDict = {'OVER THRESHOLD': 0, 'UNDER THRESHOLD': 1}\n        try:\n            trigMode = self.trig_mode.data()\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Invalid Trigger mode')\n            raise mdsExceptions.DevBAD_MODE\n        trigModeCode = trigModeDict[trigMode]\n        conf = trigModeCode << 6\n        conf = conf | 0x00000010\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x8000), byref(c_int(conf)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error writing group configuration')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        # Channel configurations\n        trigEnableCode = 0\n        chanEnableCode = 0\n        enabledDict = {'ENABLED': 1, 'DISABLED': 0}\n        numChannels = self.num_channels.data()\n        for chan in range(0, numChannels):\n\n            # Empty the node which will contain the segmented data\n            getattr(self, 
'channel_%d_seg_raw' % (chan+1)).deleteData()\n\n # Set threshold level\n threshold = getattr(\n self, 'channel_%d_thresh_level' % (chan+1)).data()\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n vmeAddress + 0x1080 + chan * 0x100), byref(c_int(threshold)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(),\n 'Error writing threshold level')\n raise mdsExceptions.DevCOMM_ERROR\n\n # Set threshold samples\n threshSamples = getattr(\n self, 'channel_%d_thresh_sampl' % (chan+1)).data()\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n vmeAddress + 0x1084 + chan * 0x100), byref(c_int(threshSamples)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(),\n 'Error writing threshold samples')\n raise mdsExceptions.DevCOMM_ERROR\n\n # Read FIRMWARE info\n \"\"\"\n firmware = c_uint(0)\n status = CAENDT5724.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x108C + chan * 0x100), byref(firmware), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n print \"firmware AMC FPGA Addr \", hex(vmeAddress + 0x108C + chan * 0x100), hex((firmware.value >> 16) & 0x0000ffff), \" Version \", hex((firmware.value >> 8) & 0x000000ff), \".\", hex((firmware.value ) & 0x000000ff)\n \"\"\"\n dac_offset = getattr(\n self, 'channel_%d_dac_offset' % (chan+1)).data()\n\n # Channel offset compensation\n try:\n offset = getattr(self, 'channel_%d_offset' % (chan+1)).data()\n except:\n offset = 0\n\n # Set offset\n offset = offset + dac_offset\n print ('Ch ', chan, 'Offset Volt = ', offset)\n if(offset > 1.125):\n offset = 1.125\n if(offset < -1.125):\n offset = -1.125\n offset = (offset / 1.125) * 32767\n print ('Ch ', chan, 'Offset Val. 
=', int(offset))\n\n            status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n                vmeAddress + 0x1098 + chan * 0x100), byref(c_int(int(offset + 0x08000))), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n            if status != 0:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Error writing DAC offset')\n                raise mdsExceptions.DevCOMM_ERROR\n\n            # Enable channel\n            state = getattr(self, 'channel_%d_state' % (chan+1)).data()\n            chanEnableCode = chanEnableCode | (enabledDict[state] << chan)\n\n            # Enable Trigger\n            trigState = getattr(self, 'channel_%d_trig_state' %\n                                (chan+1)).data()\n            trigEnableCode = trigEnableCode | (enabledDict[trigState] << chan)\n\n            # END channel configuration loop\n\n        # Set channel enabled mask\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x8120), byref(c_int(chanEnableCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error writing Channel enable register')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        # Set channel trigger mask\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x810C), byref(c_int(trigEnableCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error writing Channel trigger enable register')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        # Set trigger enabling\n        trigExt = self.trig_ext.data()\n        trigEnableCode = trigEnableCode | (enabledDict[trigExt] << 30)\n        trigSoft = self.trig_soft.data()\n        trigEnableCode = trigEnableCode | (enabledDict[trigSoft] << 31)\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x810C), byref(c_int(trigEnableCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error writing trigger configuration')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        # Front panel trigger out setting: set TRIG/CLK to TTL\n        data = 1\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x811C), byref(c_int(data)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n\n        # Configure trigger source\n        \"\"\"\n        try:\n            trigSource = self.trig_source.data()\n            #Trigger source must be an array, consider only the first element as triggerSource time\n            if len(self.trig_source.getShape()) > 0:\n                trigSource = trigSource[0]\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot resolve Trigger source')\n            raise mdsExceptions.DevBAD_PARAMETER\n        \"\"\"\n\n        # Configure clock source\n        # The clock source can be only INTERNAL\n        clockMode = self.clock_mode.data()\n        if clockMode == 'EXTERNAL':\n            try:\n                clockSource = self.clock_source()\n            except:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Cannot resolve Clock source')\n                raise mdsExceptions.DevBAD_PARAMETER\n        else:\n            clockSource = Range(None, None, Float64(1/self.InternalFrequency))\n            self.clock_source.putData(clockSource)\n\n        # Configure Post Trigger Samples\n        try:\n            pts = int(self.pts.data())\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Cannot resolve PTS Samples')\n            raise mdsExceptions.DevBAD_PARAMETER\n        # integer division: chanMemory is always an exact multiple of nSegments\n        segmentSize = self.chanMemory // nSegments\n        if pts > segmentSize:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'PTS Larger than segmentSize')\n            raise mdsExceptions.DevBAD_PARAMETER\n        status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n            vmeAddress + 0x8114), byref(c_int(pts >> 1)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n\n        
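# Editor's note (illustrative, not in the original driver): with the internal\n        # clock, x_to_i reduces to idx = round(t / dt); e.g. dt = 1/100e6 s, so the\n        # default end_time of 1E-6 s maps to endIdx = 100 samples.\n        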
# Time management: compute endIdx and startIdx\n        useTime = self.use_time.data()\n        if useTime == 'YES':\n            try:\n                # Start and End Index acquisition burst calculation is performed with trigger time set to 0\n                trigSource = 0.\n                startTime = self.start_time.data()\n                endTime = self.end_time.data()\n            except:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Cannot Read Start or End time')\n                raise mdsExceptions.DevBAD_PARAMETER\n            if endTime > 0:\n                endIdx = Data.execute('x_to_i($1, $2)', Dimension(\n                    Window(0, segmentSize, trigSource), clockSource), Float64(endTime + trigSource))\n            else:\n                endIdx = -Data.execute('x_to_i($1,$2)', Dimension(\n                    Window(0, segmentSize, trigSource + endTime), clockSource), Float64(trigSource))\n            self.end_idx.putData(Int32(int(endIdx + 0.5)))\n            if startTime > 0:\n                startIdx = Data.execute('x_to_i($1, $2)', Dimension(\n                    Window(0, segmentSize, trigSource), clockSource), startTime + trigSource)\n            else:\n                startIdx = -Data.execute('x_to_i($1,$2)', Dimension(\n                    Window(0, segmentSize, trigSource + startTime), clockSource), trigSource)\n            self.start_idx.putData(Int32(int(startIdx + 0.5)))\n        else:\n            # bug fix (assumption): the original code left startIdx/endIdx undefined\n            # when USE_TIME is not 'YES'; read them back from the tree nodes instead\n            startIdx = Int32(int(self.start_idx.data()))\n            endIdx = Int32(int(self.end_idx.data()))\n\n        currStartIdx = int(segmentSize - pts + startIdx.data())\n        if currStartIdx < 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Invalid segment size/pre-trigger samples')\n            raise mdsExceptions.DevBAD_PARAMETER\n\n        currEndIdx = int(segmentSize - pts + endIdx.data())\n        if currEndIdx >= segmentSize:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Invalid segment size/post-trigger samples')\n            raise mdsExceptions.DevBAD_PARAMETER\n\n        print ('startIdx : ', int(startIdx))\n        print ('endIdx : ', int(endIdx))\n        print ('SEGMENT SIZE : ', int(segmentSize))\n        print ('PTS : ', pts)\n        print ('currStartIdx : ', currStartIdx)\n        print ('currEndIdx : ', currEndIdx)\n\n        acqMode = self.acq_mode.data()\n        if acqMode == 'CONTINUOUS' or acqMode == 'CONTINUOUS WITH COUNTER' or acqMode == 'TRANSIENT RECORDER':\n            irqEvents = self.irq_events.data()\n            irqEvents = irqEvents - 1\n            if irqEvents < 1:\n                irqEvents = 1\n            status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n                vmeAddress + 0xEF18), byref(c_int(irqEvents)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n            if status != 0:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Error setting IRQ events')\n                raise mdsExceptions.DevCOMM_ERROR\n            status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n                vmeAddress + 0xEF00), byref(c_int(0x09)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n            if status != 0:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Error setting IRQ line')\n                raise mdsExceptions.DevCOMM_ERROR\n            status = CAENDT5724.caenLib.CAENVME_IRQEnable(\n                self.handle, c_int(0x01))\n            if status != 0:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Error Enabling IRQ')\n                raise mdsExceptions.DevCOMM_ERROR\n            # Start asynchronous readout thread\n            # self.start_store()\n        # endif acqMode in continuous/transient modes\n\n        self.saveInfo()\n\n\n################################ TRIGGER ###################################\n\n    def trigger(self):\n\n        if (self.restoreInfo() != self.HANDLE_RESTORE and self.worker.stopReq == True):\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'DT5724 Device not initialized')\n            raise mdsExceptions.DevINV_SETUP\n\n        try:\n            vmeAddress = 0\n            # Module SW trigger\n            #data = c_int(0)\n            status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n                vmeAddress + 0x8108), byref(c_int(0)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n            if status != 0:\n                Data.execute('DevLogErr($1,$2)', self.getNid(),\n                             'Error in software trigger DT5724 
Device')\n                raise mdsExceptions.DevCOMM_ERROR\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Generic SW trigger Error')\n            raise mdsExceptions.DevCOMM_ERROR\n\n\n################################# START STORE ###############################\n    def start_store(self):\n\n        if (self.restoreInfo() != self.HANDLE_RESTORE):\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'DT5724 Device not initialized')\n            raise mdsExceptions.DevINV_SETUP\n\n        vmeAddress = 0\n\n        # Module type\n        devType = c_int(0)\n        status = CAENDT5724.caenLib.CAENVME_ReadCycle(self.handle, c_int(\n            vmeAddress + 0x8140), byref(devType), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        if status != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error reading board info')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        if (devType.value & 0x000000FF) != 0:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Invalid board type. Device must be DT5724 model')\n            raise mdsExceptions.DevCOMM_ERROR\n\n        if (devType.value & 0x0000FF00) >> 8 == 0x01:\n            self.chanMemory = self.MEM_512kS\n        else:\n            self.chanMemory = self.MEM_4MS\n\n        try:\n            clock = self.clock_source.evaluate()\n            dt = clock.getDelta().data()\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error evaluating clock source')\n            raise mdsExceptions.DevBAD_PARAMETER\n        try:\n            triggerSourceNid = TreePath(self.trig_source.getFullPath())\n            #trigTime = self.trig_source.data()\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error evaluating trigger source')\n            raise mdsExceptions.DevBAD_PARAMETER\n        try:\n            startIdx = self.start_idx.data()\n            endIdx = self.end_idx.data()\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error evaluating start or end idx')\n            raise mdsExceptions.DevBAD_PARAMETER\n        try:\n            pts = self.pts.data()\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error evaluating Post Trigger Samples')\n            raise mdsExceptions.DevBAD_PARAMETER\n\n        # Compute Segment Size\n        try:\n            nSegments = self.num_segments.data()\n            segmentSamples = self.chanMemory // nSegments\n        except:\n            Data.execute('DevLogErr($1,$2)', self.getNid(),\n                         'Error reading max number of segments')\n            raise mdsExceptions.DevBAD_PARAMETER\n        #currStartIdx = segmentSamples - pts + startIdx\n        #currEndIdx = segmentSamples - pts + endIdx\n\n        # Get Active channels\n        chanMask = c_int(0)\n        status = CAENDT5724.caenLib.CAENVME_ReadCycle(self.handle, c_int(\n            vmeAddress + 0x8120), byref(chanMask), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n        nActChans = 0\n        chanMask = chanMask.value\n        numChannels = self.num_channels.data()\n        for chan in range(0, numChannels):\n            if (chanMask & (1 << chan)) != 0:\n                nActChans = nActChans + 1\n        if nActChans == 0:\n            print ('No active channels')\n            return\n        segmentSize = 16 + 2 * segmentSamples * nActChans\n        acqMode = self.acq_mode.data()\n\n        for chan in range(0, numChannels):\n            if (chanMask & (1 << chan)) != 0:\n                try:\n                    dac_offset = getattr(\n                        self, 'channel_%d_dac_offset' % (chan+1)).data()\n                except:\n                    Data.execute('DevLogErr($1,$2)', self.getNid(),\n                                 'Error reading channel DAC offset')\n                    raise\n                if acqMode == 'CONTINUOUS WITH COUNTER':\n                    useCounter = True\n                    data = Data.compile(\"2.25*($1 - 8192)/16384.+$2\", TreePath(\n                        getattr(self, 'channel_%d_seg_raw' % (chan+1)).getFullPath()), dac_offset)\n                else:\n                    useCounter = False\n                    
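# Editor's note: the expression below rescales the raw 14-bit counts (mid-scale\n                    # 8192, full scale 16384) onto the +/-1.125 V input range and re-applies\n                    # the channel DAC offset: volts = 2.25*(raw - 8192)/16384. + offset\n                    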
+ $ )\", segRawPath, Float32(dac_offset))\n try:\n getattr(self, 'channel_%d_data' % (chan+1)).putData(data)\n except:\n Data.execute('DevLogErr($1,$2)',\n self.getNid(), 'Error Writing data')\n raise\n # endfor chan in range(0,numChannels):\n\n self.worker = self.AsynchStore()\n self.worker.daemon = True\n self.worker.stopReq = False\n\n #self.worker.configure(self.handle, acqMode, startIdx, endIdx, pts, chanMask, nActChans, dt, trigTime, triggerSourceNid, segmentSamples, segmentSize, chanMask, self.getNid(), self, self.cv, self.readCv, useCounter, self.irq_events.data() + 1)\n\n self.worker.configure(self.handle, acqMode, startIdx, endIdx, pts, chanMask, nActChans, dt, triggerSourceNid, segmentSamples,\n segmentSize, chanMask, self.getNid(), self, self.cv, self.readCv, useCounter, self.irq_events.data() + 1)\n\n try:\n runCommand = 4\n \"\"\"\n #External cllock not yet implemented\n if clockMode == 'EXTERNAL':\n runCommand = runCommand | 0x00000040\n \"\"\"\n # Module SW trigger\n data = c_int(0)\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n vmeAddress + 0x8100), byref(c_int(runCommand)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(),\n 'Error starting acquisition on DT5724 Device')\n raise mdsExceptions.DevCOMM_ERROR\n except:\n Data.execute('DevLogErr($1,$2)', self.getNid(\n ), 'Cannot starting acquisition on DT5724 Device SW exception')\n raise mdsExceptions.DevCOMM_ERROR\n\n self.saveWorker()\n self.worker.start()\n \"\"\"\n try:\n if acqMode == 'TRANSIENT RECORDER':\n trigSoft = self.trig_soft.data()\n if trigSoft == 'ENABLED':\n trigSource = self.trig_source.data()\n t0 = trigSource[0]\n sleep(t0)\n print(\"SW Trigger \" + trigSource[0])\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0L)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in sofware trigger DT5724 Device' )\n raise mdsExceptions.DevCOMM_ERROR\n\n if len(trigSource) == 1 :\n sleep( 1 )\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0L)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in sofware trigger(1) DT5724 Device' )\n raise mdsExceptions.DevCOMM_ERROR\n\n for delay in trigSource[1 : ] :\n sleep( delay - t0 )\n t0 = delay\n print(\"SW Trigger \" + delay)\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0L)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in sofware trigger DT5724 Device' )\n raise mdsExceptions.DevCOMM_ERROR\n except:\n Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot starting acquisition on DT5724 Device SW exception' )\n raise mdsExceptions.DevCOMM_ERROR\n \"\"\"\n\n\n#################################### STOP STORE ###################################\n def stop_store(self):\n\n if self.restoreInfo() != self.HANDLE_RESTORE:\n Data.execute('DevLogErr($1,$2)', self.getNid(),\n 'DT5724 Device not initialized')\n raise mdsExceptions.DevINV_SETUP\n\n vmeAddress = 0\n # Stop device\n status = CAENDT5724.caenLib.CAENVME_WriteCycle(self.handle, c_int(\n vmeAddress + 0x8100), byref(c_int(0)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))\n if status != 0:\n Data.execute('DevLogErr($1,$2)', self.getNid(),\n 'Error stopping device')\n 
raise mdsExceptions.DevCOMM_ERROR\n        # need to wait a while\n        sleep(0.5)\n\n        self.restoreWorker()\n        if self.worker.isAlive():\n            print (\"PXI CAENDT5724 stop_worker\")\n            self.worker.stop()\n        del self.worker\n","sub_path":"pydevices/RfxDevices/CAENDT5724.py","file_name":"CAENDT5724.py","file_ext":"py","file_size_in_byte":38826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"253833972","text":"from gensim.models import word2vec\nfrom scipy import io\nimport numpy as np\nfrom bisect import bisect\nimport pickle\n\n\ndef spearman(array_x, array_y):\n    N = len(array_x)\n\n    return 1 - (6*sum((array_x - array_y)**2)) / (N**3 - N)\n\n\ndef create_rank_array(list_):\n    rank_array = np.zeros(len(list_))\n    sorted_list_ = sorted(list_)\n    for i, element in enumerate(list_):\n        rank_array[i] = len(list_) - bisect(sorted_list_, element) + 1\n\n    return rank_array\n\n\ndef calc_spearman_data(data):\n    human_list, word2vec_list = list(), list()\n    for num_line, line in enumerate(data):\n        if num_line == 0:\n            continue\n        cols = line.strip().split()\n        # convert to float so the ranking is numeric rather than lexicographic\n        human_list.append(float(cols[2]))\n        word2vec_list.append(float(cols[3]))\n    human_rank_array = create_rank_array(human_list)\n    word2vec_rank_array = create_rank_array(word2vec_list)\n\n    return spearman(human_rank_array, word2vec_rank_array)\n\n\nif __name__ == '__main__':\n    with open('result/knock94_85_result.txt', 'r') as data_in_85, open('result/knock94_90_result.txt', 'r') as data_in_90:\n        print('knock85_spearman : {}'.format(calc_spearman_data(data_in_85)))\n        print('knock90_spearman : {}'.format(calc_spearman_data(data_in_90)))\n","sub_path":"Shi-ma/chapter10/knock95.py","file_name":"knock95.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"30980402","text":"class Solution(object):\n    def findLengthOfLCIS(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        res = 0\n        count = 0\n        for i in range(0, len(nums)):\n            if i == 0 or nums[i] > nums[i-1]:\n                count += 1\n                res = max(count, res)\n            else:\n                count = 1\n        return res","sub_path":"674.py","file_name":"674.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"608429206","text":"\"\"\"\nCreated on Feb 15, 2011\n\n@author: Barry Maturkanich\n\nUtility methods used by the rest of the package.\nOnly one so far, a helper to convert a PIL image into\na Wx image.\n\"\"\"\n\nfrom StringIO import StringIO\n\nimport wx\n\ndef convertToWxImage(image):\n    \"\"\"Convert the value (string or PIL image) to a wx image.\"\"\"\n\n    if type(image) is str:\n        # Load from stream\n        return wx.ImageFromStream(StringIO(image))\n\n    else:\n        # Convert from PIL\n        if 'A' in image.mode:\n            if image.mode != 'RGBA':\n                image = image.convert('RGBA')\n            wxImage = wx.EmptyImage(*image.size)\n            wxImage.SetData(image.convert('RGB').tostring())\n            wxImage.SetAlphaData(image.tostring()[3::4])\n        else:\n            if image.mode != 'RGB':\n                image = image.convert('RGB')\n            wxImage = wx.EmptyImage(*image.size)\n            wxImage.SetData(image.tostring())\n        return wxImage","sub_path":"src/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"315084353","text":"# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport cv2\nimport numpy as np\nimport StringIO\nimport datetime\nimport pytz\nimport angus\nimport angus_display as ad\nimport stats as st\n\n\ndef f(stream_index, width, height):\n\n    camera = cv2.VideoCapture(stream_index)\n    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, int(width))\n    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))\n    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)\n\n    if not camera.isOpened():\n        print(\"Cannot open stream of index {}\".format(stream_index))\n        exit(1)\n\n    print(\"Video stream is of resolution {} x {}\".format(camera.get(3), camera.get(4)))\n\n    stats = st.Stats(\"stats.json\")\n    animation = []\n    engaged = []\n\n    conn = angus.connect()\n    service = conn.services.get_service(\"scene_analysis\", version=1)\n    service.enable_session()\n\n    while camera.isOpened():\n        ret, frame = camera.read()\n\n        if not ret:\n            break\n\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        ret, buff = cv2.imencode(\".jpg\", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])\n        buff = StringIO.StringIO(np.array(buff).tostring())\n\n        t = datetime.datetime.now(pytz.utc)\n        job = service.process({\"image\": buff,\n                               \"timestamp\" : t.isoformat(),\n                               \"camera_position\": \"facing\",\n                               \"sensitivity\": {\n                                   \"appearance\": 0.7,\n                                   \"disappearance\": 0.7,\n                                   \"age_estimated\": 0.4,\n                                   \"gender_estimated\": 0.5,\n                                   \"focus_locked\": 0.9,\n                                   \"emotion_detected\": 0.4,\n                                   \"direction_estimated\": 0.8\n                               }\n                               })\n\n        res = job.result\n\n        events = res[\"events\"]\n        entities = res[\"entities\"]\n\n        for idx, h in entities.iteritems():\n            pt = ad.displayAge(frame, idx, h, 0.50, 0.35)\n            ch = ad.displayHair(frame, idx, h)\n            ad.displayAvatar(frame, h, pt, ch)\n            ad.displayEmotion(frame, h, pt)\n            ad.displayGender(frame, h, pt)\n            ad.displayGaze(frame, idx, h, pt, 0.50)\n\n        panel = ((width - 180, 40), (width-20, height - 40))\n        ad.blur(frame, panel[0], panel[1], (255, 255, 255), 2)\n        ad.computeConversion(res, events, entities, engaged, stats, animation, 0.5, 500)\n        ad.displayConversion(frame, stats, (width - 100, int(0.3*height)))\n        ad.displayAnimation(frame, animation)\n        ad.display_logo(frame, 20, height - 60)\n\n        cv2.imshow('window', frame)\n\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            stats.save()\n            break\n\n    service.disable_session()\n\n    camera.release()\n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    ### Web cam index might be different from 0 on your setup.\n    ### To grab a given video file instead of the host computer cam, try:\n    ### f(\"/path/to/myvideo.avi\", 640, 480)\n    f(0, 640, 480)\n","sub_path":"demo_sceneanalysis/demo_sceneanalysis.py","file_name":"demo_sceneanalysis.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"99369068","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n# #step0: import\nimport pandas as 
pd\nimport json\nimport os\nimport csv\nfrom selenium import webdriver\nimport time\nfrom selenium.common.exceptions import NoSuchElementException\n\n# def isAlpha(word):\n#     try:\n#         return word.encode('ascii').isalpha()\n#     except UnicodeEncodeError:\n#         return False\n\ndef check_exists_by_xpath(xpath):\n    try:\n        driver.find_element_by_xpath(xpath)\n    except NoSuchElementException:\n        return False\n    return True\n\n\ndf = pd.DataFrame(columns=[\"Comment\", \"Comment_Star\"])\n\nurl = ((\"https://www.tripadvisor.com/Attraction_Review-g14129610-d4045009-Reviews\"\n       \"-Namiyoke_Inari_Shrine-Tsukiji_Chuo_Tokyo_Tokyo_Prefecture_Kanto.html\"))\nresponse = urlopen(url)\nhtml = BeautifulSoup(response, \"html.parser\")  # explicit parser avoids bs4's guessed-parser warning\n\nen = html.find(class_=\"ui_header h1\")\nprint(en.text)\n\n# rating = html.find(class_=\"ui_bubble_rating\")\n# rank = html.find(class_=\"header_popularity\")\n# address = html.find(\"span\", class_=\"detail\")\n# type = html.find(\"div\", class_=\"detail\")\ntitle_ch = ''\ntitle_en = ''\n\n#Comment\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--headless\")\n\n\n# import the webdriver\ndriver = webdriver.Chrome(\"E:/pyETL/Tokyo/景點/tripadvisor/chromedriver\", options=options)\n# open the TripAdvisor page of one attraction\ndriver.get(url)\n\n# Comment\nnum = 0\n# driver.get((\"https://www.tripadvisor.com.tw/Attraction_Review-g1066456-d2311984-Reviews-or\" + str(10) +\n#             \"-Tokyo_Camii_Turkish_Culture_Center-Shibuya_Tokyo_Tokyo_Prefecture_Kanto.html\"))\n\ntry:\n    end_page = driver.find_element_by_class_name(\"pageNumbers\").find_element_by_class_name(\"last\").text\n    print(end_page)\nexcept:\n    print(\"Only one page\")\n    end_page = 1\n\nfor i in range(int(end_page)):\n    url1 = url.split(\"-\")[0] + \"-or\" + str(num) + \"-\" + '-'.join(url.split(\"-\")[1:])\n    print(url1)\n    try:\n\n        driver.get(url1)\n        # driver.get((\"https://www.tripadvisor.com.tw/Attraction_Review-g1066454-d1626639-Reviews-or\" + str(num) +\n        #             \"-Jonanjima_SeasidePark-Ota_Tokyo_Tokyo_Prefecture_Kanto.html\"))\n        # 10-Tokyo_Camii_Turkish_Culture_Center-Shibuya_Tokyo_Tokyo_Prefecture_Kanto.html\n        # s = driver.find_element_by_class_name(\"pageNum last taLnk\")\n        # print(s)\n        # function to check if the button is on the page\n        time.sleep(5)\n        if (check_exists_by_xpath(\"//span[@class='taLnk ulBlueLinks']\")):\n            # to expand the review\n            for item in driver.find_elements_by_class_name(\n                    'taLnk ulBlueLinks'):  # driver.find_elements_by_xpath(\"//span[@class='taLnk ulBlueLinks']\"):\n                item.click()\n                time.sleep(2)\n\n        container = driver.find_elements_by_xpath(\"//div[@class='review-container']\")\n\n\n        # Review star rating\n        # x = driver.find_elements_by_xpath(\"//div[@class='ui_column is-9']\")\n\n        for j in container:\n            x = j.find_elements_by_xpath(\"//div[@class='ui_column is-9']\")\n            # print(x[1].find_element_by_class_name('ui_bubble_rating').get_attribute('class'))\n            # for i in x:\n            comment_star = (j.find_element_by_class_name('ui_bubble_rating').get_attribute('class'))\n            comment = (j.text.split('\\n')[5:7])\n            print(comment)\n            print(comment_star)\n\n\n            data = {\"Comment\": comment,\n                    \"Comment_Star\": comment_star\n                    }\n            df = df.append(data, ignore_index=True)\n        dn = \"tripadvisor_English/\"\n        if not os.path.exists(dn):\n            os.makedirs(dn)\n\n        df.to_csv(dn + en.text + \".csv\", encoding=\"utf-8\", index=False)\n        print(\"*\" * 50)\n\n        # for j in container:\n        #     comment.append(j.text.split('\\n')[5:7])\n        #     print(j.text)\n        #print(\"********************\")\n    except:\n        print(\"Rerun\")\n        # break\n    num += 10
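\n# Editor's note (illustrative): TripAdvisor offsets reviews in steps of 10 via the\n# \"-orN-\" URL token, so page i of this attraction would be built as, hypothetically:\n#   page_i = url.split(\"-\")[0] + \"-or%d-\" % (10 * i) + \"-\".join(url.split(\"-\")[1:])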
\ndriver.close()\n#\n\n\n\n\n# save data\n","sub_path":"NLP/情感分析/星等評論_4_English.py","file_name":"星等評論_4_English.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"345563517","text":"#Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25])\n#and returns a new list that contains all the elements of the first list minus all\n#the duplicates.\n\na = [5, 5, 15, 20, 20, 20, 10, 15, 20, 25, 25, 65]\nprint(a)\n\n'''z = input(\"Enter a list of numbers separated by spaces:\")\nprint(z)\nz_list = list(map(int,z.split(\" \")))\nprint(z_list)\n'''\n\nb = []\n\nfor element in a:\n    if element in b:\n        continue\n    else:\n        b.append(element)\n    \nprint(b)\n\n","sub_path":"20160505_List_Remove_Duplicates.py","file_name":"20160505_List_Remove_Duplicates.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"488570305","text":"import alphaspace2 as al \nfrom alphaspace2.functions import _group \nfrom alphaspace2.Cluster import _Pocket \nfrom vs_pfm.docking.dock import DockPrep\nimport tempfile \nimport mdtraj \nfrom vs_pfm.general_functions import pickle_file, unpickle_file\nimport numpy as np \nimport scipy \nimport scipy.cluster.hierarchy as hier\nimport os \nimport shutil  # needed by write_snapshot_by_order below\nimport copy \nfrom itertools import chain \nfrom vs_pfm.struct_anlys.as_patcher.View import write_snapshot\n\n\nclass AS(object):\n    '''\n    this object includes three parts:\n    1. prepare receptor pdb file for alphaspace2, \\\n    including add h, convert pdb to pdbqt, strip all Hs, \\\n    2. run alphaspace 2 (if lig specified, only map binding site)\n    3. record all alpha atoms, beta atoms and pocket relationship \\\n    for further template-based pocket matching and pocket-based fragmentation. (optional step)\n    '''\n\n    def __init__(self,recep,lig=None,folder=None):\n        '''\n        param: recep: receptor pdb file \n        param: folder: folder to store intermediate files, if not specified,\\\n        save in temporary folder \n        param: lig: ligand pdb file. If not specified, alphaspace2 \\\n        will map entire protein. \n        '''\n        self.recep = recep \n        self.lig = lig\n        self.folder = folder \n\n    \n    def __prep_pdbqt(self):\n        '''\n        prepare pdbqt file for ligandability calculation. Note: adding hs using pdb2pqr also adds missing AA atoms.\n        Therefore, the pdbqt file (without hs)\\\n        can have a different atom count from the input file. 
\n '''\n dock_obj = DockPrep(input_file=self.recep,input_type='receptor')\n dock_obj.add_h(self.folder + '/temp_h')\n dock_obj.prepare_pdbqt(self.folder+'/temp_h.pdb',self.folder + '/temp_h.pdbqt')\n self._pdb = self.folder + '/temp_h.pdb' \n self._pdbqt = self.folder + '/temp_h.pdbqt'\n \n \n def __remove_hs(self):\n '''\n remove hs for both pdb and pdbqt \n '''\n recep_lines = open(self._pdb,'r').readlines()\n recep_noh_lines = [l for l in recep_lines if (not l[11:17].strip().startswith('H')) and (l.startswith('ATOM '))]\n self._recep_noh = self.folder + '/recep_noh.pdb'\n recep_noh = open(self._recep_noh,'w')\n recep_noh.writelines(recep_noh_lines)\n recep_noh.close()\n\n recep_pdbqt_lines = open(self._pdbqt,'r').readlines()\n recep_pdbqt_noh_lines = [l for l in recep_pdbqt_lines if (not l[76:].strip().startswith('H')) and (l.startswith('ATOM '))]\n self._recep_noh_pdbqt = self.folder + '/recep_noh.pdbqt'\n recep_noh_pdbqt = open(self._recep_noh_pdbqt,'w')\n recep_noh_pdbqt.writelines(recep_pdbqt_noh_lines)\n recep_noh_pdbqt.close() \n\n \n def __run_as(self):\n '''\n run alphaspace2 \n '''\n self._recep_mdtraj = mdtraj.load(self._recep_noh)\n if self.lig:\n self._lig_mdtraj = mdtraj.load(self.lig)\n else:\n self._lig_mdtraj = None \n al.annotateVinaAtomTypes(pdbqt=self._recep_noh_pdbqt,receptor=self._recep_mdtraj)\n ss = al.Snapshot()\n ss.run(self._recep_mdtraj,binder=self._lig_mdtraj)\n self.ss = ss\n if not self._lig_mdtraj: \n self.ss._alpha_contact = [False] * len(self.ss._alpha_xyz)\n self.ss._beta_contact = [False] * len(self.ss._beta_xyz)\n self.ss._pocket_contact = [False] * len(self.ss._pocket_xyz)\n\n # prepare beta and alpha to pocket mapping for pocket based fragmentation \n self._beta_to_pocket_mapping = [None] * len(self.ss._beta_xyz)\n self._alpha_to_pocket_mapping = [None] * len(self.ss._alpha_xyz)\n for p in self.ss.pockets:\n for a_ind in p.alpha_index: \n self._alpha_to_pocket_mapping[a_ind] = p.index\n for b in p.betas:\n \n self._beta_to_pocket_mapping[b.index] = p.index \n\n def run(self):\n if not self.folder:\n with tempfile.TemporaryDirectory() as self.folder:\n self.__prep_pdbqt()\n self.__remove_hs()\n self.__run_as()\n else:\n self.__prep_pdbqt()\n self.__remove_hs()\n self.__run_as()\n\n\n\n def match_pocket_to_template(self, template_as):\n '''\n use template pocket name to define pocket on pdb of interest. 
beta atoms are assigned to closest template pockets\n if distance < 4.7, otherwise, beta atoms are labeled as unmatched and clustered all unmatched beta atoms based on\n pairwise distance using average linkage\n\n :param template_as: unpickled template alpha space obj\n :return:\n '''\n self.ss_matched = copy.copy(self.ss)\n self._beta_matched_to_pocket_mapping = [None] * len(self.ss._beta_xyz)\n self._beta_matched_status = [None] * len(self.ss._beta_xyz)\n\n\n # get pair-wise beta distance between template beta atoms and pdb beta atoms\n template_ss = template_as.ss \n template_pocket_beta_center = [None] * (len(template_ss._pocket_beta_index_list))\n \n for pock in template_ss.pockets: \n pock_beta_xyz = np.array([beta.xyz for beta in pock.betas]) \n pock_beta_center = np.mean(pock_beta_xyz,axis=0)\n template_pocket_beta_center[pock.index] = pock_beta_center\n \n template_pocket_beta_center = np.array(template_pocket_beta_center)\n \n dist = scipy.spatial.distance.cdist(self.ss._beta_xyz, template_pocket_beta_center)\n # assign pocket by distance to pocket center, the closest center is used for assignment\n unassigned_beta = []\n for i in range(0, len(dist)):\n closest_pocket_dist = np.min(dist[i])\n if closest_pocket_dist < 4.7:\n pocket_id = np.argmin(dist[i])\n self._beta_matched_to_pocket_mapping[i] = pocket_id\n self._beta_matched_status[i] = True\n else:\n unassigned_beta.append(i)\n self._beta_matched_status[i] = False\n\n\n\n # cluster unassigned beta atoms to get new pockets\n\n self.template_pocket_count = len(template_ss._pocket_beta_index_list)\n self.template_ss = template_ss\n unassigned_beta_coords = np.array([self.ss._beta_xyz[i] for i in unassigned_beta])\n if len(unassigned_beta_coords) == 1:\n cluster = [1] \n elif len(unassigned_beta_coords) == 0:\n cluster = [] \n else: \n zmat = hier.linkage(unassigned_beta_coords, method='average')\n cluster = hier.fcluster(zmat, 4.7,criterion='distance') #cluster index starts from 1 \n \n self.update_unmatched_pocket(unassigned_beta,cluster)\n\n self.update_pocket()\n\n def update_unmatched_pocket(self,unmatched_beta_ind_array,unmatched_beta_new_pocket_array):\n '''\n When a group of structures are matched to a template AS, all the unmatched beta atoms \n can be clustered again to form new pockets. 
The clustering results can be updated using this \n        function. New pocket is indexed after all template pockets\n        param: unmatched_beta_ind_array: array of unmatched beta atom indexes \n        param: unmatched_beta_new_pocket_array: array of assigned new pocket id for unmatched beta atoms\n\n        '''\n\n        for ind, i in enumerate(unmatched_beta_ind_array):\n\n            self._beta_matched_to_pocket_mapping[i] = self.template_pocket_count -1 + unmatched_beta_new_pocket_array[ind]\n        self.ss_matched._pocket_beta_index_list = _group(self._beta_matched_to_pocket_mapping)\n\n        self.update_alpha_to_pocket_based_on_beta()\n\n        self.update_pocket()\n\n    def update_alpha_to_pocket_based_on_beta(self):\n        '''\n        Beta to pocket mapping can change based on whether it is matched to the template or not.\n        This function is used to update the alpha to pocket mapping info based on beta atom status.\n        :return:\n        '''\n        self.ss_matched._pocket_alpha_index_list = [None] * len(self.ss_matched._pocket_beta_index_list)\n        self._alpha_matched_to_pocket_mapping = [None] * len(self.ss_matched._alpha_xyz)\n        for ind, pock_beta in enumerate(self.ss_matched._pocket_beta_index_list): \n            #for beta_ind in pock_beta:\n            pock_alpha = list(chain(*[self.ss._beta_alpha_index_list[beta_ind] for beta_ind in pock_beta]))\n            self.ss_matched._pocket_alpha_index_list[ind] = pock_alpha\n            for alpha in pock_alpha:\n                self._alpha_matched_to_pocket_mapping[alpha] = ind \n\n\n    def update_pocket(self):\n        '''\n        Redefine pockets based on the new pock_alpha_index_list. Update pocket_xyz, pocket_space, pocket_contact in ss_matched.\n        Label pocket matched status in self. \n        '''\n\n        # update pocket xyz \n        self.ss_matched._pocket_xyz = [None] * len(self.ss_matched._pocket_beta_index_list)\n        for i in range(len(self.ss_matched._pocket_beta_index_list)):\n            p = _Pocket(self.ss_matched,pocketIndex=i)\n            self.ss_matched._pocket_xyz[i] = p.centroid\n        \n        #update pocket contact status \n        if self._lig_mdtraj:\n            self.ss_matched.calculateContact(coords=self._lig_mdtraj.xyz[0] * 10)\n\n\n        \n        self.ss_matched._pocket_space = [None] * len(self.ss_matched._pocket_beta_index_list)\n        #self.ss_matched._pocket_contact = [None] * len(self.ss_matched._pocket_beta_index_list)\n        self.pocket_matched_status = [False] * len(self.ss_matched._pocket_beta_index_list)\n        for p in self.ss_matched.pockets:\n            pock_idx = p.index\n            #self.ss_matched._pocket_xyz[pock_idx] = p.centroid\n            self.ss_matched._pocket_space[pock_idx] = p.space\n            #self.ss_matched._pocket_contact[pock_idx] = p.isContact\n            if pock_idx < self.template_pocket_count: \n                self.pocket_matched_status[pock_idx] = True \n        \n\n    def pickle(self,output_name,as_type='original'):\n        output_dir = '/'.join(output_name.split('/')[0:-1])\n        obj_name = output_name.split('/')[-1]\n        #if as_type == 'original':\n        #    pickle_file(output_dir + '/' + obj_name,self.ss)\n        #elif as_type == 'matched':\n        #    pickle_file(output_dir + '/' + obj_name,self.ss_matched)\n        #else:\n        #    raise ValueError('unknow alphaspace ss type')\n        pickle_file(output_dir + '/' + obj_name, self)\n\n    def view(self,output_dir,as_type='original',contact_only=False):\n        if as_type == 'original': \n            #self.ss.save(output_dir,receptor=self._recep_mdtraj,\\\n            #        binder=self._lig_mdtraj,contact_only=contact_only)\n            write_snapshot(output_dir, self.ss, receptor=self._recep_mdtraj,binder=self._lig_mdtraj,contact_only=contact_only)\n        \n        elif as_type == 'matched':\n            template_pocket_order = [p.index for p in sorted([p for p in self.template_ss.pockets],key=lambda p:p.space,reverse=True)]\n            new_pocket_order = [p.index for p in self.ss_matched.pockets 
if p.index >= self.template_pocket_count]\n            pocket_order = template_pocket_order + new_pocket_order\n            #print(pocket_order)\n            #print(self.template_pocket_count)\n            write_snapshot(output_dir,self.ss_matched,pocket_order=pocket_order,\\\n                    receptor=self._recep_mdtraj,binder=self._lig_mdtraj,contact_only=contact_only) \n\n        else:\n            raise ValueError('unknown alphaspace ss type')\n\n\ndef write_snapshot_by_order(folder_path, snapshot, pock_order, receptor=None, \\\n        binder=None, chimera_scripts=True, contact_only=True):\n    \n    from alphaspace2.View import write_chimera_scripts,gen_pdb_line\n\n    if os.path.isdir(os.path.join(folder_path, 'pockets')):\n        shutil.rmtree(os.path.join(folder_path, 'pockets'))\n    os.makedirs(os.path.join(folder_path, 'pockets'))\n\n\n    if chimera_scripts:\n        write_chimera_scripts(folder_path)\n\n    if receptor or binder:\n        if not os.path.isdir(os.path.join(folder_path, 'pdb_out')):\n            os.makedirs(os.path.join(folder_path, 'pdb_out'))\n        if receptor:\n            receptor.save(os.path.join(folder_path, 'pdb_out', 'prot.pdb'))\n        if binder:\n            binder.save(os.path.join(folder_path, 'pdb_out', 'lig.pdb'))\n\n    pocket_index = 0\n\n    non_empty_pockets = sorted([p for p in snapshot.pockets if p.centroid],key=lambda p:p.index) # \n    # bug fix: 'pocketes' was a typo, and the contact filter used 'in' instead of 'if'\n    pockets = [non_empty_pockets[p] for p in pock_order]\n\n    if contact_only:\n        pockets = [p for p in pockets if p.isContact]\n\n    for pocket in pockets:\n        pocket_index += 1\n        if receptor:\n            lining_atoms = receptor.atom_slice(pocket.lining_atoms_idx)\n            lining_atoms.save(os.path.join(folder_path, 'pockets', '{}_alpha.pdb'.format(pocket_index)))\n            lining_atoms.save(os.path.join(folder_path, 'pockets', '{}_beta.pdb'.format(pocket_index)))\n\n        with open(os.path.join(folder_path, 'pockets', '{}_beta.pdb'.format(pocket_index)), 'a') as handle:\n            for beta in pocket.betas:\n                handle.write(gen_pdb_line(atomIndex=beta.index,\n                                          atomName='BAO' if beta.isContact else 'BAU',\n                                          resName='BAC',\n                                          resIndex=pocket_index,\n                                          chainName=\" \",\n                                          bfactor=beta.score,\n                                          element=beta.best_probe_type,\n                                          xyz=beta.centroid,\n                                          occupancy=\" \")\n                             )\n            handle.write(gen_pdb_line(atomIndex=pocket_index,\n                                      atomName='BCC',\n                                      resName='BCC',\n                                      resIndex=pocket_index,\n                                      chainName=\" \",\n                                      bfactor=pocket.score,\n                                      element='C',\n                                      xyz=pocket.centroid))\n\n        with open(os.path.join(folder_path, 'pockets', '{}_alpha.pdb'.format(pocket_index)), 'a') as handle:\n            for alpha in pocket.alphas:\n                line = gen_pdb_line(atomIndex=alpha.index,\n                                    atomName='AAO' if alpha.isContact else 'AAU',\n                                    resName='AAC',\n                                    resIndex=pocket_index,\n                                    chainName=\" \",\n                                    bfactor=alpha.space,\n                                    occupancy=\" \",\n                                    element='C',\n                                    xyz=alpha.centroid,\n                                    )\n                handle.write(line)\n            handle.write(gen_pdb_line(atomIndex=pocket_index,\n                                      atomName='ACC',\n                                      resName='ACC',\n                                      resIndex=pocket_index,\n                                      chainName=\" \",\n                                      bfactor=pocket.score,\n                                      element='C',\n                                      xyz=pocket.centroid))\n    \n\n\n\nclass assign_new_pock(object):\n    '''\n    For a group of structures whose pockets are matched to a template AS, the unmatched beta atoms \n    are clustered to form new pockets that do not exist in the template structure.\n\n    '''\n    def __init__(self,as_list):\n        '''\n        param: as_list: a list of AS objects which are matched to a template AS.\n        param: output_dir: folder to save updated as objects. Output as objects are saved with the same file name\n        but can be in a different folder. If output_dir is None, the updated as_obj is not written out\n        but kept in self.as_list (see the commented-out save_as_obj below).\n\n        
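A minimal usage sketch (hypothetical paths, assuming the objects were already\n        matched to the same template):\n            objs = [unpickle_file(p) for p in ['as1.pkl', 'as2.pkl']]\n            assign_new_pock(objs).all_unmatched_beta_clustering()\n        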
'''\n        self.as_list = as_list \n        \n        \n\n    def all_unmatched_beta_clustering(self):\n        all_unmatched_beta_coords = []\n        all_unmatched_beta_ind = []\n        all_unmatched_beta_obj_ind = []\n        \n        for ind, obj in enumerate(self.as_list):\n            matched_mask = np.array(obj._beta_matched_status)\n            unmatched_beta_ind = np.argwhere(matched_mask==False)\n            unmatched_beta_ind = [q for t in unmatched_beta_ind for q in t]\n            #print(len(unmatched_beta_ind))\n            unmatched_beta_obj_ind = [ind] * len(unmatched_beta_ind)\n            \n            unmatched_beta_coords = np.array(obj.ss._beta_xyz)[~matched_mask]\n            #print(unmatched_beta_coords.shape)\n            all_unmatched_beta_coords.extend(unmatched_beta_coords)\n            all_unmatched_beta_ind.extend(unmatched_beta_ind)\n            all_unmatched_beta_obj_ind.extend(unmatched_beta_obj_ind)\n        \n        # clustering all unmatched beta atoms \n        all_unmatched_beta_coords = np.array(all_unmatched_beta_coords)\n        zmat = hier.linkage(all_unmatched_beta_coords, method='average')\n        cluster = hier.fcluster(zmat,4.7,criterion='distance')\n        \n\n        unmatched_pocket_ind = list(set(cluster.tolist()))\n        #print(unmatched_pocket_ind)\n        #all_pocket_ind = matched_pocket_ind + unmatched_pocket_ind\n\n        # update pocket id for unmatched beta atoms \n        all_info = []\n        for ind,obj in enumerate(self.as_list):\n            mask = np.array(all_unmatched_beta_obj_ind) == ind \n            #print(mask)\n            beta_ind = np.array(all_unmatched_beta_ind)[mask]\n            #print(beta_ind)\n            pocket_ind = cluster[mask]\n            #print(pocket_ind)\n            obj.update_unmatched_pocket(beta_ind,pocket_ind)\n\n\n\n        \n    #def save_as_obj(self,output_dir,output_names):\n    #    '''\n    #    save updated as objects \n    #    '''\n    #    if len(output_names) != len(self.as_list):\n    #        raise ValueError('Saving as objects error: output name count is different from as objects count')\n    #    if not os.path.exists(output_dir):\n    #        os.makedirs(output_dir)\n    #    for ind, obj in enumerate(self.as_list):\n    #        obj_name = output_names[ind]\n    #        pickle_file(output_dir + '/' + obj_name,obj) \n\n\n\nclass Pocket_info(object):\n    '''\n    calculate pocket information\n    1. pocket total alpha space\n    2. pocket total beta space\n    3. pocket total ligandability\n    4. pocket percent polar surface area\n#    5. pocket log D (defined as sum of logP for each pocket residue divided by total number of residues in that pocket)\n    '''\n\n    def __init__(self,as_obj,pock_type='original', pock_list='all',lig=None):\n        '''\n        params as_obj: protein AS object\n        params pock_type: pocket type ('original' or 'matched') to specify how pockets are defined.\n        Original pockets are directly generated using AlphaSpace. \n        Matched pockets are pockets renamed by matching to a template.\n        params pock_list: list of pockets of interest. 
If 'all', all pockets are analyzed.\n        '''\n        self.as_obj = as_obj\n        self.pock_type = pock_type\n        self.pock_list = pock_list\n\n        if self.pock_type == 'original':\n            # bug fix (assumption): the mapping attributes defined on AS above are underscore-prefixed\n            self.alpha_to_pocket_mapping = self.as_obj._alpha_to_pocket_mapping\n            self.beta_to_pocket_mapping = self.as_obj._beta_to_pocket_mapping\n            #self.protein_to_pocket_mapping = self.as_obj.protein_to_pocket_mapping\n        elif self.pock_type == 'matched':\n            self.alpha_to_pocket_mapping = self.as_obj._alpha_matched_to_pocket_mapping\n            self.beta_to_pocket_mapping = self.as_obj._beta_matched_to_pocket_mapping\n            #self.protein_to_pocket_mapping = self.as_obj.protein_matched_to_pocket_mapping\n        else:\n            print('Unknown pocket type')\n\n        if pock_list == 'all':\n            self.pock_ind = sorted(list(set(self.alpha_to_pocket_mapping)))\n\n        else:\n            self.pock_ind = pock_list\n        \n        self.lig = lig \n\n\n    def calc_pock_ligandability_and_beta_space(self):\n        '''\n        get pocket space and ligandability info for different types of pocket definition\n        :param type: beta or beta_matched. If beta, the pocket is defined by the pdb of interest; \n        if beta_matched, the pocket is defined by the template pockets for the ones that can be matched, reclustering \n        unmatched beta atoms within the beta atoms of the pdb of interest or the beta atoms of all pdb structures\n        :return:\n        '''\n\n        beta_space = np.array(self.as_obj.beta_space)\n        beta_ligandability = np.array(self.as_obj.ligandability)\n\n        self.pocket_beta_space = [None] * len(self.pock_ind)\n        self.pocket_ligandability = [None] * len(self.pock_ind)\n        for ind, i in enumerate(self.pock_ind):\n            beta_mask = np.array(self.beta_to_pocket_mapping) == i\n            space = np.sum(beta_space[beta_mask])\n            ligandability = np.sum(beta_ligandability[beta_mask])\n            self.pocket_beta_space[ind] = space\n            self.pocket_ligandability[ind] = ligandability\n\n    def calc_pock_alpha_space(self):\n        '''\n        Get pocket alpha space and score.\n        Alpha space = sum of all alpha atom volume in a pocket\n        #Alpha score = alpha space * percent non-polar SAS\n        #Percent non-polar SAS = (sum of SAS for pocket lining protein atoms if atom type is not N, O, or SG) / total SAS for pocket lining protein atoms\n        '''\n        alpha_space = np.array(self.as_obj.alpha_space)\n\n        self.pocket_space = [None] * len(self.pock_ind)\n        #self.pocket_score = [None] * len(self.pock_ind)\n\n        for ind, i in enumerate(self.pock_ind):\n            alpha_mask = np.array(self.alpha_to_pocket_mapping) == i\n            space = np.sum(alpha_space[alpha_mask])\n            #score = (1 - self.pock_percent_polar[ind]) * space\n            self.pocket_space[ind] = space\n            #self.pocket_score[ind] = score\n\n    def calc_pock_occupied_alpha_space(self):\n        '''\n        Get the pocket alpha space occupied by the ligand. The ligand can be the entire ligand or fragments. 
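One possible implementation (a sketch, not the original code) would sum the\n        contacted alpha space per pocket, e.g. occupied = sum(a.space for a in p.alphas if a.isContact).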
\n '''\n if not self.lig:\n print('Ligand is not defined therefore ligand occupied pocket space is not calculated')\n return \n else:\n pass \n\n","sub_path":"vs_pfm/struct_anlys/run_al2.py","file_name":"run_al2.py","file_ext":"py","file_size_in_byte":22426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"285954109","text":"###################################################################\n#CS6824 Homework1\n#Zhen Guo\n#10/2/16\n#functions for Watts-Strogatz graphs\n###################################################################\n\nimport numpy as np\nimport networkx as nx\nimport pylab as plt\nimport math\nimport random\nimport time\n\n#WS-1 function to generate Watts Strogatz graph\ndef generate_watts_strogatz_graph(n, k, p):\n \"\"\"randomised ring graphs\"\"\"\n ringG = ring_graph(n, k)\n graphSW = rewire(ringG, n, k, p)\n return graphSW \n\ndef ring_graph(n, k):\n \"\"\"create undirected, regular ring graph with nk/2 edges\"\"\"\n graph = np.array([x[:] for x in [[0]*n]*n])\n G = nx.from_numpy_matrix(graph)\n for i in range(n):\n for j in range(i + 1, i + 1 + math.ceil(k/2)):\n G.add_edge(*(i, j % n))\n return G\n\ndef rewire(ringG, n, k, p):\n \"\"\"rewire every edge with probability p\"\"\"\n for i in range(math.ceil(k/2)):\n for j in range(n):\n flag = randflag(p)\n if flag == 1: #decide to rewire this edge\n ncnodes = non_connected_nodes(ringG, j)\n lennodes = len(ncnodes)\n newnode = ncnodes[random.randint(0, lennodes - 1)]\n ringG.remove_edge(*(j, (j + i + 1)% n))\n ringG.add_edge(*(j, newnode))\n return ringG\n\ndef randflag(p):\n \"\"\"determine if an edge will be rewired at the probability of p\"\"\"\n chance = random.random()\n if chance < p:\n return 1\n else:\n return 0\n\ndef non_connected_nodes(G, node):\n \"\"\"generate all nodes not connected to x\"\"\"\n nodelist = G.nodes()\n neighbor = G.neighbors(node)\n for nei in neighbor:\n nodelist.remove(nei)\n nodelist.remove(node)\n return nodelist\n\n#WS-2 average shortest path length and clustering coefficient\ndef average_shortest_path_length(G):\n \"\"\"connected graph only\"\"\"\n len = nx.average_shortest_path_length(G)\n return len\n\ndef clustering_coefficient(G):\n \"\"\"undirected graph only\"\"\"\n cluster = nx.clustering(G)\n coeff = sum(cluster.values()) / len(cluster)\n return coeff\n\n#WS-3 p values\ndef p_range(k):\n p = []\n x = math.pow(10, -4.0 / k)\n for i in range(k, 0, -1):\n p.append(math.pow(x, i))\n return p\n\n#WS-3 run test with different p values\ndef test_p_n(nlist, k, plist, f):\n runningtime = {}\n for n in nlist:\n time1 = time.time()\n f.write(\"n = %d, k = 20:\\n\" %n)\n for p in plist:\n G = generate_watts_strogatz_graph(n, k, p) \n lp = average_shortest_path_length(G)\n cp = clustering_coefficient(G)\n f.write(\"p = %.3e, l = %.2f, c = %.2f \\n\" %(p, lp, cp)) \n time2 = time.time()\n runningtime[n] = time2 - time1\n for n in nlist:\n f.write(\"Time used for running 20 times n = %d is: %.4fs \\n\" %(n, runningtime[n])) \n print(\"WS-3 p tests completed.\")\n print(\"Time used: %.4fs\" %sum(runningtime.values()))\n return \n\n\n\n#WS-4 generate graph and output\ndef SW_figure1_plot(n, k, prange):\n \"\"\"compute 20 p values with 10 runs, plot the mean and std of l and c\"\"\"\n time0 = time.time()\n p0 = 0\n p1 = 1\n #regular lattice\n G = generate_watts_strogatz_graph(n, k, p0) \n l0 = average_shortest_path_length(G)\n c0 = clustering_coefficient(G)\n\n #rewire the regular lattice\n lave = [] #l values for 20 p\n cave = [] #c 
values for 20 p\n for p in prange:\n llist = []\n clist = []\n for i in range(10): #run each p value 10 times\n G = generate_watts_strogatz_graph(n, k, p) \n l = average_shortest_path_length(G)\n c = clustering_coefficient(G)\n llist.append(l / l0)\n clist.append(c / c0)\n npllist = np.array(llist)\n npclist = np.array(clist)\n lave.append([npllist.mean(), npllist.std()])\n cave.append([npclist.mean(), npclist.std()])\n\n #random graph\n l1list = []\n c1list = []\n for i in range(10):\n Grand = generate_watts_strogatz_graph(n, k, p1)\n l1 = average_shortest_path_length(Grand)\n c1 = clustering_coefficient(Grand)\n l1list.append(l1 / l0)\n c1list.append(c1 / c0)\n npl1 = np.array(l1list)\n npc1 = np.array(c1list)\n l1ave = [npl1.mean(), npl1.std()]\n c1ave = [npc1.mean(), npc1.std()]\n\n #output results to file\n time1 = time.time()\n runningtime = time1 - time0\n path = 'results/WS4_n_{}.out'.format(n)\n f = open(path, 'w')\n f.write(\"p = 0, l0 = %.3f, c0 = %.3f \\n\" %(l0, c0))\n for p in prange:\n f.write(\"p = %.3e, l/l0 = %.3f %.3e, c/c0 = %.3f %.3e\\n\" \\\n %(p, lave[prange.index(p)][0],lave[prange.index(p)][1], \\\n cave[prange.index(p)][0], cave[prange.index(p)][1]))\n f.write(\"p = 1, l1/l0 = %.3f %.3e, c1/c0 = %.3f %.3e \\n\" \\\n %(l1ave[0], l1ave[1], c1ave[0], c1ave[1]))\n f.write(\"Time used: %.4fs\" %runningtime)\n f.close()\n\n #draw graph based on the result\n nplave = np.array(lave)\n npcave = np.array(cave)\n lave_mean = nplave[:,0]\n lave_std = nplave[:,1]\n cave_mean = npcave[:,0]\n cave_std = npcave[:,1]\n lave_mean = np.insert(lave_mean, 0, 1)\n lave_std = np.insert(lave_std, 0, 0)\n cave_mean = np.insert(cave_mean, 0, 1)\n cave_std = np.insert(cave_std, 0, 0)\n lave_mean = np.append(lave_mean, l1ave[0])\n lave_std = np.append(lave_std, l1ave[1])\n cave_mean = np.append(cave_mean, c1ave[0])\n cave_std = np.append(cave_std, c1ave[1])\n\n ax = plt.subplot()\n ax.set_xscale(\"log\", nonposx='clip')\n npp = np.array(prange)\n npp = np.insert(npp, 0, 0.00008)\n npp = np.append(npp, 1)\n plt.ylim([0, 1.05])\n plt.xlim([0.00008, 1])\n plt.errorbar(npp, lave_mean, yerr = lave_std, linestyle = 'None', marker = 's', markersize = 4, label = 'l(p)/l(0)')\n plt.errorbar(npp, cave_mean, yerr = cave_std, linestyle = 'None', marker = 'o', markersize = 4, label = 'c(p)/c(0)')\n title = 'n = {}, k = {}'.format(n, k)\n ax.set_title(title)\n plt.legend()\n plt.xlabel('p')\n path = 'results/WS4_n_{}.png'.format(n)\n plt.savefig(path) # save as png\n plt.clf()\n print(\"WS4 n={}, k=20 plot finished.\".format(n))\n print(\"Time used: %.4fs\" %runningtime)\n return","sub_path":"WSGraph.py","file_name":"WSGraph.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"271056623","text":"import numpy\r\nboard=numpy.array([['-','-','-'],['-','-','-'],['-','-','-']])\r\npl_1 = \"X\"\r\npl_2 = \"O\"\r\nprint(\"Player_1 is X\\nPlayer_2 is O\")\r\n\r\ndef check_rows(symbol):\r\n for r in range(3):\r\n count = 0\r\n for c in range(3):\r\n if board[r][c] == symbol:\r\n count += 1\r\n if count == 3:\r\n return True \r\n return False\r\n\r\ndef check_columns(symbol):\r\n for c in range(3):\r\n count = 0\r\n for r in range(3):\r\n if board[r][c] == symbol:\r\n count += 1\r\n if count == 3:\r\n return True \r\n return False\r\n\r\ndef check_diagonal(symbol):\r\n count = 0\r\n for r in range(3):\r\n for c in range(3):\r\n if board[r][c] == symbol and r==c :\r\n count += 1\r\n if count == 3:\r\n return True\r\n if 
board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[1][1] == symbol:\r\n        return True\r\n    return False  \r\n    \r\n\r\n\r\ndef won(symbol):\r\n    return check_rows(symbol) or check_columns(symbol) or check_diagonal(symbol)\r\n\r\ndef place(symbol):\r\n    print(numpy.matrix(board))\r\n    while(1):\r\n        row = int(input(\"Enter no. of rows 1,2 or 3: \"))\r\n        cols= int(input(\"Enter no. of column 1, 2 or 3: \"))\r\n        if row>0 and row <4 and cols >0 and cols <4 and board[row-1][cols-1] == '-':\r\n            break\r\n        else:\r\n            print('Invalid Input Or ALREADY FILLED \\n Try again')\r\n    board[row-1][cols-1] = symbol \r\n\r\ndef play():\r\n    for turn in range(9):\r\n        if turn%2 == 0:\r\n            print(\"X turns\")\r\n            place(pl_1)\r\n            if won(pl_1):\r\n                print(\"player 1 has won the game\")\r\n                break\r\n        else:\r\n            print(\"O turns\")\r\n            place(pl_2)\r\n            if won(pl_2):\r\n                print(\"player 2 has won the game\")\r\n                break  \r\n    if not(won(pl_1)) and not(won(pl_2)):\r\n        print(numpy.matrix(board))\r\n        print(\"Draw\")  \r\nplay()\r\n    \r\n","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"260201628","text":"from conans import ConanFile, CMake, tools\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Ezc3dConan(ConanFile):\n    name = \"ezc3d\"\n    description = \"EZC3D is an easy to use reader, modifier and writer for C3D format files.\"\n    license = \"MIT\"\n    topics = (\"conan\", \"ezc3d\", \"c3d\")\n    homepage = \"https://github.com/pyomeca/ezc3d\"\n    url = \"https://github.com/conan-io/conan-center-index\"\n    exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n    generators = \"cmake\"\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n    options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n    default_options = {\"shared\": False, \"fPIC\": True}\n\n    _cmake = None\n\n    @property\n    def _source_subfolder(self):\n        return \"source_subfolder\"\n\n    def config_options(self):\n        if self.settings.os == \"Windows\":\n            del self.options.fPIC\n\n    def configure(self):\n        if self.options.shared:\n            del self.options.fPIC\n\n    def validate(self):\n        if self.settings.compiler.cppstd:\n            tools.check_min_cppstd(self, 11)\n\n    def source(self):\n        tools.get(**self.conan_data[\"sources\"][self.version],\n                  destination=self._source_subfolder, strip_root=True)\n\n    def _patch_sources(self):\n        for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n            tools.patch(**patch)\n        # don't force PIC\n        tools.replace_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n                              \"set(CMAKE_POSITION_INDEPENDENT_CODE ON)\", \"\")\n        # fix install\n        tools.replace_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n                              \"set(${PROJECT_NAME}_LIB_FOLDER Lib)\",\n                              \"set(${PROJECT_NAME}_LIB_FOLDER lib)\")\n        tools.replace_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n                              \"set(${PROJECT_NAME}_LIB_FOLDER lib/${PROJECT_NAME})\",\n                              \"set(${PROJECT_NAME}_LIB_FOLDER lib)\")\n        tools.replace_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n                              \"set(${PROJECT_NAME}_BIN_FOLDER lib/${PROJECT_NAME})\",\n                              \"set(${PROJECT_NAME}_BIN_FOLDER bin)\")\n\n    def _configure_cmake(self):\n        if self._cmake:\n            return self._cmake\n        self._cmake = CMake(self)\n        self._cmake.definitions[\"USE_MATRIX_FAST_ACCESSOR\"] = True\n        self._cmake.definitions[\"BINDER_PYTHON3\"] = False\n        self._cmake.definitions[\"BINDER_MATLAB\"] = False\n
self._cmake.definitions[\"BUILD_EXAMPLE\"] = False\n self._cmake.definitions[\"BUILD_DOC\"] = False\n self._cmake.definitions[\"GET_OFFICIAL_DOCUMENTATION\"] = False\n self._cmake.definitions[\"BUILD_TESTS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"ezc3d\": \"ezc3d::ezc3d\"}\n )\n\n @staticmethod\n def _create_cmake_module_alias_targets(module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\".format(alias=alias, aliased=aliased))\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(self._module_subfolder,\n \"conan-official-{}-targets.cmake\".format(self.name))\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"ezc3d\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"ezc3d\"\n self.cpp_info.builddirs.append(self._module_subfolder)\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"ezc3d\"))\n lib_suffix = {\"Debug\": \"_debug\"}.get(str(self.settings.build_type), \"\")\n self.cpp_info.libs = [\"ezc3d\" + lib_suffix]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"m\"]\n","sub_path":"recipes/ezc3d/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"62419450","text":"import sys\nfrom gio.TokenIterator import mkTokenIterator\nfrom trees.Trees import *\nfrom Tokens import *\nimport Keywords\nfrom Position import Position\nfrom reporters.Message import Message\n\n\n# All rules should set themselfs up to progross to next token if \n# sucessful\n# All rules are optional. If not, name as ''Fix'\n# We've got problems:\n# - Identifying by name alone does not split between '+' (monop) and\n# '+' (binop). Is this kind of issue what full names are for? It's\n# making the monop scene a mess. This, essentially, is the problem of \n# polymorphism, arrising early.\n# - chaining for the interpreter means signalling the last item, not \n# the first (chain the trailing return into this). But I recall that\n# we chained the first op for a reason? Reversal?\n# - chaining feels compromised? Why have parameters if reaching for\n# the next item in line?\ndef isInfix(name):\n return ((name[-1] == '=') or (name in Keywords.INFIX))\n \n \n \nclass Syntaxer:\n '''\n Generates a tree holding the structure of tokens.\n The sole purpose of this class is to extract and organise data from \n the token stream. Unlike most other parsers it is not concerned with\n names, ''symbols', or anything else. 
\n '''\n def __init__(self, source, reporter):\n self.source = source\n self.reporter = reporter\n #self.it = source.tokenIterator(reporter)\n self.it = mkTokenIterator(source, reporter)\n self.tok = None\n self.ast = []\n self.chainedItem = None\n # start me up\n self.root()\n #print(self.ast.toString())\n \n \n ## reporter helpers \n def position(self):\n return Position(self.source, self.it.lineCount, self.it.lineOffset)\n \n def error(self, msg):\n tokenTxt = self.it.textOf()\n msg = Message.withPos(msg, self.source, self.position())\n if (tokenTxt):\n msg.details = [\"token text : '{0}'\".format(tokenTxt)]\n self.reporter.error(msg)\n sys.exit(\"Error message\")\n\n def expectedTokenError(self, ruleName, tok):\n self.error(\"In rule '{0}' expected token '{1}' but found '{2}'\".format(\n ruleName,\n tokenToString[tok],\n tokenToString[self.tok]\n ))\n\n def expectedRuleError(self, currentRule, expectedRule):\n self.error(\"In rule '{0}' expected rule '{1}'. Current token: '{2}'\".format(\n currentRule,\n expectedRule,\n tokenToString[self.tok]\n ))\n \n ## iterators\n def textOf(self):\n return self.it.textOf()\n \n def _next(self):\n #self.prevLine = self.it.lineCount()\n #self.prevOffset = self.it.lineOffset()\n self.tok = self.it.__next__()\n\n\n ## Token helpers\n def isToken(self, token):\n return (token == self.tok)\n\n #def option(self, token, action, b):\n #if (token == self.tok):\n #action(b)\n \n def getTokenOrError(self, ruleName, token):\n if(self.tok != token):\n self.expectedTokenError(ruleName, token)\n t = self.textOf()\n self._next()\n return t\n\n def skipTokenOrError(self, ruleName, token):\n if(self.tok != token):\n self.expectedTokenError(ruleName, token)\n self._next()\n \n def skipToken(self, token):\n r = False\n if (token != self.tok):\n r = True\n self._next()\n return r\n\n def optionallySkipToken(self, token):\n '''\n Optionally skip a token.\n If skips, returns True.\n ''' \n r = (token == self.tok)\n if (r):\n self._next()\n return r\n \n \n ## Rule helpers\n #! enable\n def zeroOrMoreDelimited(self, lst, rule, endToken):\n '''\n Often easier and more human for list rules to match the \n delimiter than to keep checking if contained rules match.\n Skips the delimiting token.\n '''\n while(not self.isToken(endToken)):\n rule(lst)\n self._next()\n\n def oneOrMoreDelimited(self, ruleFixed, endToken):\n '''\n Often easier and more human for list rules to match the \n delimiter than to keep checking if contained rules match.\n Skips the delimiting token.\n @rule nust be non-optional 'fixed' (throws error)\n '''\n count = 0\n while(True):\n ruleFixed()\n count += 1\n if (self.isToken(endToken)):\n break\n #print(\"count {}\".format(count))\n #! no?\n self._next()\n return count\n \n def oneOrError(self, ruleOption, currentRuleName, expectedRuleName):\n '''\n Match one rule or mark an error.\n '''\n if(not ruleOption()):\n self.expectedRuleError(currentRuleName, expectedRuleName)\n \n ## Rules\n def optionalKindAnnotation(self, tree):\n '''\n option(':' ~ Kind)\n Optionally match a Kind annotation.\n '''\n coloned = self.optionallySkipToken(COLON)\n if (coloned):\n #tree.kindStr = self.getTokenOrError('Kind Annotation', IDENTIFIER) \n tree.parsedKind = self.getTokenOrError('Kind Annotation', IDENTIFIER) \n # add contents\n #self.optionalGenericParams(k)\n return coloned\n \n #? 
nmelessData?\n def dataNameless(self):\n '''\n (IntNum | FloatNum | String) ~ option(KindAnnotation)\n '''\n commit = (\n self.isToken(INT_NUM) or \n self.isToken(FLOAT_NUM) or \n self.isToken(STRING) or \n self.isToken(MULTILINE_STRING)\n )\n if (commit):\n t = None\n if (self.isToken(INT_NUM)):\n t = mkIntegerData(self.position(), self.textOf()) \n if (self.isToken(FLOAT_NUM)):\n t = mkFloatData(self.position(), self.textOf()) \n if (self.isToken(STRING)):\n t = mkStringData(self.position(), self.textOf()) \n if (self.isToken(MULTILINE_STRING)):\n t = mkStringData(self.position(), self.textOf()) \n self.ast.append(t)\n self._next()\n self.optionalKindAnnotation(t)\n return commit\n \n \n # lot like a simple call\n # and a parameter\n def identifierOptionalKind(self, lst):\n '''\n identifier ~ Option(Kind)\n Optional type declaration.\n Succeed or error\n '''\n # id\n markStr = self.getTokenOrError('Optional Kinded Identifier', IDENTIFIER) \n t = mkParameterDefinition(self.position(), markStr)\n # delimit\n self.optionalKindAnnotation(t)\n return True\n \n # def defineParameter(self, lst):\n # '''\n # identifier ~ ':' ~ Kind\n # Enforced type declaration.\n # Succeed or error\n # '''\n # # id\n # markStr = self.getTokenOrError('Define Parameter', IDENTIFIER) \n # # delimit\n # self.skipTokenOrError('Define Parameter', COLON)\n # # type\n # t = mkParameterDefinition(self.position(), markStr)\n # t.returnKind = self.getTokenOrError('Define Parameter', IDENTIFIER)\n # self.ast.append(t)\n # return True\n\n # def defineParameters(self, lst):\n # '''\n # '(' ~ zeroOrMore(defineParameter) ~')'\n # Enforced bracketing.\n # Suceed or error\n # '''\n # self.skipTokenOrError('Define Parameters', LBRACKET)\n # self.zeroOrMoreDelimited(lst, self.defineParameter, RBRACKET) \n # return True\n #x\n def dataDefine(self, lst):\n '''\n 'fnc' ~ (Identifier | OperatorIdentifier) ~ DefineParameters ~ Option(Kind) ~ ExplicitSeq\n Definitions attached to code blocks\n Used for both named and operater functions.\n '''\n #! this textOf is direct, but could be done by token lookup\n commit = (\n self.isToken(IDENTIFIER) and \n self.it.textOf() == 'val' or\n self.it.textOf() == 'var'\n )\n if(commit):\n self._next()\n pos = self.position()\n \n # mark \n if(self.tok != IDENTIFIER and self.tok != OPERATER):\n self.tokenError(\"In rule '{}' expected '{}' or '{}' but found '{}'\".format(\n 'Define Data',\n tokenToString[IDENTIFIER],\n tokenToString[OPERATER],\n tokenToString[self.tok]\n ))\n markStr = self.textOf()\n self._next()\n \n # make node\n t = mkDataDefine(pos, markStr)\n self.ast.append(t)\n \n # Kind\n self.optionalKindAnnotation(t)\n \n # body (namelessData)\n #! Perhaps could take an expression\n #? close to namelessFuncCall but inly alowing one expression\n self.skipTokenOrError('Define Data', LCURLY)\n self.dataNameless(t.body)\n self.skipTokenOrError('Define Data', RCURLY)\n return commit\n \n # def functionDefine(self, lst):\n # '''\n # 'fnc' ~ (Identifier | OperatorIdentifier) ~ DefineParameters ~ Option(Kind) ~ ExplicitSeq\n # Definitions attached to code blocks\n # Used for both named and operater functions.\n # '''\n # #! this textOf is direct, but could be done by token lookup\n # commit = (self.isToken(IDENTIFIER) and self.it.textOf() == 'fnc')\n # if(commit):\n # self._next()\n # pos = self.position()\n \n # # mark\n # # currently. 
can't be dried out\n # if(self.tok != IDENTIFIER and self.tok != OPERATER):\n # self.tokenError(\"In rule '{}' expected '{}' or '{}' but found '{}'\".format(\n # 'Define Function',\n # tokenToString[IDENTIFIER],\n # tokenToString[OPERATER],\n # tokenToString[self.tok]\n # ))\n # markStr = self.textOf()\n # self._next()\n\n # # make node\n # t = mkContextDefine(pos, markStr)\n # self.ast.append(t)\n \n # # params\n # #! generic params\n # self.defineParameters(t.params)\n\n # # Kind\n # self.optionalKindAnnotation(t) \n \n # # body (exp seq)\n # self.skipTokenOrError('Define Function', LCURLY)\n # self.seqContents()\n # self.skipTokenOrError('Define Function', RCURLY)\n # return commit\n \n\n \n #??? test expression embedding\n def parametersForFunctionCall(self, lst):\n '''\n '(' ~ zeroOrMore(expression) ~')' | Empty\n Multiple parameters, optional kind.\n Succeed or error\n '''\n bracketted = self.optionallySkipToken(LBRACKET)\n if (bracketted):\n #if(self.chainedItem):\n #self.ast.append(self.chainedItem)\n #self.chainedItem = None\n self.zeroOrMoreDelimited(lst, self.expressionCall, RBRACKET) \n return True\n \n #def parametersForChainedOperaterFunctionCall(self, lst):\n #'''\n #expression |\n #'(' ~ zeroOrMore(expression) ~ ')'\n #One parameter, optional bracketing.\n #Succeed or error\n #'''\n #bracketted = self.optionallySkipToken(LBRACKET)\n #if (not bracketted):\n ## assume binOp\n #self.oneOrError(lst, \n #self.expressionCall, \n #'parametersForOperaterCall', \n #'expressionCall'\n #)\n #else:\n ## allow any parameters\n #self.zeroOrMoreDelimited(lst, self.expressionCall, RBRACKET)\n #return True\n \n \n #def chainedOperaterBinOpCall(self, lst):\n #'''\n #OperatorIdentifier ~ ExpressionCall\n #'''\n ## get mark \n #print ('binop operator:' + self.textOf())\n #t = mkContextCall(self.position(), self.textOf())\n #self.ast.append(t)\n #self._next()\n ## generic params?\n #self.oneOrError(lst, \n #self.expressionCall, \n #'expressionCall', \n #'chainedOperaterBinOpCall'\n #)\n ##self.optionalKindAnnotation(t) \n #return True\n \n \n #def optionalChainedExpressionCall(self, lst):\n #'''\n #zeroOrMore(period ~ namedFunctionCall) | (operaterFunctionCall))\n #Used after function and operator calls.\n #'''\n #while(True):\n #if (self.tok == PERIOD): \n #self._next()\n #self.functionCall(lst)\n #continue\n #if (self.tok == OPERATER and isInfix(self.it.textOf())):\n #self.chainedOperaterBinOpCall(lst)\n #continue\n #break\n #x\n def functionCall(self, lst, isDotChained):\n '''\n (Identifier | OPERATER) ~ Arguments ~ Option(Kind)\n \n '''\n #! this textOf is direct, but could be done by token lookup\n commit = (self.isToken(IDENTIFIER) or self.isToken(OPERATER))\n if(commit): \n # node \n t = mkContextCall(self.position(), self.textOf())\n \n # if chained (in either way), grab last expression and use \n # as first parameter to this expression\n if (isDotChained or isInfix(self.textOf())):\n t.params.append(lst.pop()) \n \n self.ast.append(t)\n self._next()\n \n # params\n if (not isInfix(t.parsedData)):\n self.parametersForFunctionCall(t.params)\n else:\n # Allow for the special case of infix (binop) operator \n # params, with no brackets and one parameter.\n #print(' infixing : ' + str(t.parsedData))\n self.oneOrError(t.params, \n self.expressionCall, \n 'functionCall infix operator params',\n 'expressionCall'\n )\n #print(' infixed : ' + str(t.params))\n\n # Kind\n self.optionalKindAnnotation(t)\n return commit \n\n\n #! 
maybe should be in function itself?\n # def operaterMonoFunctionCall(self, lst):\n # '''\n # MonoOperaterIdentifier ~ MonoOpCallParameter ~ Option(Kind)\n # Slightly, but strongly, different to namedFunctionCall.\n # '''\n # commit = (self.isToken(MONO_OPERATER))\n # if(commit): \n # # get mark \n # #print ('MONO operator:' + self.textOf())\n # t = mkMonoOpExpressionCall(self.position(), self.textOf())\n # self.ast.append(t)\n # self._next()\n # #! not expression, as another mono is not available, but otherwise ok\n # self.oneOrError(lst, \n # self.expressionCall, \n # 'parameterForMonoOperaterCall', \n # 'expressionCall'\n # )\n # #self.optionalKindAnnotation(t) \n # return commit\n \n def comment(self):\n commit = self.isToken(COMMENT)\n if (commit):\n t = mkSingleLineComment(self.position(), self.textOf().lstrip())\n self.ast.append(t)\n self._next()\n return commit\n\n def multilineComment(self):\n commit = self.isToken(MULTILINE_COMMENT)\n if (commit):\n t = mkMultiLineComment(self.position(), self.textOf().lstrip())\n self.ast.append(t)\n self._next()\n return commit\n\n\n #x\n def expressionCall(self, lst):\n '''\n dataNameless | namedFunctionCall | operaterFunctionCall\n Calls where they can be used nested (not as the target\n of allocation etc.?)\n '''\n #print('expression')\n #! need a way to spot dot-chaining misapplied \n isDotChained = self.optionallySkipToken(PERIOD)\n\n commit = (\n self.dataNameless(lst) \n or self.functionCall(lst, isDotChained)\n or self.operaterMonoFunctionCall(lst)\n or self.seqNameless()\n )\n \n # chaining\n #! Not DRY (because not convinced of final form yet).\n # for the interpreter, we catch the last return\n # For the compiler, we do inner first.\n #if (commit):\n \n ##t = lst[-1]\n ##if (self.optionallySkipToken(PERIOD)):\n ## t.isChained = True\n ## The iterator is resting on the next op. What if it's an \n ## infix? Prefer this sneaky look-forward to a \n ## high-engineered look-back\n ##elif ((self.isToken(IDENTIFIER) or self.isToken(OPERATER)) and isInfix(self.it.textOf())):\n ## t.isChained = True\n ##elif (isinstance(t, NameMixin) and isInfix(t.parsedData) and len(lst) > 1):\n ##lst[-2].isChained = True\n ## if this was chained, add in tree for parameter\n #if(self.chainedItem):\n #print(' chaining tree: ' + str(self.chainedItem))\n #print(' ...to: ' + str(lst[-1]))\n #lst[-1].params.insert(0, self.chainedItem)\n #self.chainedItem = None\n \n #isBinopId = ((self.isToken(IDENTIFIER) or self.isToken(OPERATER)) and isInfix(self.it.textOf()))\n #if (\n #isBinopId\n #or self.optionallySkipToken(PERIOD)\n #):\n #print(' popping: ' + str(lst[-1]))\n #self.chainedItem = lst.pop()\n \n return commit\n######### NEW\n\n\n## Seq\n#! Option\n def seqNameless(self):\n '''\n '{'~ oneOrMore(ExpressionCall) ~'}'\n '''\n commit = (self.isToken(LCURLY))\n if(commit): \n self._next()\n\n startLen = len(self.ast)\n \n # body\n #self.oneOrMoreDelimited(t.body, self.expressionCall, RCURLY)\n self.seqContents()\n self.skipTokenOrError('CodeSeqNameless', RCURLY)\n\n paramCount = len(self.ast) - startLen \n \n # node \n t = mkCodeSeqNameless(self.position(), paramCount)\n self.ast.append(t)\n \n return commit\n\n\n #? No Kind option\n def seqNamedDefine(self):\n '''\n 'nb' ~ (Identifier | OperatorIdentifier) ~ Option(Kind) ~ ExplicitSeq\n Definitions attached to code blocks\n Used for both named and operater functions.\n '''\n #! 
this textOf is direct, but could be done by token lookup\n commit = (self.isToken(IDENTIFIER) and self.it.textOf() == 'nb')\n if(commit):\n self._next()\n pos = self.position()\n \n # mark\n if(self.tok != IDENTIFIER and self.tok != OPERATER):\n self.tokenError(\"In rule '{}' expected '{}' or '{}' but found '{}'\".format(\n 'CodeSeq Named',\n tokenToString[IDENTIFIER],\n tokenToString[OPERATER],\n tokenToString[self.tok]\n ))\n markStr = self.textOf()\n self._next()\n\n # make node\n # node \n t = mkCodeSeqNamedDefine(self.position(), markStr)\n self.ast.append(t)\n\n # body\n self.skipTokenOrError('CodeSeq Named', LCURLY) \n self.seqContents()\n self.skipTokenOrError('CodeSeq Named', RCURLY) \n return commit\n \n## Namespace\n\n #! don't call it this, its a nameSet, or something\n #! Code lot like a function call but different (DRY). No return\n #! cause it's assumed to be anamespace or Unit.... if anything.\n def slotDefine(self, lst):\n '''\n 'ns' ~ Identifier ~ ExplicitSeq\n Definition of a namespace. Conceptually, a labeled set of \n expressions.\n '''\n #! this textOf is direct, but could be done by token lookup\n commit = (self.isToken(IDENTIFIER) and self.it.textOf() == 'ns')\n if(commit): \n self._next()\n pos = self.position()\n \n # mark\n if(self.tok != IDENTIFIER):\n self.expectedTokenError(\n 'CodeSlot Define',\n IDENTIFIER\n )\n markStr = self.textOf()\n self._next()\n\n # make node\n # node \n t = mkCodeSlotNamedDefine(self.position(), markStr)\n \n # params\n #self.parametersOption(t.params)\n \n # body\n self.skipTokenOrError('CodeSlot Define', LCURLY) \n self.seqContents(t.body)\n self.skipTokenOrError('Named Block', RCURLY) \n\n self.ast.append(t)\n return commit\n\n #def gteOperatorPrecidence(op1, op2):\n #op1\n #return\n\n def operatorCall(self, opStack):\n commit = self.isToken(OPERATER)\n if (commit):\n t = mkOperatorCall(self.position(), self.textOf())\n # #! for now, assume equal precidence\n while (\n (len(opStack) > 0) and\n # #t.precidence <= opStack.top.precidence\n # this assumes equality and left assoc\n opStack[-1] != LBRACKET\n ):\n self.ast.append(opStack.pop())\n opStack.append(t)\n self._next()\n return commit\n\n def parametersCallOption(self):\n '''\n option('(' ~ oneOrMore(parameter) ~')') \n Enforced bracketing.\n '''\n commit = self.isToken(LBRACKET)\n #print(str(commit))\n count = 0\n if (commit):\n # One or more params\n #! self.multiActionCall(), but not yet\n \n self._next()\n count = self.oneOrMoreDelimited(\n #self.parameterDefine,\n self.multiActionCallFix,\n RBRACKET\n ) \n return count\n \n\n def actionCall(self):\n '''\n (Identifier ~ oneOrMore(parameters) | ((Identifier | Operator) ~ parameter)\n Definitions attached to code blocks\n Used for both named and operater functions.\n '''\n commit = self.isToken(IDENTIFIER)\n if (commit):\n # node\n t = mkContextCall(self.position(), self.textOf())\n self._next()\n \n # params\n paramCount = self.parametersCallOption()\n t.paramCount = paramCount \n self.ast.append(t)\n return commit\n \n def dataActionCall(self):\n commit = False\n if (self.actionCall()):\n commit = True\n elif(self.dataNameless()):\n commit = True\n return commit\n \n #! but whats the difference between a list of parameter calls and a \n # list of instructions? None, bar execution time.\n #? Think this can be simplified but do accept simplifications, like no curly brackets?\n #! 
Not accepting dual parameter sets\n def multiActionCallFix(self):\n # has no idea if calling within a nameSet, or container, but \n # does it matter?\n print(\"multiActionCallFix {} {}\".format(self.position().toDisplayString(), self.textOf()))\n opStack = []\n # Must have data or monop to start\n prevWasData = False\n # if found op, can progress\n doMore = True\n hasAssignment = False\n while (doMore):\n if (prevWasData):\n \n # Pre-empt test for doubled equality\n if(self.isToken(OPERATER) and \n self.textOf() == \"=\" and \n hasAssignment\n ):\n self.expectedRuleError(\n \"Chained Action Call\",\n \"not Equality...(again)..?\"\n ) \n else:\n hasAssignment = True\n \n # i.e if no operator, quit chaining\n doMore = self.operatorCall(opStack)\n\n \n if (doMore and self.isToken(LBRACKET)):\n opStack.append(LBRACKET)\n self._next()\n prevWasData = False\n else:\n if (self.isToken(MONO_OPERATER)):\n #! should be ultimate precidence\n #? so no probs with a push?\n t = mkMonoOperatorCall(self.position(), self.textOf())\n opStack.append(t)\n self._next()\n\n # i.e if not found data, throw error\n # With no test, this fails if no data or action there\n # Also fails if there was an operator but no following \n # action/data\n commit1 = self.dataActionCall()\n if (not commit1):\n # something to do with EOL\n self.expectedRuleError(\n \"Chained Action Call\",\n \"DataAction Call\"\n ) \n\n #! need to protect against unbalanced brackets\n # if we reach a rbracket without corresponding \n # lbracket, it is not a fail. It may be a parameter\n # delimiter.\n \n if (self.isToken(RBRACKET)):\n print(\"opstack {}\".format(opStack))\n while(\n (len(opStack) > 0) and \n opStack[-1] != LBRACKET\n ):\n self.ast.append(opStack.pop())\n\n # Check if the stack is empty. This means \n # no lbracket was matched here i.e. calling rules \n # handle the token, or it is mismatched. 
\n # Either way, doMove is false, and there is no\n # next() token\n if (len(opStack) == 0):\n doMore = False\n else:\n opStack.pop()\n self._next()\n\n prevWasData = True\n \n # if not already, empty opStack\n if (len(opStack) > 0):\n self.ast.append(opStack.pop())\n #print(\"multiActionCalFixl2 {}\".format(commit))\n #return commit\n\n def multiActionCall(self):\n # has no idea if calling within a nameSet, or container, but \n # does it matter?\n print(\"multiActionCall {} {}\".format(self.position().toDisplayString(), self.textOf()))\n commit = ( \n # these are the possibilities to open a call\n self.isToken(INT_NUM) or\n self.isToken(FLOAT_NUM) or\n self.isToken(STRING) or\n #self.isToken(MULTILINE_STRING) or\n self.isToken(IDENTIFIER) or\n self.isToken(MONO_OPERATER)\n )\n if (commit):\n # This works for the first case of \n # \"simple call, no chain\" because the first token is\n # tested before commit, so should pass.\n self.multiActionCallFix()\n\n print(\"multiActionCall2 {}\".format(commit))\n return commit\n \n \n def lineFeed(self):\n '''\n 'Nothing'\n '''\n commit = (self.isToken(LINEFEED))\n if(commit): \n self._next()\n return commit\n \n def seqContents(self):\n '''\n Used for body contents.\n Allows definitions.\n '''\n entryCount = len(self.ast)\n while(\n self.comment()\n or self.multilineComment()\n # multiactioncall\n #or self.dataNameless()\n or self.seqNameless()\n or self.seqNamedDefine()\n or self.actionDefine()\n #or self.slotDefine()\n or self.multiActionCall() \n #or self.actionCall()\n #or self.dataDefine()\n #or self.functionDefine()\n # calls must go after defines, which are more \n # specialised in the first token\n #or self.expressionCall()\n or self.lineFeed()\n ):\n pass\n #? what are we doing here at the end?\n #if (len(lst) > 1):\n # lst[-1].prev = lst[-2]\n eCount = len(self.ast) - entryCount\n mkCodeSeqNameless(self.position(), eCount)\n\n\n## Construction parts\n\n\n def parameterDefine(self):\n '''\n identifier ~ Option(':' ~ Kind)\n Succeed or error\n '''\n # id\n markStr = self.getTokenOrError('Parameter Define', IDENTIFIER) \n t = mkParameterDefinition(self.position(), markStr)\n # type\n #! tmp. use type() ???\n #! is this optional?\n self.skipTokenOrError('Parameter Define', COLON)\n #if (self.isToken(COLON)):\n # self._next()\n t.kind = self.getTokenOrError('Parameter Define', IDENTIFIER)\n self.ast.append(t)\n return True\n \n def parametersDefineOption(self):\n '''\n option('(' ~ oneOrMore(parameter) ~')') \n Enforced bracketing.\n '''\n commit = self.isToken(LBRACKET)\n #print(str(commit))\n count = 0\n if (commit):\n # One or more params\n self._next()\n #! tor now. Will be MultiActionCall\n count = self.oneOrMoreDelimited(\n self.parameterDefine,\n RBRACKET\n ) \n return count\n \n \n## Actions\n #! unify paramCount handling\n def actionDefine(self):\n '''\n ('am' | 'ac') ~ \n (\n (Identifier ~ Parameters) |\n (OperatorIdentifier ~ Parameter)\n )\n ~ Option(Kind) ~ '=' ~ Option(ExplicitSeq)\n Definitions attached to code blocks\n Used for both named and operater functions.\n '''\n #! this textOf is direct, but could be done by token lookup\n commit = (\n self.isToken(IDENTIFIER) and\n (self.it.textOf() == 'am' or self.it.textOf() == 'ac')\n )\n if(commit): \n self._next()\n pos = self.position()\n \n # mark\n # currently. 
can't be dried out\n if(\n self.tok != IDENTIFIER and \n self.tok != OPERATER and \n self.tok != MONO_OPERATER\n ):\n self.expectedTokenError(\n 'Define Action',\n IDENTIFIER\n )\n\n # self.tokenError(\"In rule '{}' expected '{}' or '{}' but found '{}'\".format(\n # 'Define Action',\n # tokenToString[IDENTIFIER],\n # tokenToString[OPERATER],\n # tokenToString[self.tok]\n # ))\n markStr = self.textOf()\n \n if(self.tok == IDENTIFIER):\n # make node\n t = mkCodeSeqContextDefine(pos, markStr)\n \n # params\n self._next()\n # Need to use seperate code to the shunt algorithm to\n # handle actioncalls. \n # label1 + label2\n # is ok,\n # label1 9 + label2\n # will read as a missing operator (not a parameter)\n # label1(8 + 4) + label2\n # will read as missing an operator and as a bracketed\n # sub-action \n paramCount = self.parametersDefineOption()\n t.paramCount = paramCount\n # because body to come\n t.paramCount += 1 \n #print(\"count {}\".format(t.paramCount))\n\n elif(self.tok == OPERATER):\n # make node\n t = mkOperatorContextDefine(pos, markStr)\n \n # params, preset.count\n # one only (other is 'self')\n self._next()\n self.parameterDefine()\n self.parameterDefine()\n \n elif(self.tok == MONO_OPERATER):\n # make node\n t = mkMonoOperatorContextDefine(pos, markStr)\n \n # params, Preset count, one only.\n self._next()\n self.parameterDefine()\n\n # Kind (return)\n #self.optionalKindAnnotation(t)\n \n # Allocate\n #! skipOp\n if (not (self.isToken(OPERATER) and self.it.textOf() == '=')):\n self.expectedTokenError('Action Define', EQUALS)\n self._next()\n\n # body (exp seq)\n self.oneOrError(\n self.seqNameless, \n 'Action Define', \n 'CodeSeq Nameless'\n )\n self.ast.append(t)\n return commit\n \n #? converrted, but not of use?\n # def actionCall(self):\n # '''\n # (Identifier ~ oneOrMore(parameters) | ((Identifier | Operator) ~ parameter)\n # Definitions attached to code blocks\n # Used for both named and operater functions.\n # '''\n # #! this textOf is direct, but could be done by token lookup\n # commit = (\n # self.isToken(IDENTIFIER) or \n # self.isToken(OPERATER) or \n # self.isToken(MONO_OPERATER)\n # )\n # if (commit):\n # # node \n # t = mkContextCall(self.position(), self.textOf())\n \n # #! 
these need to be expressions, but not now...\n # if(self.tok == IDENTIFIER):\n # # params\n # self._next()\n # self.parametersOption(t.params)\n\n # elif(self.tok == OPERATER):\n # # params, preset.count\n # # one only (other is 'self')\n # self._next()\n # self.parameter(t.params)\n \n # elif(self.tok == MONO_OPERATER):\n # # params, Preset count, one only.\n # self._next()\n # self.parameter(t.params)\n \n # t.paramCount = paramCount \n # self.ast.append(t)\n # return commit\n \n## Root rule\n def root(self):\n try:\n # charge\n self._next()\n self.seqContents()\n # if we don't get StopIteration...\n self.error('Parsing did not complete: lastToken: {},'.format(\n tokenToString[self.tok], \n ))\n except StopIteration:\n # All ok\n print('parsed')\n pass\n","sub_path":"Syntaxer_rpn.py","file_name":"Syntaxer_rpn.py","file_ext":"py","file_size_in_byte":35373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"390828819","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nRemoteConsole -> DebugWindow\n\nAuthor: Remi GASCOU\nLast edited: July 2018\n\"\"\"\n\nimport sys\nfrom lib.core import AppInfos\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nclass DebugWindow(QWidget):\n def __init__(self, parent=None):\n #print(\"[LOG] Parent of DebugWindow\", parent)\n super(DebugWindow, self).__init__()\n self.setGeometry(300, 300, 300, 100)\n self.setWindowTitle('About')\n self._initUI()\n self.show()\n\n def _initUI(self):\n self.layout = QFormLayout()\n self.pushbutton_addtab = QPushButton(\"Add Tab\")\n self.pushbutton_addtab.clicked.connect(self.none)\n self.layout.addRow(\"Add Tab\", self.pushbutton_addtab)\n self.pushbutton_deltab = QPushButton(\"Del Tab\")\n self.pushbutton_deltab.clicked.connect(self.none)\n self.layout.addRow(\"Del Tab\", self.pushbutton_deltab)\n self.setLayout(self.layout)\n self.setGeometry(300, 300, 300, 100)\n self.setWindowTitle('Connect')\n\n @pyqtSlot()\n def none(self):\n pass\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = DebugWindow()\n sys.exit(app.exec_())\n","sub_path":"Python/revshell/lib/ui/windows/DebugWindow.py","file_name":"DebugWindow.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"594092851","text":"# Copyright 2012 Johannes Staffans\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport xml.etree.ElementTree as xml\nimport os\n\nclass Durations(object):\n\n def __init__(self):\n self._result_files = []\n self._durations = {} \n\n def add_result_file(self, file):\n self._result_files.append(file)\n\n def get_result_files(self):\n return self._result_files\n\n def parse_results(self):\n for result_file in self._result_files:\n suitestack = []\n for (event, node) in xml.iterparse(result_file, ['start', 'end']):\n if event == 'end' and node.tag == 'entry':\n suitestack.pop()\n\n if event == 'start':\n if node.tag == 'entry':\n 
suitestack.append('%suite%')\n if node.tag == 'string' and suitestack[-1] == '%suite%':\n suitestack.pop()\n suitestack.append(node.text)\n if node.tag == 'duration' and node.text != None:\n self.add_duration(suitestack, node.text)\n\n def add_duration(self, suitestack, duration):\n key = os.sep.join(suitestack[0:2])\n if key in self._durations:\n self._durations[key] += int(duration)\n else:\n self._durations[key] = int(duration)\n\n def get_durations(self):\n return self._durations \n\n","sub_path":"durations.py","file_name":"durations.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"415688571","text":"import json\nimport hashlib\nimport math\nfrom quart import jsonify, url_for, request\nfrom lnurl import LnurlPayResponse, LnurlPayActionResponse, LnurlErrorResponse # type: ignore\nfrom lnurl.types import LnurlPayMetadata\nfrom lnbits.core.services import create_invoice\n\nfrom . import copilot_ext\nfrom .crud import get_copilot\n\n\n@copilot_ext.route(\"/lnurl/\", methods=[\"GET\"])\nasync def lnurl_response(cp_id):\n cp = await get_copilot(cp_id)\n if not cp:\n return jsonify({\"status\": \"ERROR\", \"reason\": \"Copilot not found.\"})\n\n resp = LnurlPayResponse(\n callback=url_for(\"copilot.lnurl_callback\", cp_id=cp_id, _external=True),\n min_sendable=10000,\n max_sendable=50000000,\n metadata=LnurlPayMetadata(json.dumps([[\"text/plain\", str(cp.lnurl_title)]])),\n )\n\n params = resp.dict()\n if cp.show_message:\n params[\"commentAllowed\"] = 300\n\n return jsonify(params)\n\n\n@copilot_ext.route(\"/lnurl/cb/\", methods=[\"GET\"])\nasync def lnurl_callback(cp_id):\n cp = await get_copilot(cp_id)\n if not cp:\n return jsonify({\"status\": \"ERROR\", \"reason\": \"Copilot not found.\"})\n\n amount_received = int(request.args.get(\"amount\"))\n\n if amount_received < 10000:\n return (\n jsonify(\n LnurlErrorResponse(\n reason=f\"Amount {round(amount_received / 1000)} is smaller than minimum 10 sats.\"\n ).dict()\n ),\n )\n elif amount_received / 1000 > 10000000:\n return (\n jsonify(\n LnurlErrorResponse(\n reason=f\"Amount {round(amount_received / 1000)} is greater than maximum 50000.\"\n ).dict()\n ),\n )\n comment = \"\"\n if request.args.get(\"comment\"):\n comment = request.args.get(\"comment\")\n if len(comment or \"\") > 300:\n return jsonify(\n LnurlErrorResponse(\n reason=f\"Got a comment with {len(comment)} characters, but can only accept 300\"\n ).dict()\n )\n if len(comment) < 1:\n comment = \"none\"\n\n payment_hash, payment_request = await create_invoice(\n wallet_id=cp.wallet,\n amount=int(amount_received / 1000),\n memo=cp.lnurl_title,\n description_hash=hashlib.sha256(\n (\n LnurlPayMetadata(json.dumps([[\"text/plain\", str(cp.lnurl_title)]]))\n ).encode(\"utf-8\")\n ).digest(),\n extra={\"tag\": \"copilot\", \"copilot\": cp.id, \"comment\": comment},\n )\n resp = LnurlPayActionResponse(\n pr=payment_request,\n success_action=None,\n disposable=False,\n routes=[],\n )\n return jsonify(resp.dict())\n","sub_path":"lnbits/extensions/copilot/lnurl.py","file_name":"lnurl.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"273478385","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\n\n\nclass MyTestCase(TestCase):\n\n def test_one(self):\n for i in xrange(5):\n 
User.objects.create(username=str(i))\n        assert User.objects.count() == 5\n\n    def test_two(self):\n        for i in xrange(4):\n            User.objects.create(username=str(i))\n        assert User.objects.count() == 4\n","sub_path":"tests/test_django_base.py","file_name":"test_django_base.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"118535328","text":"import torch.nn as nn\r\nimport torch.nn.functional as f\r\n\r\n\r\n# reference: https://github.com/allenai/allennlp/blob/master/allennlp/modules/highway.py\r\nclass Highway(nn.Module):\r\n    \"\"\"\r\n    A `Highway layer `_ does a gated combination of a linear\r\n    transformation and a non-linear transformation of its input. :math:`y = g * x + (1 - g) *\r\n    f(A(x))`, where :math:`A` is a linear transformation, :math:`f` is an element-wise\r\n    non-linearity, and :math:`g` is an element-wise gate, computed as :math:`sigmoid(B(x))`.\r\n    This module will apply a fixed number of highway layers to its input, returning the final\r\n    result.\r\n    Parameters\r\n    ----------\r\n    input_dim : ``int``\r\n        The dimensionality of :math:`x`. We assume the input has shape ``(batch_size,\r\n        input_dim)``.\r\n    num_layers : ``int``, optional (default=``1``)\r\n        The number of highway layers to apply to the input.\r\n    activation : ``Callable[[torch.Tensor], torch.Tensor]``, optional (default=``f.relu``)\r\n        The non-linearity to use in the highway layers.\r\n    \"\"\"\r\n\r\n    def __init__(self, input_dim, num_layers=1, activation=f.relu):\r\n        super(Highway, self).__init__()\r\n        self._input_dim = input_dim\r\n        self._layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2)\r\n                                      for _ in range(num_layers)])\r\n        self._activation = activation\r\n        for layer in self._layers:\r\n            # We should bias the highway layer to just carry its input forward. We do that by\r\n            # setting the bias on `B(x)` to be positive, because that means `g` will be biased to\r\n            # be high, so we will carry the input forward.
The bias on `B(x)` is the second half\r\n # of the bias vector in each Linear layer.\r\n layer.bias[input_dim:].data.fill_(1)\r\n\r\n def forward(self, inputs):\r\n current_input = inputs\r\n for layer in self._layers:\r\n projected_input = layer(current_input)\r\n linear_part = current_input\r\n nonlinear_part, gate = projected_input.chunk(2, dim=-1)\r\n nonlinear_part = self._activation(nonlinear_part)\r\n gate = f.sigmoid(gate)\r\n current_input = gate * linear_part + (1 - gate) * nonlinear_part\r\n return current_input\r\n","sub_path":"nqa/modules/highway.py","file_name":"highway.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"178188288","text":"# -*- coding: UTF-8 -*-\n\nimport os,time,sys\nimport subprocess\nimport multiprocessing\nfrom datetime import datetime\n\nglobal g\ng = 1 #5分钟检测一次结果\n\nSN='100000f30108a99ccc43'\nlogname = 'wujiance.log'\nlocal = 'area:2' #图像所在区间\n\n\ndef prints(str):\n\n t = datetime.now().strftime('%Y%m%d %H:%M:%S')\n print(t + \" \" + str)\n\n\ndef logs():\n\n os.popen(\"adb -s \" + SN + \" wait-for-device\")\n\n os.popen(\"adb -s \" + SN + \" shell logcat -c\")#清除log缓存\n \n time.sleep(0.5)\n \n t1 = datetime.now().strftime('%Y%m%d %H:%M:%S')\n prints(\"start catch log\")\n\n #subprocess.Popen(\"adb -s \" + SN + \" logcat -v time -s roobosmart | findstr /c:\\\"people out\\\" /c:\\\"head\\\" > \" + logname)\n\n os.popen(\"adb -s \" + SN + \" shell \\\"logcat -v time -s roobosmart | grep -e 'people out' -e 'head'\\\" > \" + logname + \" &\")\n\n #kill()\n\ndef kill():\n \n time.sleep(g * 60)\n\n pids = os.popen(\"adb -s \" + SN + \" shell ps | findstr logcat\").read()\n\n if \"logcat\" in pids:\n\n u = pids.split()\n \n for i in range(0,len(u)):\n \n if u[i] == \"logcat\":\n\n pid = u[i-7]\n\n os.popen(\"adb -s \" + SN + \" shell kill \" + pid)\n \n prints(\"stop catch log\")\n \n\ndef tofile(name,str):\n \n f1 = open(name,\"a+\")\n f1.write(str)\n f1.flush()\n f1.close()\n \n global f0\n f0 = open(name).readlines()\n return len(f0) \n\ndef wrifile(file2):\n\n a = ['area:1','area:2','area:3','area:4','area:5','area:12','area:23','area:34','area:45','area:13','area:24','area:35','area:14','area:25','area:15']\n\n if local in a:\n\n a.remove(local)\n \n f2 = open(logname)\n f3 = open(file2,\"a+\")\n\n for line in f2.readlines():\n\n if file2 == 'area.txt':\n \n if 'head[1-0]' in line:\n \n for i in range(0,13):\n \n if a[i] in line: \n \n f3.write(line)\n f3.flush()\n \n if file2 == 'nopeople.txt':\n \n if 'people out' in line:\n \n f3.write(line)\n f3.flush()\n\n if file2 == 'oktime.txt':\n \n if 'head[1-0]' in line and local in line:\n \n f3.write(line)\n f3.flush() \n \n f3.close()\n f2.close()\n\n global f0\n f0 = open(file2).readlines()\n return len(f0)\n \n \n\ndef head():\n #检测到超过一个头肩\n\n head = os.popen(\"findstr /c:-1] \" + logname).read()\n\n tofile(\"head.txt\",head)\n \n prints(\"head check is wrong: %d times.\"%len(f0))\n \n\ndef area():\n #只检测到一个头肩,但是区间不对\n\n wrifile('area.txt')\n prints(\"area is wrong: %d times.\"%len(f0))\n\n\ndef nopeople():\n #没有检测到头肩\n\n wrifile('nopeople.txt')\n prints(\"can't find people: %d times.\"%len(f0))\n\n\ndef oktime():\n #只检测到一个头肩,并且区间正确\n\n wrifile('oktime.txt')\n prints(\"pass time is: %d times.\"%len(f0))\n\n\nif __name__ == '__main__':\n\n for i in range(1,10001):\n \n ti = g * i\n m = ti % 60\n h = ti / 60\n\n print(\"\\n++++++++++++++++ %d: %dh %dmin +++++++++++++++\"%(i,h,m))\n \n\n m = 
multiprocessing.Process(target=kill)\n m.start()\n\n logs()\n\n head()\n\n area()\n\n nopeople()\n\n oktime()\n \n","sub_path":"mypython/others/eng/ioe/old/wujian.py","file_name":"wujian.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"389228422","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom opengever.testing import FunctionalTestCase\nfrom zope.event import notify\nfrom zope.lifecycleevent import ObjectModifiedEvent\n\n\nclass TestTreeviewCacheInvalidation(FunctionalTestCase):\n\n def test_modifying_repo_folder_invalidates_treeview_cache(self):\n \"\"\"Changing a repo folder should invalidate the cache for the\n contents of the TreeView portlet.\n See opengever/repository/handlers.py\n \"\"\"\n repository = create(Builder('repository_root'))\n repo_folder = create(Builder('repository')\n .within(repository)\n .having(effective_title=\"Foo\"))\n\n tree_view = repository.restrictedTraverse('tree')\n\n # Test initial title of our repo folder\n html = tree_view.render()\n self.assertIn('1. Foo', html)\n\n # Now modify the repo folder and fire ObjectModified.\n # Changes should reflect in the output of the TreeView portlet\n repo_folder.effective_title = 'Bar'\n notify(ObjectModifiedEvent(repo_folder))\n html = tree_view.render()\n self.assertIn('1. Bar', html)\n","sub_path":"opengever/repository/tests/test_treeview_caching.py","file_name":"test_treeview_caching.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"115436242","text":"#!/usr/bin/python3\n# Changed to Python3 when Pi crashed 2020/1/20\n\nimport RPi.GPIO as GPIO\nimport time\nimport sys, os\nimport lirc\n\nsockid = lirc.init(\"jukebox_ctrl\", blocking=False)\n#sockid = lirc.init(\"jukebox_ctrl\")\n### To-Do\n# NEED a more efficient way to check for playing other than ps all the time. \n# On old RPi, this keeps proc usage up, it seems. If stopped, definitely don't \n# need to check as often.\n\n# Combine pushbutton and LIRC modes in one script w/commmand line switch\n\n# Done: Need to test.) Shutdown more gracefully : http://raspi.tv/2013/rpi-gpio-basics-3-how-to-exit-gpio-programs-cleanly-avoid-warnings-and-protect-your-pi\n# Done: Add LIRC? Not sure how to interrupt those 'waits' with button presses.\n# Done: Handled in myplayer.pl - Make killcodes client-specific\n\n# The hardware side is IR receiver and one LED:\n# off = STOP, on = PLAY, fast-blink = command received, slow-blink = waiting for current song to end to stop and await further instructions.\n\nplay_command = \"~/bin/myplayer.pl -cj &\"\n#stop_command = \"~/bin/myplayer.pl -a\"\nstop_command = \"~/bin/killmyplayer.pl\"\n\n# NOTE: Look to change these pins. \n# IR pin is set in: /etc/modules (currently at 2)\n# Pins 2 & 3 are in use by the i2c bus \n# This is why the status LED is on until the script is started.\n# Have to rewire, but look to maybe 4 and 17 and can use the ground in between (physical pins 7, 9, 11 ). 
\n# 5v will not be bundled.\n\nLEDPin = 3 # 3 for IR rig, 15 for pushbutton rig\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(LEDPin, GPIO.OUT)\n\ndef ledon():\n GPIO.output(LEDPin, GPIO.HIGH)\ndef ledoff():\n GPIO.output(LEDPin, GPIO.LOW)\n\ndef blink(rate,repeats):\n for x in range(0,repeats):\n ledon()\n time.sleep(rate)\n ledoff()\n time.sleep(rate)\n\n# Some useful constants:\nlong= 5 \nshort = 3\nflash = 1\nfast = .1\nslow = .5\n\nstop_flag = 0\nplaying = 0\nloop_count = 0\n# Here we go!\nprint(\"Waiting to do something . . . \")\ntry:\n while True:\t\t\t# Main loop\n play_button = 0\t\t# Need to reset every time through\n stop_button = 0\n skip_button = 0\n ir = lirc.nextcode()\t# Get latest IR code from lircd\n# print(\"Loop: \", loop_count, \" ir: \", ir)\n if (len(ir)==1):\t# If results aren't empty\n ir = str(ir[0])\t# De-listify\n else:\n ir = ''\t\t# Else \n if (ir == 'play'):\n play_button = 1\n elif (ir == 'stop'):\n stop_button = 1\n elif (ir == 'skip'):\n skip_button = 1\n # Find out if we (or someone else?) is already playing.\n # Reverse logic here, ps returns 0 if process found, 256 if not.\n if (loop_count == 0):\n ps=os.system(\"ps aux |grep 'myplayer.pl -cj' |grep -v grep >/dev/null\")\n elif (playing == 1):\n ps = 0;\n else:\n ps = 256\n if (ps==0): \t\t\t# Yes, playing\n playing = 1\n if (stop_flag == 1):\t# But got we got asked to stop\n blink(slow,flash)\t# So we slow blink until song ends.\n elif (os.system(\"ps aux| grep 'mpg123' |grep -v grep > /dev/null\")):\n # Still loading player, show that with fast, broken blinks.\n blink(fast,short)\n else: \n ledon()\t\t\t# Steady LED if playing unabated.\n if (stop_button):\t\t# Stop command received\n blink(fast,long)\t# Show we got a command\n# print(\"Stop button pressed.\")\n if (stop_flag == 1):\t# We already know . . . Not really needed, just barfs 'status.'\n print(\"Already put the brakes on, waiting for this song to end, hold on to your horses!\")\n else:\n print(\"Stopping after this song finishes.\")\n# ledoff()\n os.system(stop_command) # System call to actually plant kill seed. Need a way to make this client-specific.\n stop_flag = 1\n blink(slow,flash)\n elif (skip_button): \t# Read 'skip_button' and skip current song.\n# print \"Skip received.\"\n print(\"Killing current song!\")\n blink(fast,long)\n os.system('killall mpg123')\n else: \t\t\t\t# NOT playing\n ledoff()\t\t\t# No LED while full-stopped\n if (stop_flag==1):\n print(\"Fully stopped.\")\n playing = 0\n stop_flag = 0\t\t# Clear the brakes after full-stop\n if (play_button):\t\t# But now getting pressed into action again!\n# print(\"Play button pressed.\")\n print(\"Starting jukebox . . . \")\n os.system(play_command)\t# System call.\n blink(fast,flash)\t# Acknowledge button press\n playing = 1\n\n time.sleep(.5) \t\t\t# Adjust this\n if (loop_count > 39 or stop_flag == 1):\n # If we're expecting play to fully stop, we'll poll every loop.\n loop_count = 0\n else:\n loop_count += 1\nexcept KeyboardInterrupt: \n # here you put any code you want to run before the program \n # exits when you press CTRL+C \n print(\"Exiting gracefully.\\n\") # Print something on exit.\n \nexcept: \n # this catches ALL other exceptions including errors. \n #MEM note: This does not catch 'killall' (defautl sig. 
9?)\n    # You won't get any error messages for debugging \n    # so only use it once your code is working \n    print(\"Other error or exception occurred!\")\n\nfinally:  \n    GPIO.cleanup() # this ensures a clean exit\n    lirc.deinit()\n","sub_path":"irjuke.py","file_name":"irjuke.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"353248482","text":"\n\nfrom xai.brain.wordbase.nouns._inscription import _INSCRIPTION\n\n#class header\nclass _INSCRIPTIONS(_INSCRIPTION, ):\n\tdef __init__(self,): \n\t\t_INSCRIPTION.__init__(self)\n\t\tself.name = \"INSCRIPTIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"inscription\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_inscriptions.py","file_name":"_inscriptions.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"258017701","text":"from distutils.dep_util import newer_group\n\n\n# yes, this was almost entirely copy-pasted from\n# 'newer_pairwise()', this is just another convenience\n# function.\ndef newer_pairwise_group(sources_groups, targets):\n    \"\"\"Walk both arguments in parallel, testing if each source group is newer\n    than its corresponding target. Returns a pair of lists (sources_groups,\n    targets) where sources is newer than target, according to the semantics\n    of 'newer_group()'.\n    \"\"\"\n    if len(sources_groups) != len(targets):\n        raise ValueError(\"'sources_group' and 'targets' must be the same length\")\n\n    # build a pair of lists (sources_groups, targets) where source is newer\n    n_sources = []\n    n_targets = []\n    for i in range(len(sources_groups)):\n        if newer_group(sources_groups[i], targets[i]):\n            n_sources.append(sources_groups[i])\n            n_targets.append(targets[i])\n\n    return n_sources, n_targets\n","sub_path":"contrib/python/setuptools/py3/setuptools/dep_util.py","file_name":"dep_util.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"394801150","text":"import unittest\n\nfrom ansiblelint.rules import RulesCollection\nfrom ansiblelint.rules.UseHandlerRatherThanWhenChangedRule import (\n    UseHandlerRatherThanWhenChangedRule,\n)\nfrom ansiblelint.testing import RunFromText\n\nSUCCESS_TASKS = '''\n- name: print helpful error message\n  debug:\n    var: result\n  when: result.failed\n\n- name: do something when hello is output\n  debug:\n    msg: why isn't this a handler\n  when: result.stdout == \"hello\"\n\n- name: never actually debug\n  debug:\n    var: result\n  when: False\n\n- name: Dont execute this step\n  debug:\n    msg: \"debug message\"\n  when:\n    - false\n\n- name: check when with a list\n  debug:\n    var: result\n  when:\n    - conditionA\n    - conditionB\n'''\n\n\nFAIL_TASKS = '''\n- name: execute command\n  command: echo hello\n  register: result\n\n- name: this should be a handler\n  debug:\n    msg: why isn't this a handler\n  when: result.changed\n\n- name: this should be a handler 2\n  debug:\n    msg: why isn't this a handler\n  when: result|changed\n\n- name: this should be a handler 3\n  debug:\n    msg: why isn't this a handler\n  when: result.changed == true\n\n- name: this should be a handler 4\n  debug:\n    msg: why isn't this a handler\n  when: result['changed'] == true\n\n- name: this should be a handler 5\n  debug:\n    msg: why isn't this a handler\n  when:\n    - result['changed'] == true\n    - another_condition\n'''\n\n\nclass
TestUseHandlerRatherThanWhenChanged(unittest.TestCase):\n collection = RulesCollection()\n collection.register(UseHandlerRatherThanWhenChangedRule())\n\n def setUp(self):\n self.runner = RunFromText(self.collection)\n\n def test_success(self):\n results = self.runner.run_role_tasks_main(SUCCESS_TASKS)\n self.assertEqual(0, len(results))\n\n def test_fail(self):\n results = self.runner.run_role_tasks_main(FAIL_TASKS)\n self.assertEqual(5, len(results))\n","sub_path":"test/TestUseHandlerRatherThanWhenChanged.py","file_name":"TestUseHandlerRatherThanWhenChanged.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"206674116","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n \nfrom database_setup import Category, Base, Item, User\n \nengine = create_engine('sqlite:///catalog.db')\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession instance\nBase.metadata.bind = engine\n \nDBSession = sessionmaker(bind=engine)\n# A DBSession() instance establishes all conversations with the database\n# and represents a \"staging zone\" for all the objects loaded into the\n# database session object. Any change made against the objects in the\n# session won't be persisted into the database until you call\n# session.commit(). If you're not happy about the changes, you can\n# revert all of them back to the last commit by calling\n# session.rollback()\nsession = DBSession()\n\nDaniel = User(name=\"Daniel Burkard\")\nsession.add(Daniel)\n\nJohn = User(name=\"John Doe\")\nsession.add(John)\n\nBasketball = Category(name=\"Basketball\")\nsession.add(Basketball)\n\nBaselball = Category(name=\"Baselball\")\nsession.add(Baselball)\n\nSnowboarding = Category(name=\"Snowboarding\")\nsession.add(Snowboarding)\n\nJudo = Category(name=\"Judo\")\nsession.add(Judo)\n\nCapoeira = Category(name=\"Capoeira\")\nsession.add(Capoeira)\n\nIceHockey = Category(name=\"Ice Hockey\")\nsession.add(IceHockey)\n\nGoogles = Item(name=\"Googles\",category=Snowboarding,owner=John)\nsession.add(Googles)\n\nBat = Item(name=\"Bat\",category=Basketball,owner=John)\nsession.add(Bat)\n\nJudoGi = Item(name=\"Judogi\",\n\tcategory=Judo,\n\towner=Daniel,\n\timage=\"http://www.goods.pl/images/products/pl/Judogi_plecionka-biale_12oz_GTTA318_200.jpg\",\n\tdescription=\"Judogi is the formal Japanese name for the traditional uniform used for Judo practice and competition.\")\nsession.add(JudoGi)\n\nObi = Item(name=\"Obi\",category=Judo,owner=Daniel,image=\"http://3.bp.blogspot.com/-Rkxk5i9mMd4/Ul_rFJ1KwoI/AAAAAAAAAbU/6lw0hQSqZGg/s1600/Black%2BBelt.png\")\nsession.add(Obi)\n\nSnowboard = Item(name=\"Snowboard\",category=Snowboarding,owner=John)\nsession.add(Snowboard)\n\nBerimbau = Item(name=\"Berimbau\",\n\tcategory=Capoeira,\n\towner=Daniel,\n\timage=\"http://202.67.224.137/pfimage/53/743253_berimbau_capoeira.jpg\",\n\tdescription=\"The berimbau is a single-string percussion instrument, a musical bow, from Brazil. 
The berimbau was eventually incorporated into the practice of the Afro-Brazilian martial art capoeira, the berimbau (the soul of capoeira) leads the capoeiristas movement in the roda the faster the berimbau is playing the faster the capoeirista moves in the game.\")\nsession.add(Berimbau)\n\nAtabaque = Item(name=\"Atabaque\",\n\tcategory=Capoeira,\n\towner=Daniel,\n\timage=\"http://4.bp.blogspot.com/-7-RtMPOQueQ/TphDxsTAgTI/AAAAAAAAALA/CCFjK5khMrE/s1600/atabaque.jpg\",\n\tdescription=\"The atabaque is a tall, wooden, Afro-Brazilian hand drum. The shell is made traditionally of Jacaranda wood from Brazil. The head is traditionally made from calfskin. A system of ropes are intertwined around the body, connecting a metal ring near the base to the head.\")\nsession.add(Atabaque)\n\nsession.commit()\n\n","sub_path":"addData.py","file_name":"addData.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"485146195","text":"from flask import Flask, flash, jsonify, redirect, render_template, request, session\nfrom flask_session.__init__ import Session\nfrom tempfile import mkdtemp\nimport requests\n\n\napp = Flask(__name__)\n# app.config.from_object('app_config')\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n# SCOPES = ['https://www.googleapis.com/auth/calendar']\n# API_SERVICE_NAME = 'calendar'\n# API_VERSION = 'v3'\n# CLIENT_SECRET_FILE = \"/home/mprojekt/mysite/client_secret.json\"\n# API_KEY_FILE_NAME = 'credentials.json' # Name of json file you downloaded earlier\n# CALENDAR_ID = 'primary'\n# CREDS_FILENAME = 'credentials.json'\nURL = \"https://live-stream365.com/api/get.php?key=84bd360faa5112bad32d69407a25e53d&lang=ru\"\nheaders = {\n\n 'content-type': 'application/json',\n\n}\n\ndef api():\n response = requests.request(\"GET\", URL, headers=headers)\n response = response.json()\n return response\n@app.route(\"/\")\ndef home():\n response = requests.request(\"GET\", URL, headers=headers)\n response = response.json()\n\n print(response)\n # for i in response[\"Value\"]:\n # print(i['Start'])\n return render_template('home.html', response=response)\n@app.route(\"/event/\")\ndef event(id):\n response = requests.request(\"GET\", URL, headers=headers)\n response = response.json()\n # print(response['Value'][])\n for events in response['Value']:\n if events['Url'] == f\"https://live-stream365.com/online/84bd360faa5112bad32d69407a25e53d/{id}\":\n return render_template(\"event.html\", event=events)\n return render_template(\"event.html\")\n@app.route('/search', methods=[\"GET\", \"POST\"])\ndef search():\n event_list = []\n print(event_list)\n if request.method == 'POST':\n event_list = []\n response = api()\n for events in response['Value']:\n if request.form.get('search').lower() in events['Opp1'].lower() or request.form.get('search').lower() in events['Opp2'].lower() or events['Opp2'].lower() in request.form.get('search').lower() or events['Opp1'].lower() in request.form.get('search').lower() or request.form.get('search').lower() in events['Sport'].lower():\n event_list.append(events)\n print(events)\n print(event_list)\n 
return render_template(\"search.html\", event=event_list)\n return render_template(\"search.html\")\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"sport.py","file_name":"sport.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"327362729","text":"import nibabel as nib\nimport numpy as np\nfrom scipy.io import loadmat\nfrom scipy.signal import savgol_filter\nfrom scipy import stats\nimport glob\nimport matplotlib.pyplot as plt\nimport popeye.utilities as utils\nfrom popeye.spinach import generate_2dcos_receptive_field\n\ndef align(responses, center):\n \n response_rolled = np.zeros_like(responses)\n \n # align to 180\n for i in range(responses.shape[0]):\n s = responses[i]\n roll = find(s==1)\n response_rolled[i] = np.roll(responses[i], center-roll)\n \n return response_rolled\n\ndef stimgen(angles, xx):\n stims = []\n for angle in angles:\n stim = np.zeros(360)\n stim[np.round(angle-1)] = 1\n stims.append(stim)\n return np.array(stims)\n\ndef tangen(files, key):\n tans = []\n for f in files:\n xy = loadmat(f)[key]\n tans.extend(np.mod(np.arctan2(xy[:,1],xy[:,0]) * 180/np.pi,360))\n return np.array(tans)\n\ndef make_basis_function( x, mu, num_chans, sigma=5):\n # basis_func=np.power(np.cos(np.subtract(x, mu)),np.subtract(num_chans,(num_chans%2)))\n rf = np.exp(-((xx-mu)**2)/(2*sigma**2))\n \n return rf\n \ndef kfolder(bold, stimulus, folds, timepoints, shuffle=False):\n \n # all folds \n ks = np.arange(0, bold.shape[0], bold.shape[0]/folds)\n \n # all trials\n all_trials = set(list(np.arange(bold.shape[0])))\n \n # initialize output\n trn_IDX = []\n tst_IDX = []\n trn_STIM = []\n trn_BOLD = []\n tst_STIM = []\n tst_BOLD = []\n \n for k in ks:\n \n ### sequestration\n tst_idx = np.arange(k,k+bold.shape[0]/folds).astype('int64')\n trn_idx = np.array(list(all_trials - set(tst_idx))).astype('int64')\n \n trn_IDX.append(trn_idx)\n tst_IDX.append([tst_idx,])\n \n ### training\n b = bold[trn_idx]\n b = b[...,timepoints]\n trn_BOLD.append(b)\n trn_STIM.append(stimulus[trn_idx])\n \n ### testing\n b = bold[tst_idx]\n b = b[...,timepoints]\n tst_BOLD.append(b)\n tst_STIM.append(stimulus[tst_idx])\n \n return trn_STIM, trn_BOLD, tst_STIM, tst_BOLD, trn_IDX, tst_IDX\n\n\n###########\n# IMAGING #\n###########\n\n# mask\nmask = nib.load('sc_both_150mm.nii.gz').get_data().astype(bool)\n\n# init\nbold = None\nmats = []\n\n# loop, find, load\nSESS = glob.glob('SESS*')\nfor sess in SESS:\n \n # extraction 1\n for r in np.arange(1,11,1):\n \n # extract\n f = utils.find_files('./%s/fodder_150mm' %(sess), 'func_zscore%d.nii.gz' %(r))[0]\n print(f)\n dat = nib.load(f).get_data()\n dat = dat[mask]\n dat = utils.zscore(dat,-1)\n dat = savgol_filter(dat, 3, 1, axis=-1)\n \n if r>1:\n bold = np.concatenate((bold,dat[:,4:-4].reshape(16,np.sum(mask),14)),0)\n else:\n bold = dat[:,4:-4].reshape(16,np.sum(mask),14)\n \n mat = [utils.find_files('./%s/behav/' %(sess), '*_r%.02d*2017*mat' %(r))[0] for r in np.arange(1,11,2)]\n mats.extend(mat)\n \n#########\n# model #\n#########\n\nnum_chans = 8\n\n# x-values for evaluating functions. 
\nxx = np.arange(360)\n\n# channel centers\nchan_centers = np.linspace(0,360,num_chans)\n\n# make the basis functions\nbasis_set = np.array([make_basis_function(xx, chan_center, num_chans, 360/num_chans/2) for chan_center in chan_centers])\n\n########\n# CUE #\n#######\nstimulus, xys, angles = stimgen(mats, 'cue_xy', xx)\ntt = np.arange(13)\ntuning_cue = np.zeros((tt.shape[0],xx.shape[0],))\nstim_tuning_cue = np.zeros((tt.shape[0],xx.shape[0],))\n\ntidx = 0\n\nfor t in tt:\n \n trn_STIM, trn_BOLD, tst_STIM, tst_BOLD, trn_IDX, tst_IDX = kfolder(bold, stimulus, bold.shape[0], t)\n \n recons = []\n actual = []\n \n for k in range(len(trn_STIM)):\n \n trn_data = trn_BOLD[k]\n tst_data = tst_BOLD[k]\n \n # training stim\n trnX = trn_STIM[k] @ basis_set.T\n trnX /= trnX.max()\n \n # weights\n w = linalg.inv(trnX.T @ trnX) @ trnX.T @ trn_data.T\n \n # responses\n chan_resp = (linalg.inv(w @ w.T) @ w @ tst_data).T\n \n # recon\n recons.append(np.squeeze(chan_resp @ basis_set))\n actual.append(np.squeeze(tst_STIM[k]))\n \n # tuning\n recons = np.array(recons)\n recons_roll = align(recons, 180)\n tuning_cue[t] = np.mean(recons_rolled,0)\n tidx += 1\n\n########\n# MGS #\n#######\n\nstimulus, xys, angles = stimgen(mats, 'mgs_xy', xx)\ntt = np.arange(13)\ntuning_mgs = np.zeros((tt.shape[0],xx.shape[0],))\nstim_tuning_mgs = np.zeros((tt.shape[0],xx.shape[0],))\ntidx = 0\n\nfor t in tt:\n \n trn_STIM, trn_BOLD, tst_STIM, tst_BOLD, trn_IDX, tst_IDX = kfolder(bold, stimulus, bold.shape[1], t)\n \n recons = []\n actual = []\n \n for k in range(len(trn_STIM)):\n \n trn_data = trn_BOLD[k]\n tst_data = tst_BOLD[k]\n \n # training stim\n trnX = trn_STIM[k] @ basis_set.T\n trnX /= trnX.max()\n \n # weights\n w = linalg.inv(trnX.T @ trnX) @ trnX.T @ trn_data.T\n \n # responses\n chan_resp = (linalg.inv(w @ w.T) @ w @ tst_data).T\n \n # recon\n recons.append(np.squeeze(chan_resp @ basis_set))\n actual.append(np.squeeze(tst_STIM[k]))\n \n # tuning\n recons = np.array(recons)\n recons_roll = align(recons, 180)\n tuning_mgs[t] = np.mean(recons_rolled,0)\n tidx += 1\n \n########\n# VGS #\n#######\n\nstimulus, xys, angles = stimgen(mats, 'vgs_xy', xx)\ntt = np.arange(13)\ntuning_vgs = np.zeros((tt.shape[0],xx.shape[0],))\nstim_tuning_vgs = np.zeros((tt.shape[0],xx.shape[0],))\ntidx = 0\n\nfor t in tt:\n \n trn_STIM, trn_BOLD, tst_STIM, tst_BOLD, trn_IDX, tst_IDX = kfolder(bold, stimulus, bold.shape[1], t)\n \n recons = []\n actual = []\n \n for k in range(len(trn_STIM)):\n \n trn_data = trn_BOLD[k]\n tst_data = tst_BOLD[k]\n \n # training stim\n trnX = trn_STIM[k] @ basis_set.T\n trnX /= trnX.max()\n \n # weights\n w = linalg.inv(trnX.T @ trnX) @ trnX.T @ trn_data.T\n \n # responses\n chan_resp = (linalg.inv(w @ w.T) @ w @ tst_data).T\n \n # recon\n recons.append(np.squeeze(chan_resp @ basis_set))\n actual.append(np.squeeze(tst_STIM[k]))\n \n # tuning\n recons = np.array(recons)\n recons_roll = align(recons, 180)\n tuning_vgs[t] = np.mean(recons_rolled,0)\n tidx += 1\n\n\n\ntuning = np.array([tuning_cue, tuning_mgs, tuning_vgs])\nstim_tuning = np.array([stim_tuning_cue, stim_tuning_mgs, stim_tuning_vgs])\n\n\n\n","sub_path":"analysis/doublesaccade_spatial_iem_1d_trwise.py","file_name":"doublesaccade_spatial_iem_1d_trwise.py","file_ext":"py","file_size_in_byte":6600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"498609709","text":"from tkinter import *\r\nimport firebasemanager\r\nimport pirateDb\r\n\r\nwindow1 = Tk()\r\n#popup new pirate window\r\nwindow2 = 
\"\"\r\n\r\nframe1 = Frame(window1)\r\nframe2 = Frame(window1)\r\nframe3 = Frame(window1)\r\nframe4 = Frame(window1)\r\n\r\nlabel1 = Label(frame1, text=\"Pirate Database\", font=\"Arial 20\", bg=\"salmon\")\r\nlabel1.pack()\r\n\r\nlabel2 = Label(frame2, text=\"Search\", font=\"Arial 20\", bg=\"olive\")\r\nlabel2.grid(row=0, column=0)\r\n\r\ndef searchUpdate(e):\r\n doFilter()\r\n\r\nentry = Entry(frame2)\r\nentry.bind(\"\", searchUpdate)\r\nentry.grid(row=0, column=1)\r\n\r\n#searchButton = Button(frame2, text=\"Go\")\r\n#searchButton.grid(row=0, column=2)\r\ndef doFilter():\r\n filt = entry.get()\r\n listbox.delete(0, \"end\")\r\n for pirate in d:\r\n if (filt.lower() in d[pirate] [\"name\"].lower() or\r\n filt.lower() in d[pirate] [\"ship\"].lower()):\r\n listbox.insert(END, d[pirate] [\"name\"])\r\n\r\ndef display(pirateId):\r\n label3.config(text=d[pirateId] [\"name\"])\r\n shipLabel.config(text=d[pirateId] [\"ship\"])\r\n if d[pirateId] [\"fictional\"] == \"True\":\r\n ficLabel.config(text=\"Fictional\")\r\n else:\r\n ficLabel.config(text=\"Real\")\r\n\r\ndef scrollRight():\r\n index = int(listbox.curselection() [0] )\r\n listbox.selection_clear(index)\r\n if index == len(d) -1:\r\n index = 0\r\n else:\r\n index += 1\r\n updateListbox(index)\r\n \r\ndef scrollLeft():\r\n index = int(listbox.curselection() [0] )\r\n listbox.selection_clear(index)\r\n if index == 0:\r\n index = len(d) -1\r\n else:\r\n index -= 1\r\n updateListbox(index)\r\n \r\ndef updateListbox(index):\r\n listbox.selection_set(index)\r\n piratename = listbox.get(index)\r\n for pirate in d:\r\n if piratename.lower() == d[pirate] [\"name\"].lower():\r\n display(pirate)\r\n\r\nlabel3 = Label(frame3, text=\"Pirate Name\", font=\"Arial 20\", bg=\"cyan\")\r\nlabel3.grid(row=0, column=0)\r\nleftImg = PhotoImage(file=\"left-arrow.gif\")\r\nrightImg = PhotoImage(file=\"right-arrow.gif\")\r\nleftButton = Button(frame3, image=leftImg, command=scrollLeft)\r\nrightButton = Button(frame3, image=rightImg, command=scrollRight)\r\nleftButton.grid(row=1, column=0)\r\nrightButton.grid(row=1, column=2)\r\nplaceholder = PhotoImage(file=\"pirate.gif\")\r\npiratePic = Label(frame3, image=placeholder, text=\"Arr\")\r\npiratePic.grid(row=1, column=1)\r\nshipLabel = Label(frame3, text=\"Ship Name\", font=\"Arial 20\")\r\nshipLabel.grid(row=2, column=1)\r\nficLabel = Label(frame3, text=\"Fictional?\", font=\"Arial 20\")\r\nficLabel.grid(row=3, column=1)\r\n\r\ndef onselect(e):\r\n w = e.widget\r\n try:\r\n index = int(w.curselection()[0])\r\n piratename = w.get(index)\r\n for pirate in d:\r\n if piratename.lower() == d[pirate] [\"name\"].lower():\r\n display(pirate)\r\n except:\r\n pass\r\n\r\nlistbox = Listbox(frame4, font=\"Times 20\")\r\nlistbox.bind(\"<>\", onselect)\r\nlistbox.pack()\r\nfm = firebasemanager.FirebaseManager()\r\nd = fm.getAllPirates()\r\n\r\n#function for new pirate button\r\ndef new_pirate():\r\n global window2\r\n window2 = Toplevel()\r\n pirateDb.loadWindow(window2)\r\n\r\ndef listDelete():\r\n index = int(listbox.curselection() [0] )\r\n listbox.selection_set(index)\r\n piratename = listbox.get(index)\r\n for pirate in d:\r\n if piratename.lower() == d[pirate] [\"name\"].lower():\r\n #save the id becuase you can't delete while looping through a dictionary\r\n deletekey = pirate\r\n #use the firebase manager to delte from the database\r\n fm.DeletePirate(deletekey)\r\n #also need to remove from the dictionary\r\n d.pop(deletekey)\r\n #update the listbox\r\n listbox.delete(ANCHOR)\r\n #or use 
doFilter()\r\n\r\ndeleteButton = Button(frame4, text=\"Delete\", command=listDelete)\r\ndeleteButton.pack()\r\nnewButton = Button(frame4, text=\"New Pirate\", command=new_pirate)\r\nnewButton.pack()\r\n\r\ndef refresh_list():\r\n global d\r\n #refresh the dictionary from Firebase\r\n d = fm.getAllPirates()\r\n #clear out the list box\r\n listbox.delete(0, \"end\")\r\n #refresh the list box\r\n fillListBox()\r\n\r\ndef fillListBox():\r\n global d\r\n for item in d:\r\n pirate = d[item]\r\n listbox.insert(END, pirate[\"name\"])\r\n #set the initial selection\r\n updateListbox(0)\r\n\r\ndef quitWindow():\r\n window1.destroy()\r\n\r\nexitButton = Button(frame4, text=\"Quit\", command=quitWindow)\r\nexitButton.pack()\r\n\r\nrefreshButton= Button(frame4, text=\"Refresh\", command=refresh_list)\r\nrefreshButton.pack()\r\n \r\nframe1.grid(row=0, column=0)\r\nframe2.grid(row=0, column=1)\r\nframe3.grid(row=1, column=0)\r\nframe4.grid(row=1, column=1)\r\n\r\nwindow1.mainloop()\r\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"90756083","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nnorm= lambda x: 1/np.sqrt(2*np.pi)*np.exp(-1/2*x**2)\n\nx= 0\nsample_num =1000\nx_list= []\nfor i in range(sample_num):\n\n y= np.random.uniform(0, norm(x))\n x_lower= -math.sqrt(2*math.log(1/(math.sqrt(2*math.pi)*y)))\n x_upper= math.sqrt(2*math.log(1/(math.sqrt(2*math.pi)*y)))\n x_list.append(x)\n x= np.random.uniform(x_lower, x_upper)\n\nX= np.linspace(-10,10, 1000)\nplt.figure()\nplt.hist(np.array(x_list),bins=50, density= 1/sample_num)\nplt.plot(X, norm(X))","sub_path":"slice_sampling_tut.py","file_name":"slice_sampling_tut.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"329808972","text":"# -*- coding: utf-8 -*-\n\"\"\" Unit tests for the verta.deployment.DeployedModel class. 
\"\"\"\nimport json\nimport os\nimport random\nfrom typing import Any, Dict\n\nimport hypothesis\nimport pytest\n\nfrom tests import utils\n\nnp = pytest.importorskip(\"numpy\")\npd = pytest.importorskip(\"pandas\")\nfrom requests import Session, HTTPError\nfrom requests.exceptions import RetryError\nimport responses\nfrom unittest.mock import patch\nfrom urllib3 import Retry\nfrom hypothesis import strategies as st\nfrom hypothesis import given\n\nfrom verta.credentials import EmailCredentials\nfrom verta.deployment import DeployedModel\nfrom verta._internal_utils import http_session\n\nPREDICTION_URL: str = 'https://test.dev.verta.ai/api/v1/predict/test_path'\nBATCH_PREDICTION_URL: str = 'https://test.dev.verta.ai/api/v1/batch-predict/test_path'\nTOKEN: str = '12345678-xxxx-1a2b-3c4d-e5f6g7h8'\nMOCK_RETRY: Retry = http_session.retry_config(\n max_retries=http_session.DEFAULT_MAX_RETRIES,\n status_forcelist=http_session.DEFAULT_STATUS_FORCELIST,\n backoff_factor=http_session.DEFAULT_BACKOFF_FACTOR\n)\nMOCK_SESSION: Session = http_session.init_session(retry=MOCK_RETRY)\nVERTA_CLASS = 'verta.deployment._deployedmodel'\n\n\n@patch.dict(\n os.environ,\n {'VERTA_EMAIL': 'test_email@verta.ai',\n 'VERTA_DEV_KEY': '123test1232dev1232key123'},\n)\n@patch(\n f'{VERTA_CLASS}.http_session.retry_config',\n return_value=MOCK_RETRY,\n)\n@patch(\n f'{VERTA_CLASS}.http_session.init_session',\n return_value=MOCK_SESSION,\n)\ndef test_deployed_model_init(mock_session, mock_retry) -> None:\n \"\"\" Validate the creation of an object of deployment.DeployedModel class with desired Session. \"\"\"\n creds = EmailCredentials.load_from_os_env()\n created_dm_details = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n ).__dict__\n expected_dm_details: Dict[str, Any] = {\n '_prediction_url': PREDICTION_URL,\n '_credentials': creds,\n '_access_token': '12345678-xxxx-1a2b-3c4d-e5f6g7h8',\n '_retry_config': mock_retry.return_value,\n '_session': mock_session.return_value\n }\n assert created_dm_details['_prediction_url'] == expected_dm_details['_prediction_url']\n assert created_dm_details['_access_token'] == expected_dm_details['_access_token']\n assert created_dm_details['_credentials'] == expected_dm_details['_credentials']\n assert created_dm_details['_session'] == expected_dm_details['_session']\n\n\ndef test_predict_http_defaults_200(mocked_responses) -> None:\n \"\"\" Calling predict with the default settings and getting a 200 response returns the response as expected. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={\"test_key\": \"test_val\"},\n status=200,\n headers={'verta-request-id': 'hereISaTESTidFROMtheUSER'},\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n prediction_response = dm.predict(x=['test_prediction'])\n assert prediction_response == {\"test_key\": \"test_val\"}\n\n\ndef test_predict_http_defaults_404_retry_error(mocked_responses) -> None:\n \"\"\" Calling predict with the default settings and getting a 404 results in retries being exhausted. 
\"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=404,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict(x=['test_prediction'])\n\n\ndef test_predict_http_defaults_429_retry_error(mocked_responses) -> None:\n \"\"\" Calling predict with the default settings and getting a 429 results in retries being exhausted. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=429,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict(x=['test_prediction'])\n\n\ndef test_predict_http_defaults_status_not_in_retry(mocked_responses) -> None:\n \"\"\" Verify that calling predict with the default settings and getting a response not in `status_forcelist`\n does not result in retries. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n headers={'verta-request-id': 'hereISaTESTidFROMtheUSER'},\n json={},\n status=999,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n dm.predict(x=['test_prediction'])\n mocked_responses.assert_call_count(PREDICTION_URL, 1)\n\n\ndef test_predict_http_default_max_retry_observed(mocked_responses) -> None:\n \"\"\" Calling predict with the default settings and getting a 429 results in retries being exhausted. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=429,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict(x=['test_prediction'])\n mocked_responses.assert_call_count(PREDICTION_URL, http_session.DEFAULT_MAX_RETRIES + 1)\n # max_retries + 1 original attempt = total call count\n\n\ndef test_predict_with_altered_retry_config(mocked_responses) -> None:\n \"\"\" Calling predict with custom retry parameters changes the retry config and makes the correct requests. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=888,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict(\n x=['test_prediction'],\n max_retries=9,\n retry_status={888},\n backoff_factor=0.1\n )\n mocked_responses.assert_call_count(PREDICTION_URL, 10)\n\n\ndef test_predict_with_prediction_id_provided(mocked_responses) -> None:\n \"\"\" Calling predict while providing a value for `prediction_id` updates and includes the headers in the request. 
\"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={'test1': 'test1'},\n status=200,\n match=[responses.matchers.header_matcher({'verta-request-id': 'hereISaTESTidFROMtheUSER'})],\n headers={'verta-request-id': 'hereISaTESTidFROMtheUSER'},\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n dm.predict(\n x=['test_prediction'],\n prediction_id='hereISaTESTidFROMtheUSER',\n )\n mocked_responses.assert_call_count(PREDICTION_URL, 1)\n\n\ndef test_predict_with_id_response_includes_id(mocked_responses) -> None:\n \"\"\" Calling predict_with_id returns both the ID from teh request response, and the prediction results \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n headers={'verta-request-id': 'AutoGeneratedTestId'},\n # Adds this header to the mocked http response.\n json={'test2': 'test2'},\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n prediction = dm.predict_with_id(x=['test_prediction'])\n assert prediction == ('AutoGeneratedTestId', {'test2': 'test2'})\n\n\ndef test_predict_with_id_prediction_id_provided(mocked_responses) -> None:\n \"\"\" Calling predict_with_id while including the `prediction_id` adds the id to the header of the request and\n includes the id provided in the response with the prediction results \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n match=[responses.matchers.header_matcher({'verta-request-id': 'hereISomeTESTidFROMtheUSER'})],\n # Makes sure the prediction id was included as a header in the request\n headers={'verta-request-id': 'hereISomeTESTidFROMtheUSER'},\n # Adds this header to the mocked http response.\n json={'test2': 'test2'},\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n prediction = dm.predict_with_id(\n x=['test_prediction'],\n prediction_id='hereISomeTESTidFROMtheUSER'\n )\n assert prediction == ('hereISomeTESTidFROMtheUSER', {'test2': 'test2'})\n\n\ndef test_predict_with_id_http_defaults_200(mocked_responses) -> None:\n \"\"\" Calling predict with the default settings and getting a 200 response returns the response as expected. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={\"test_key\": \"test_val\"},\n status=200,\n headers={'verta-request-id': 'hereISthisTESTidFROMtheUSER'},\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n prediction_response = dm.predict_with_id(x=['test_prediction'])\n assert prediction_response == ('hereISthisTESTidFROMtheUSER', {\"test_key\": \"test_val\"})\n\n\ndef test_predict_with_id_http_defaults_404_retry_error(mocked_responses) -> None:\n \"\"\" Calling predict with the default settings and getting a 404 results in retries being exhausted. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=404,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict_with_id(x=['test_prediction'])\n\n\ndef test_predict_with_id_altered_retry_config(mocked_responses) -> None:\n \"\"\" Calling predict with custom retry parameters changes the retry config and makes the correct requests. 
\"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=888,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict_with_id(\n x=['test_prediction'],\n max_retries=9,\n retry_status={888},\n backoff_factor=0.1\n )\n mocked_responses.assert_call_count(PREDICTION_URL, 10)\n\n\ndef test_default_retry_after_custom_retry(mocked_responses) -> None:\n \"\"\" Calling predict with default params after calling predict with custom\n params uses default retry settings and not the custom settings from\n the previous call. \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=777,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RetryError):\n dm.predict(\n x=['test_prediction'],\n max_retries=1,\n retry_status={777},\n backoff_factor=0.1,\n )\n mocked_responses.assert_call_count(PREDICTION_URL, 2)\n # 1 attempt + 1 retry = 2\n\n mocked_responses.post(\n PREDICTION_URL,\n json={},\n status=429,\n )\n with pytest.raises(RetryError):\n dm.predict(x=['test_prediction']) # use defaults\n mocked_responses.assert_call_count(PREDICTION_URL, 16)\n # previous 2 + 1 attempt + default 13 retries = 16\n\n\ndef test_predict_400_error_message_extraction(mocked_responses) -> None:\n \"\"\" Getting a 400 will render the attached message form the backend if present \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n json={\"message\": \"Here be a message in the response\"},\n status=400,\n headers={'verta-request-id': 'AutoGeneratedTestId'},\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(RuntimeError) as err:\n dm.predict(x=['test_prediction'])\n assert str(err.value) == (\n 'deployed model encountered an error: Here be a message in the response'\n )\n\n\ndef test_predict_400_error_message_missing(mocked_responses) -> None:\n \"\"\" Getting a 401 error, with no message provided by the back-end will fall back\n to raise_for_http_error style error formatting.\n \"\"\"\n mocked_responses.post(\n PREDICTION_URL,\n status=400,\n headers={'verta-request-id': 'AutoGeneratedTestId'},\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n with pytest.raises(HTTPError) as err:\n dm.predict(x=['test_prediction'])\n assert str(err.value)[:-30] == (\n '400 Client Error: Bad Request for url: '\n 'https://test.dev.verta.ai/api/v1/predict/test_path at '\n )\n\n\ndef test_batch_predict_with_one_batch_with_no_index(mocked_responses) -> None:\n \"\"\" Call batch_predict with a single batch. 
\"\"\"\n expected_df = pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], \"B\": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]})\n expected_df_body = json.dumps(expected_df.to_dict(orient=\"split\"))\n mocked_responses.post(\n BATCH_PREDICTION_URL,\n body=expected_df_body,\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n # the input below is entirely irrelevant since it's smaller than the batch size\n prediction_df = dm.batch_predict(pd.DataFrame({\"hi\": \"bye\"}, index=[1]), 10)\n pd.testing.assert_frame_equal(expected_df, prediction_df)\n\n\ndef test_batch_predict_with_one_batch_with_index(mocked_responses) -> None:\n \"\"\" Call batch_predict with a single batch, where the output has an index. \"\"\"\n expected_df = pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], \"B\": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"])\n expected_df_body = json.dumps(expected_df.to_dict(orient=\"split\"))\n mocked_responses.post(\n BATCH_PREDICTION_URL,\n body=expected_df_body,\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n # the input below is entirely irrelevant since it's smaller than the batch size\n prediction_df = dm.batch_predict(pd.DataFrame({\"hi\": \"bye\"}, index=[1]), 10)\n pd.testing.assert_frame_equal(expected_df, prediction_df)\n\n\ndef test_batch_predict_with_five_batches_with_no_indexes(mocked_responses) -> None:\n \"\"\" Since the input has 5 rows and we're providing a batch_size of 1, we expect 5 batches.\"\"\"\n expected_df_list = [pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}),\n pd.DataFrame({\"B\": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]}),\n pd.DataFrame({\"C\": [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]}),\n pd.DataFrame({\"D\": [31, 32, 33, 34, 35, 36, 37, 38, 39, 40]}),\n pd.DataFrame({\"E\": [41, 42, 43, 44, 45, 46, 47, 48, 49, 50]}),\n ]\n for expected_df in expected_df_list:\n mocked_responses.add(\n responses.POST,\n BATCH_PREDICTION_URL,\n body=json.dumps(expected_df.to_dict(orient=\"split\")),\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n input_df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5], \"b\": [11, 12, 13, 14, 15]})\n prediction_df = dm.batch_predict(input_df, batch_size=1)\n expected_df = pd.concat(expected_df_list)\n pd.testing.assert_frame_equal(expected_df, prediction_df)\n\n\ndef test_batch_predict_with_batches_and_indexes(mocked_responses) -> None:\n \"\"\" Since the input has 5 rows and we're providing a batch_size of 1, we expect 5 batches.\n Include an example of an index.\n \"\"\"\n expected_df_list = [\n pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}, index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]),\n pd.DataFrame({\"B\": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]),\n pd.DataFrame({\"C\": [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]),\n pd.DataFrame({\"D\": [31, 32, 33, 34, 35, 36, 37, 38, 39, 40]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]),\n pd.DataFrame({\"E\": [41, 42, 43, 44, 45, 46, 47, 48, 49, 50]},\n index=[\"a\", \"b\", 
\"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]),\n ]\n for expected_df in expected_df_list:\n mocked_responses.add(\n responses.POST,\n BATCH_PREDICTION_URL,\n body=json.dumps(expected_df.to_dict(orient=\"split\")),\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n input_df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5], \"b\": [11, 12, 13, 14, 15]}, index=[\"A\", \"B\", \"C\", \"D\", \"E\"])\n prediction_df = dm.batch_predict(input_df, 1)\n expected_final_df = pd.concat(expected_df_list)\n pd.testing.assert_frame_equal(expected_final_df, prediction_df)\n\n\n@st.composite\ndef generate_data(draw, max_rows=50, max_cols=6):\n \"\"\" Return a dict that represents a dataframe. Generates ints, floats, and strings.\"\"\"\n num_rows = draw(st.integers(min_value=1, max_value=max_rows))\n num_cols = draw(st.integers(min_value=1, max_value=max_cols))\n col_names = draw(st.lists(st.text(), max_size=num_cols, min_size=num_cols, unique=True))\n data = {}\n for name in col_names:\n\n type_probability = utils.gen_probability()\n if type_probability <= 0.3:\n col_values = st.integers()\n elif type_probability <= 0.6:\n col_values = st.floats()\n else:\n col_values = st.text()\n col = draw(st.lists(col_values, max_size=num_rows, min_size=num_rows))\n data[name] = col\n\n out_dict = {\"data\": data}\n index_probability = utils.gen_probability()\n if index_probability <= 0.5:\n index = draw(st.lists(st.text(), max_size=num_rows, min_size=num_rows))\n out_dict[\"index\"] = index\n return out_dict\n\n\n@hypothesis.settings(deadline=None) # client utils make DataFrame handling slow at first\n@given(json_df=generate_data(), batch_size=st.integers(min_value=1, max_value=10))\ndef test_batch(json_df, batch_size) -> None:\n \"\"\" Test that the batch_predict method works with a variety of inputs. 
\"\"\"\n with responses.RequestsMock() as rsps:\n if \"index\" in json_df:\n input_df = pd.DataFrame(json_df[\"data\"], index=json_df[\"index\"])\n else:\n input_df = pd.DataFrame(json_df[\"data\"])\n for i in range(0, len(input_df), batch_size):\n batch = input_df.iloc[i:i + batch_size]\n serialized_batch = batch.to_dict(orient=\"split\")\n rsps.add(\n responses.POST,\n BATCH_PREDICTION_URL,\n body=json.dumps(serialized_batch),\n status=200,\n )\n creds = EmailCredentials.load_from_os_env()\n dm = DeployedModel(\n prediction_url=PREDICTION_URL,\n creds=creds,\n token=TOKEN,\n )\n prediction_df = dm.batch_predict(input_df, batch_size=batch_size)\n pd.testing.assert_frame_equal(input_df, prediction_df)\n\n","sub_path":"client/verta/tests/unit_tests/deployment/test_deployed_model.py","file_name":"test_deployed_model.py","file_ext":"py","file_size_in_byte":20067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"601495852","text":"import requests\nfrom bs4 import BeautifulSoup\n# try:\n# kv = {'wd':'python'}\n# r = requests.get(\"http://www.baidu.com/s\",params=kv)\n# r.raise_for_status() #如果请求的返回码不是200 则抛出异常\n# # 1 用r.status_code去分析返回的状态码 200为正确返回\n# # 2 如果证券返回 使用r.encoding(head中设置的encoding) r.text(字符串形式) r.apparent_encoding(从内容分析出来的编码) r.content(二进制内容如图片)\n# print(r.status_code)\n# print(r.request.url)\n# if r.status_code == 200:\n# print(r.text[:100])\n# print(r.encoding)\n# print(r.apparent_encoding)\n# r.encoding = r.apparent_encoding #设置当前编码为内容编码\n# print(r.text[:100])\n# except:\n# print(\"err\")\n# requests.HTTPError\n\npath = \"https://www.ccc860.com/htm/gif0/817.htm\"\ntry:\n r = requests.get(path)\n r.raise_for_status()\n soup = BeautifulSoup(r.text,'html.parser')\n print(soup.body.prettify())\n for theTag in soup.body.descendants:\n if theTag.name == 'img':\n rr = requests.get(theTag.attrs['src'])\n fileName = (theTag.attrs['src']).split('/')[-1]\n f = open(fileName,'wb')\n f.write(r.content)\nexcept:\n print(\"no\")","sub_path":"requestLib/requestTest.py","file_name":"requestTest.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"115497567","text":"from flask import jsonify, Flask, request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom random import choice, shuffle\r\nimport time\r\nimport os\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n# Connect to database\r\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(\"DATABASE_URL\")\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\ndb = SQLAlchemy(app)\r\n\r\n\r\nSECONDS_IN_30_DAYS = 86400 * 30\r\nSTART_TIME = round(time.time())\r\nEND_TIME = START_TIME + SECONDS_IN_30_DAYS\r\n\r\n\r\n# User TABLE Configuration\r\nclass User(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n email = db.Column(db.String(250), unique=True, nullable=False)\r\n token = db.Column(db.String(250), unique=True, nullable=False)\r\n start_time = db.Column(db.Integer, unique=False, nullable=False)\r\n end_time = db.Column(db.Integer, unique=False, nullable=False)\r\n app1_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app2_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app3_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app4_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app5_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app6_status = db.Column(db.Integer, unique=False, nullable=False)\r\n 
app7_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app8_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app9_status = db.Column(db.Integer, unique=False, nullable=False)\r\n app10_status = db.Column(db.Integer, unique=False, nullable=False)\r\n active_status = db.Column(db.Integer, unique=False, nullable=False)\r\n\r\n def to_dict(self):\r\n dictionary = {}\r\n # Loop through each column in the data record\r\n for column in self.__table__.columns:\r\n # Create a new dictionary entry;\r\n # where the key is the name of the column\r\n # and the value is the value of the column\r\n dictionary[column.name] = getattr(self, column.name)\r\n return dictionary\r\n\r\n\r\ndb.create_all()\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return '
This is the home page and there is nothing to see here
'\r\n\r\n\r\n# Read Record\r\n@app.route('/all')\r\ndef all_users():\r\n    users = db.session.query(User).all()\r\n    return jsonify(users=[user.to_dict() for user in users]), 200\r\n\r\n\r\n@app.route('/token')\r\ndef get_user_from_token():\r\n    token = request.args.get('token')\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n\r\n    if user:\r\n        return jsonify(user=user.to_dict()), 200\r\n    else:\r\n        return jsonify(error={'Not Found': 'Sorry. That token does not exist on our database'}), 404\r\n\r\n\r\n@app.route('/check_token_validity')\r\ndef registering_users_token_validity():\r\n    token = request.args.get('token') + '\\n'\r\n\r\n    with open('used tokens.txt', 'r') as f:\r\n        used_tokens = f.readlines()\r\n        f.close()\r\n\r\n    with open('unused tokens.txt', 'r') as f:\r\n        unused_tokens = f.readlines()\r\n        f.close()\r\n\r\n    if token in used_tokens:\r\n        return jsonify(error=\"That token is already used\"), 404\r\n\r\n    elif token in unused_tokens:\r\n\r\n        return jsonify(success=\"That token exists but hasn't been used \"), 200\r\n\r\n    else:\r\n        return jsonify(error=\"That token does not exist!\"), 404\r\n\r\n\r\n@app.route('/change_token/<token>', methods=['PATCH'])\r\ndef change_token(token):\r\n    new_token = request.args.get('new_token')\r\n\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n\r\n    if user:\r\n        user.token = new_token\r\n        db.session.commit()\r\n        return jsonify(success={\"Success\": \"Successfully changed the token\"}), 200\r\n\r\n    else:\r\n        return jsonify(error={'error': 'That token is not registered'}), 404\r\n\r\n\r\n@app.route('/email')\r\ndef get_user_from_email():\r\n    email = request.args.get('email')\r\n    user = db.session.query(User).filter_by(email=email.lower()).first()\r\n\r\n    if user:\r\n        return jsonify(user=user.to_dict()), 200\r\n    else:\r\n        return jsonify(error={'Not Found': 'Sorry. That email does not exist on our database'}), 404\r\n\r\n\r\n# Create user\r\n@app.route('/add', methods=['POST'])\r\ndef add_user():\r\n    email = request.form.get('email').lower()\r\n    new_user = User(\r\n        email=email,\r\n        token=request.form.get('token'),\r\n        start_time=START_TIME,\r\n        end_time=END_TIME,\r\n        app1_status=0,\r\n        app2_status=0,\r\n        app3_status=0,\r\n        app4_status=0,\r\n        app5_status=0,\r\n        app6_status=0,\r\n        app7_status=0,\r\n        app8_status=0,\r\n        app9_status=0,\r\n        app10_status=0,\r\n        active_status=0\r\n    )\r\n\r\n    db.session.add(new_user)\r\n    db.session.commit()\r\n\r\n    with open('unused tokens.txt', 'r') as f:\r\n        unused_tokens = f.readlines()\r\n        f.close()\r\n\r\n    token = request.form.get('token') + '\\n'\r\n\r\n    with open('used tokens.txt', 'a') as f:\r\n        f.write(token)\r\n        f.close()\r\n\r\n    unused_tokens.remove(token)\r\n\r\n    with open('unused tokens.txt', 'w') as f:\r\n        for token in unused_tokens:\r\n            f.write(token)\r\n        f.close()\r\n\r\n    return jsonify(response={'Success': 'Successfully added a new user'})\r\n\r\n\r\n# Renew license\r\n@app.route('/renew/<email>', methods=['PATCH'])\r\ndef renew_license(email):\r\n    new_start_time = START_TIME\r\n    new_end_time = END_TIME\r\n    user = db.session.query(User).filter_by(email=email).first()\r\n\r\n    if user:\r\n        user.start_time = new_start_time\r\n        user.end_time = new_end_time\r\n        db.session.commit()\r\n        return jsonify(response={'Success': 'Successfully renewed the license'}), 200\r\n    else:\r\n        return jsonify(error={'Not Found': 'Sorry. No user with that email is on our database'}), 404\r\n\r\n\r\n@app.route('/update_license/<email>', methods=['PATCH'])\r\ndef update_license(email):\r\n    new_start_time = request.args.get('start_time')\r\n    new_end_time = request.args.get('end_time')\r\n\r\n    user = db.session.query(User).filter_by(email=email).first()\r\n\r\n    if user:\r\n        user.start_time = new_start_time\r\n        user.end_time = new_end_time\r\n        db.session.commit()\r\n\r\n        return jsonify(Success={'Success': 'Successfully changed the license time'}), 200\r\n\r\n    else:\r\n        return jsonify(Error={'error': 'No such user'}), 404\r\n\r\n\r\n@app.route('/check_validity/<token>')\r\ndef check_validity(token):\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n    if user:\r\n        if user.start_time < user.end_time:\r\n            return jsonify(success={'Success': 'The user is still valid'}), 200\r\n        else:\r\n            return jsonify(failure={'Failure': 'The user has expired'}), 404\r\n    else:\r\n        return jsonify(error={'Not Found': 'Sorry. No user with that token is on our database'}), 404\r\n\r\n\r\n@app.route('/check_app_status/<token>')\r\ndef check_app_status(token):\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n    if user:\r\n        app_no = int(request.args.get('app_no'))\r\n        if app_no == 1:\r\n            status = user.app1_status\r\n        elif app_no == 2:\r\n            status = user.app2_status\r\n        elif app_no == 3:\r\n            status = user.app3_status\r\n        elif app_no == 4:\r\n            status = user.app4_status\r\n        elif app_no == 5:\r\n            status = user.app5_status\r\n        elif app_no == 6:\r\n            status = user.app6_status\r\n        elif app_no == 7:\r\n            status = user.app7_status\r\n        elif app_no == 8:\r\n            status = user.app8_status\r\n        elif app_no == 9:\r\n            status = user.app9_status\r\n        else:\r\n            status = user.app10_status\r\n\r\n        return jsonify(status={'Status': status})\r\n    else:\r\n        return jsonify(error={'Not Found': 'Sorry. No user with that token is on our database'}), 404\r\n\r\n\r\n@app.route('/change_app_status/<token>', methods=['PATCH'])\r\ndef change_app_status(token):\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n    if user:\r\n        app_no = int(request.args.get('app_no'))\r\n        if app_no == 1:\r\n            user.app1_status = 1\r\n        elif app_no == 2:\r\n            user.app2_status = 1\r\n        elif app_no == 3:\r\n            user.app3_status = 1\r\n        elif app_no == 4:\r\n            user.app4_status = 1\r\n        elif app_no == 5:\r\n            user.app5_status = 1\r\n        elif app_no == 6:\r\n            user.app6_status = 1\r\n        elif app_no == 7:\r\n            user.app7_status = 1\r\n        elif app_no == 8:\r\n            user.app8_status = 1\r\n        elif app_no == 9:\r\n            user.app9_status = 1\r\n        elif app_no == 10:\r\n            user.app10_status = 1\r\n        else:\r\n            pass\r\n\r\n        db.session.commit()\r\n\r\n        return jsonify(success={'Success': f'Successfully changed the status of app number {app_no}'}), 200\r\n    else:\r\n        return jsonify(error={'Not Found': 'Sorry. No user with that token is on our database'}), 404\r\n\r\n\r\n@app.route('/check_active_status/<token>')\r\ndef check_active_status(token):\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n\r\n    if user:\r\n        if user.active_status == 0:\r\n            return jsonify(the_status_code={'Status': user.active_status},\r\n                           Success={'Success': f\"App status is {user.active_status}. App not running\",\r\n                                    'Status code': user.active_status}), 200\r\n        else:\r\n            return jsonify(the_status_code={'Status': user.active_status},\r\n                           Error={'Error': f'App status is {user.active_status}. App is running',\r\n                                  'Status code': user.active_status}), 404\r\n    else:\r\n        return jsonify(Error={'Error': 'No such user with that token'}), 404\r\n\r\n\r\n@app.route('/change_active_status/<token>', methods=['PATCH'])\r\ndef change_active_status(token):\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n\r\n    if user:\r\n        return jsonify(Success={'Success': \"Successfully changed the active status\"}), 200\r\n    else:\r\n        return jsonify(Error={'Error': 'No such user with that token'}), 404\r\n\r\n\r\n@app.route('/generate_token/<int:number>')\r\ndef generate_token(number):\r\n    def generate():\r\n        letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'p', 'q', 'r', 's', 't',\r\n                   'u',\r\n                   'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\r\n                   'P',\r\n                   'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n        numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\r\n\r\n        token_letters = [choice(letters) for _ in range(5)]\r\n        token_numbers = [choice(numbers) for _ in range(3)]\r\n\r\n        token_list = token_letters + token_numbers\r\n        shuffle(token_list)\r\n        generated_token = ''.join(token_list)\r\n\r\n        token = ''\r\n\r\n        position = 0\r\n        for j in range(len(generated_token)):\r\n            if position == 4:\r\n                token += '-'\r\n                position = 0\r\n            token += generated_token[j]\r\n            position += 1\r\n\r\n        return token\r\n\r\n    with open('unused tokens.txt', 'r') as f:\r\n        unused_tokens = f.readlines()\r\n        f.close()\r\n\r\n    with open('used tokens.txt', 'r') as f:\r\n        used_tokens = f.readlines()\r\n        f.close()\r\n\r\n    for i in range(number):\r\n        new_token = generate()\r\n\r\n        if new_token + '\\n' not in unused_tokens and new_token + '\\n' not in used_tokens:\r\n            with open('unused tokens.txt', 'a') as f:\r\n                f.write(f'{new_token}\\n')\r\n                f.close()\r\n\r\n    return jsonify(Success={'Success': f\"Successfully generated {number} new tokens!\"}), 200\r\n\r\n\r\n@app.route('/fetch_token')\r\ndef fetch_token():\r\n    with open('unused tokens.txt', 'r') as f:\r\n        tokens = f.readlines()\r\n        new_token = choice(tokens).strip('\\n')\r\n        f.close()\r\n\r\n    return jsonify(Success={'New token': f\"{new_token}\"})\r\n\r\n\r\n@app.route('/delete/<token>')\r\ndef delete_user(token):\r\n    user = db.session.query(User).filter_by(token=token).first()\r\n\r\n    if user:\r\n        db.session.delete(user)\r\n        db.session.commit()\r\n        return jsonify(Success={'Success':'Successfully deleted the user'}), 200\r\n\r\n    else:\r\n        return jsonify(Error={'Error': 'No such user'}), 404\r\n\r\n\r\n@app.route('/current_time')\r\ndef current_time():\r\n    return jsonify(Time=f\"{int(START_TIME)}\")\r\n\r\n\r\n@app.route('/testing_the_shit')\r\ndef testing_the_shit():\r\n    return '
Wow! The damn thing worked!
'\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"57276564","text":"# crawling\nimport pandas as pd\nimport numpy\nfrom selenium import webdriver\nimport re\nimport time\nimport csv\nimport datetime as dt\nimport pymysql\nfrom sqlalchemy import create_engine\n#from PIL import Image\nimport base64\n#from io import BytesIO\n\n# airflow \nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime, timedelta\nimport sys\nimport pendulum\nimport requests\n\n#-------------------------------- initial setup code ----------------------------------#\n\ndef get_musinsa_category_count(**kwargs):\n    conn = pymysql.connect(host='35.185.210.97', port=3306, user='footfootbig', password='footbigmaria!',\n                        database='footfoot')\n\n    try:\n        with conn.cursor() as curs:\n            select_count = \"\"\"\n            SELECT count(*) from musinsa_category;\n            \"\"\"\n            curs.execute(select_count)\n            count = curs.fetchone()[0]\n\n    finally:\n        conn.close()\n\n    return count + 1\n\n#-------------------------------- crawling code ----------------------------------#\n\n# pull Musinsa model (product) info\ndef get_shoes_info(category, page, **kwargs):\n\n    # Chrome driver options\n    options = webdriver.ChromeOptions()\n    options.add_argument('--headless')\n    options.add_argument('--no-sandbox')\n    options.add_argument('--disable-gpu')\n    options.add_argument('--disable-dev-shm-usage')\n    options.add_argument('--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36')\n    driver = webdriver.Chrome(executable_path='/usr/bin/chromedriver',options=options)\n\n    # extract the model musinsa_id\n    prod_id = []\n    url = 'https://store.musinsa.com/app/items/lists/'\\\n         +str(page)\\\n         +'/?category=&d_cat_cd=005&u_cat_cd=&brand=&sort=pop&sub_sort=&display_cnt=3000&page=1&page_kind=category&list_kind=small&free_dlv=&ex_soldout=N&sale_goods=&exclusive_yn=&price=&color=&a_cat_cd=&size=&tag=&popup=&brand_favorite_yn=&goods_favorite_yn=&blf_yn=&campaign_yn=&bwith_yn=&price1=&price2=&chk_soldout=on'\n    driver.get(url)\n    time.sleep(10)\n    driver.implicitly_wait(90)\n    prod_id_list = driver.find_elements_by_css_selector('#searchList > li > div.li_inner > div.list_img > a > img')\n    for q in prod_id_list:\n        raw_prod_id = q.get_attribute(\"data-original\")\n        prod_id_cook = raw_prod_id.split('/')[6]\n        prod_id.append(prod_id_cook)\n\n    # extract model detail info\n    prod_info = []\n    for prod_id_one in prod_id:\n        url2 = 'https://store.musinsa.com/app/product/detail/' + str(prod_id_one) + '/0'\n        driver.get(url2)\n        time.sleep(1)\n        driver.implicitly_wait(10)\n\n        prod_main_img = driver.find_element_by_css_selector('#bigimg')\n        img_url = prod_main_img.get_attribute('src')\n        try:\n            # brand, id\n            id_and_brand = driver.find_element_by_class_name('product_article_contents')\n            #id_and_brand = driver.find_element_by_css_selector('product_order_info > div.explan_product.product_info_section > ul > li:nth-child(1) > p.product_article_contents > strong')\n            try:\n                prod_brand = driver.find_element_by_css_selector('#page_product_detail > div.right_area.page_detail_product > div.right_contents.section_product_summary > div.product_info > p > a:nth-child(3)')\n            except:\n                continue\n            prod_brand_text = prod_brand.text\n            prod_brand_clean = prod_brand_text.replace(' ','').replace('(','').replace(')','')\n            id_and_brand_text = id_and_brand.text\n            prod_brand = id_and_brand_text.split('/')[0] # brand\n            try :\n                name_id = id_and_brand_text.split('/')[1].strip() # model item number\n            except :\n                name_id = id_and_brand_text # a few products have no item number\n            \n            try:\n                prod_name = driver.find_element_by_class_name('product_title')\n                prod_name_text = prod_name.text\n            except:\n                prod_name_text = name_id\n            try: # strip the English name if there is one\n                prod_name_eng = driver.find_element_by_class_name('product_title_eng')\n                prod_name_eng_text = prod_name_eng.text\n                prod_name_text = prod_name_text.replace(prod_name_eng_text, '')\n            except: # no English name, pass\n                pass\n            \n            # size\n            try:\n                size = driver.find_element_by_class_name('option1')\n            except: # very occasionally a product has a single size, so handle it\n                size = 'no size info'\n            # handle the case where the size is not in option1\n            try:\n                size_texts = size.text\n                size_text_split = size_texts.split()[2:]\n                size_text = []\n                # drop junk tokens such as '옵션' ('option') or '(3개남음)' ('3 left') and keep only the sizes\n                for regex_check in size_text_split:\n                    temp = str(re.findall('2\\\d[0|5]',regex_check)[0])\n                    size_text.append(temp)\n                \n                join_size_text = '-'.join(size_text)\n            except:\n                join_size_text = '-'\n\n            # gender\n            gender = driver.find_element_by_class_name('txt_gender')\n            gender_text = gender.text # gender\n\n            # price\n            try:\n                price = driver.find_element_by_css_selector('#goods_price > del')\n            except:\n                price = driver.find_element_by_css_selector('#goods_price')\n            price_text = price.text # regular price\n\n\n            # strip the item number, ad copy, color and other extras from the model name\n            modelname = ''\n            if len(prod_name_text.split()) != 1: # the model name is not just the item number\n                if prod_name_text.startswith('['): # remove ad prefixes starting with '[', e.g. '[키높이]'\n                    try:\n                        if len(prod_name_text.split(']')) > 2: # several [] blocks\n                            modelname = ''.join(prod_name_text.split(']')[2:])\n                            if modelname == '': # [] is at the end\n                                modelname = ''.join(prod_name_text.split(']')[1:-1])\n                        else:\n                            modelname = prod_name_text.split(']')[1]\n                    except: # typos such as '[쏠라}' raise errors here\n                        modelname = prod_name_text.split('}')[1]\n\n                elif prod_name_text.startswith('('): # extra info starting with '(', e.g. '(비브람솔)'\n                    modelname = ''.join(prod_name_text.split(')')[1:])\n                else: # no ad brackets\n                    modelname = prod_name_text\n\n                modelname = modelname.replace(name_id,'').replace('/','') # remove the item number\n                modelname = modelname.split('(')[0].split('-')[0] # remove color and descriptions\n            else:\n                modelname = prod_name_text # the model name is just the item number\n            \n            if (modelname == '') | (modelname == ' '):\n                modelname = name_id\n            \n            prod_info.append([category, prod_brand_clean, name_id, modelname, gender_text, join_size_text, int(prod_id_one), int(''.join(price_text.split(','))), img_url])\n        except:\n            pass\n    musinsa_df = pd.DataFrame(\n        data=prod_info\n        , columns=['category', 'brand', 'shono', 'modelname', 'shosex', 'size', 'musinsa_id', 'price_m', 'img_src']\n    )\n    \n    # clean up the Musinsa data\n    musinsa_df.drop(musinsa_df[musinsa_df['shosex'] == '남 여 아동'].index, axis=0, inplace=True)\n    musinsa_df.drop(musinsa_df[musinsa_df['shosex'] == '아동'].index, axis=0, inplace=True)\n    musinsa_df.drop(musinsa_df[musinsa_df['shosex'] == '라이프'].index, axis=0, inplace=True)\n    musinsa_df.drop(musinsa_df[musinsa_df['shosex'] == '여 아동'].index, axis=0, inplace=True)\n\n    musinsa_df['shosex'].replace('남 여', \"남녀공용\", inplace=True)\n    musinsa_df['shosex'].replace('남', \"남성용\", inplace=True)\n    musinsa_df['shosex'].replace('여', \"여성용\", inplace=True)\n\n    musinsa_df['minsize'] = None\n    musinsa_df['maxsize'] = None\n    musinsa_df['sizeunit'] = None\n\n\n    for i in musinsa_df.index:\n        try:\n            musinsa_df['minsize'][i] = int(musinsa_df['size'].str.split('-')[i][0])\n            musinsa_df['maxsize'][i] = int(musinsa_df['size'].str.split('-')[i][-1])\n            musinsa_df['sizeunit'][i] = \\\n                int(musinsa_df['size'].str.split('-')[i][1]) - int(musinsa_df['size'].str.split('-')[i][0])\n        except:\n            pass\n\n    del musinsa_df['size']\n\n    musinsa_df.to_csv(f'/root/reviews/musinsa_{category}_id.csv')\n\n    # send to MariaDB\n    engine = create_engine(\"mysql+pymysql://footfootbig:\" + \"footbigmaria!\" + \"@35.185.210.97/footfoot\" + \"?charset=utf8mb4\")\n    conn = engine.connect()\n    try:\n        musinsa_df.to_sql(name='musinsa_shoes', con=engine, if_exists='append', index=False)\n    finally:\n        conn.close()\n\n\n# fetch category and page from the DB\n\ndef get_category_page(count, **kwargs):\n    conn = pymysql.connect(host='35.185.210.97', port=3306, user='footfootbig', password='footbigmaria!', database='footfoot')\n\n    try:\n        with conn.cursor() as curs:\n\n            select_brand = \"\"\"\n            SELECT category, page\n            FROM musinsa_category\n            WHERE idx=%s;\n            \"\"\"\n            curs.execute(select_brand, count)\n            category, page = curs.fetchone()\n\n            get_shoes_info(category, page)\n    finally:\n        conn.close()\n\n\ndef truncate(**kwargs):\n    conn = pymysql.connect(host='35.185.210.97', port=3306, user='footfootbig', password='footbigmaria!',\n                        database='footfoot')\n    try:\n        with conn.cursor() as curs:\n            truncate_table = \"\"\"\n            truncate table musinsa_shoes;\n            \"\"\"\n            curs.execute(truncate_table)\n    finally:\n        conn.close()\n\ndef xcom_push(**kwargs):\n    kwargs['ti'].xcom_push(key='musinsa_id_crawling_end', value=True)\n\n#-------------------------------- airflow code ----------------------------------#\n\ndef check_id_start_notify(**kwargs):\n    check = False\n    while not check:\n        try:\n            check = kwargs['ti'].xcom_pull(key='id_crawling_start', dag_id='line_notify_id_crawling')\n        except:\n            pass\n        if not check:\n            time.sleep(60*5)\n    \n# switch to Seoul local time\nlocal_tz = pendulum.timezone('Asia/Seoul')\ntoday = datetime.today()\n# airflow DAG settings\ndefault_args = {\n    'owner': 'Airflow',\n    'depends_on_past': False,\n    'start_date': datetime(today.year, today.month, today.day, tzinfo=local_tz) - timedelta(days=15),\n    'catchup': False,\n    'provide_context': True\n}\n\n# create the DAG instance\ndag = DAG(\n    # the whole DAG's ID, shown in the web UI\n    dag_id='musinsa_id_crawling_to_sql'\n    # pass in the DAG settings\n    , default_args=default_args\n    # max number of active runs\n    , max_active_runs=1\n    # run interval\n    , schedule_interval=timedelta(days=14)\n)\n\n# detect the start signal\ncheck_id_start_notify = PythonOperator(\n    task_id='check_id_start_notify',\n    python_callable=check_id_start_notify,\n    dag=dag,\n)\n\n# table truncation task\ntruncate = PythonOperator(\n    task_id = 'truncate',\n    python_callable = truncate,\n    dag = dag,\n)\n\n# completion-flag (XCom push) task\nxcom_push = PythonOperator(\n    task_id = 'xcom_push',\n    python_callable = xcom_push,\n    dag = dag,\n)\n\n# dynamic task generation\n# crawling tasks\ncount = get_musinsa_category_count()\n\nfor count in range(1, count):\n    id_crawling = PythonOperator(\n        task_id='{0}_id_crawling'.format(count),\n        python_callable=get_category_page,\n        op_kwargs={'count':count},\n        dag=dag\n    )\n    check_id_start_notify >> truncate >> id_crawling >> xcom_push\n\n","sub_path":"MUSINSA_ID_CRAWLING_DAG.py","file_name":"MUSINSA_ID_CRAWLING_DAG.py","file_ext":"py","file_size_in_byte":12206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
\"tornado-whois\", \"async\"],\n description=\"Asynchronous python tornado whois client\",\n long_description=readme,\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: MIT License\",\n ],\n install_requires=[\"tornado>=4.3\"],\n requires=[\"tornado (>=4.3)\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"471919454","text":"# -*- coding:gb2312 -*-\n\nimport urllib.request as urllib2\nimport socket\nimport re\nimport os\nimport time\nimport requests\n\ndef get_header_from_console():\n print(\"输入目标网页Header,每行只输入一项,输入pass结束:\\n\")\n header_lines = []\n while True:\n line = input()\n if line == \"pass\":\n return list(filter(lambda x: x != \"\", header_lines))\n header_lines.append(line)\n\n\ndef get_header_from_input(header_string):\n return list(filter(lambda x: x != \"\", header_string.split(\"\\n\")))\n\n\ndef parse_line(header_line):\n sep_index = header_line.find(\":\")\n k = header_line[:sep_index].strip()\n v = header_line[sep_index+1:].strip()\n return k, v\n\n\ndef parse_header(header_lines):\n return dict(list(map(parse_line, header_lines)))\n\n\ndef get_web_content(url, header=None):\n if header:\n req = urllib2.Request(url, headers=header)\n res = urllib2.urlopen(req)\n else:\n res = urllib2.urlopen(url)\n return res.read()\n\n\ndef get_galleries(galleries_string):\n name_pattern = re.compile(r\"title=\\\".+?\\\"\")\n link_pattern = re.compile(r\"href=\\\"/models/joanna-may-parker/galleries/\\d+\\\"\")\n name = name_pattern.findall(galleries_string)[0][7:-1]\n link = link_pattern.findall(galleries_string)[0][6:-1]\n return name, link\n\nif __name__ == \"__main__\":\n socket.setdefaulttimeout(2)\n\n # URL和Header的字符串形式\n target_url = 'http://www.coedcherry.com/models/joanna-may-parker'\n target_header = '''Host: www.coedcherry.com\nConnection: keep-alive\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\n'''\n target_header = parse_header(get_header_from_input(target_header))\n # 获取网页内容\n content = get_web_content(target_url, target_header).decode('utf-8')\n # print(content)\n # 匹配并匹配指向模特的各个相册的链接和相册名字\n galleries_pattern = re.compile(r'
')\n galleries_strings = galleries_pattern.findall(content)\n galleries_dic = dict(list(map(get_galleries, galleries_strings)))\n\n for k, v in galleries_dic.items():\n time.sleep(1)\n # 根据相册名字创建文件夹\n cur_dir = 'E:\\WorkSpace\\Beauty'\n folder_name = k\n if os.path.isdir(cur_dir) and not os.path.isdir(cur_dir+\"\\\\\\\\\"+k):\n os.mkdir(os.path.join(cur_dir, folder_name))\n # 进入相册\n next_url = \"http://\" + target_header['Host'] + v\n print(next_url)\n galleries_content = get_web_content(next_url, target_header).decode('utf-8')\n # 匹配全部图片\n picture_pattern = re.compile(r\"http://content\\d.coedcherry.com/joanna-may-parker/\\d+?/\\w+?.jpg\")\n # picture_pattern = re.compile(r\"content.+?jpg\")\n # 制作图片链接列表\n picture_list = picture_pattern.findall(galleries_content)\n\n # 下载\n index = 0\n for picture_url in picture_list:\n print(picture_url)\n time.sleep(1)\n # 更新图片的Header\n picture_header_pattern = re.compile(\"content\\d\\.coedcherry\\.com.+?\\.jpg\")\n content_number = picture_header_pattern.findall(picture_url)[0][len(\"content\"):len(\"content\")+1]\n\n img_header_string = '''\n Host: content4.coedcherry.com\n Connection: keep-alive\nUpgrade-Insecure-Requests: 1\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\nAccept-Encoding: gzip, deflate, sdch\nAccept-Language: zh-CN,zh;q=0.8\nCookie: __cfduid=dd3a4338f53449a8d07a65bae657d99231492316730'''\n\n picture_header = parse_header(get_header_from_input(img_header_string))\n picture_header['Host'] = \"content\" + content_number + \".coedcherry.com\"\n print(picture_header)\n\n # picture = requests.get(picture_url, headers=picture_header, stream=True)\n picture = get_web_content(picture_url, header=picture_header)\n\n with open(str(cur_dir+\"\\\\\\\\\"+folder_name+\"\\\\\\\\%s.jpg\" % index), 'wb') as f:\n f.write(picture)\n index += 1","sub_path":"url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"356342307","text":"\nimport numpy as np\nimport pickle\nfrom pycparser import c_ast, c_parser, c_generator\nfrom pycparser.c_ast import TypeDecl, ID\nfrom gensim.models.word2vec import Word2Vec\n\n\ndef restrict_w2v(w2v, restricted_word_set):\n new_vectors = []\n new_vocab = {}\n new_index2entity = []\n new_vectors_norm = []\n\n for i in range(len(w2v.vocab)):\n word = w2v.index2entity[i]\n vec = w2v.vectors[i]\n vocab = w2v.vocab[word]\n vec_norm = w2v.vectors_norm[i]\n if word in restricted_word_set:\n vocab.index = len(new_index2entity)\n new_index2entity.append(word)\n new_vocab[word] = vocab\n new_vectors.append(vec)\n new_vectors_norm.append(vec_norm)\n\n w2v.vocab = new_vocab\n w2v.vectors = np.array(new_vectors)\n w2v.index2entity = np.array(new_index2entity)\n w2v.index2word = np.array(new_index2entity)\n w2v.vectors_norm = np.array(new_vectors_norm)\n\n\ndef get_antonym(word):\n return restricted_word2vec.most_similar(positive=[], negative=[word], topn=1, restrict_vocab=None)[0][0]\n\nclass declarationRenamer(c_ast.NodeVisitor):\n def visit_Decl(self, node):\n var_name = node.name\n antonymn = get_antonym(var_name)\n node.name = antonymn\n if type(node.type) is TypeDecl:\n node.type.declname = antonymn\n \nclass assignmentRenamer(c_ast.NodeVisitor):\n def visit_Assignment(self, node):\n if type(node.lvalue) is ID:\n var_name = node.lvalue.name\n antonymn = 
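\n# Illustrative sketch only (not from the original file): what get_antonym\n# returns depends entirely on the trained embedding. With vectors for both\n# 'min' and 'max' in the restricted vocabulary, get_antonym('max') yields\n# the vocabulary word closest to the negated vector of 'max', ideally 'min'.\n\nclass declarationRenamer(c_ast.NodeVisitor):\n    def visit_Decl(self, node):\n        var_name = node.name\n        antonymn = get_antonym(var_name)\n        node.name = antonymn\n        if type(node.type) is TypeDecl:\n            node.type.declname = antonymn\n    \nclass assignmentRenamer(c_ast.NodeVisitor):\n    def visit_Assignment(self, node):\n        if type(node.lvalue) is ID:\n            var_name = node.lvalue.name\n            antonymn = 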
get_antonym(var_name)\n node.lvalue.name = antonymn\n if type(node.rvalue) is ID:\n var_name = node.rvalue.name\n antonymn = get_antonym(var_name)\n node.rvalue.name = antonymn\n \nclass unaryOpRenamer(c_ast.NodeVisitor):\n def visit_UnaryOp(self, node):\n if type(node.expr) is ID:\n var_name = node.expr.name\n antonymn = get_antonym(var_name)\n node.expr.name = antonymn\n \nclass binaryOpRenamer(c_ast.NodeVisitor):\n def visit_BinaryOp(self, node):\n if type(node.left) is ID:\n var_name = node.left.name\n antonymn = get_antonym(var_name)\n node.left.name = antonymn\n if type(node.right) is ID:\n var_name = node.right.name\n antonymn = get_antonym(var_name)\n node.right.name = antonymn\n\n\n\n\nused_vars = pickle.load( open( \"/home/david/projects/university/astnn/var_names.pkl\", \"rb\" ) )\nrestricted_word2vec = Word2Vec.load(\"/home/david/projects/university/astnn/data/train/embedding/node_w2v_128\").wv\nrestricted_word2vec.most_similar(\"a\")\nrestrict_w2v(restricted_word2vec, used_vars)\n\ndef rename_vars(ast):\n try:\n declaration_renamer = declarationRenamer()\n assignment_renamer = assignmentRenamer()\n unary_op_renamer = unaryOpRenamer()\n binary_op_renamer = binaryOpRenamer()\n \n declaration_renamer.visit(ast)\n assignment_renamer.visit(ast)\n unary_op_renamer.visit(ast)\n binary_op_renamer.visit(ast)\n except:\n pass\n\nimport random\n\ndead_codes = [\n '''\n int main() {\n int alpha;\n }\n ''',\n '''\n int main() {\n int alpha = 0;\n int beta = 5;\n int gamma = alpha + beta;\n }\n ''',\n '''\n int main() {\n const int ALPHA = 10;\n const int BETA = 5;\n }\n ''',\n '''\n int main() {\n int alpha = 0;\n if(false) {\n alpha = 1;\n }\n }\n '''\n ,\n '''\n int main() {\n int alpha = 0;\n if(false) {\n alpha = 1;\n } else {\n alpha = 2;\n }\n }\n ''',\n '''\n int main() {\n int alpha;\n }\n ''',\n '''\n int main() {\n int alpha = 0;\n int beta = 5;\n int gamma = alpha + beta;\n }\n ''',\n '''\n int main() {\n const int ALPHA = 10;\n const int BETA = 5;\n }\n ''',\n '''\n int main() {\n int alpha = 0;\n if(false) {\n alpha = 1;\n }\n }\n '''\n ,\n '''\n int main() {\n int alpha = 0;\n if(false) {\n alpha = 1;\n } else {\n alpha = 2;\n }\n }\n '''\n]\n\nparser = c_parser.CParser()\n\ncompounds = []\nfor code in dead_codes:\n ast = parser.parse(code)\n compounds.append(ast.ext[0].body)\n\nclass deadCodeAdder(c_ast.NodeVisitor):\n def visit_FuncDef(self, node):\n if node.decl.name == 'main':\n for compound in compounds:\n index = random.randrange(len(node.body.block_items))\n node.body.block_items = node.body.block_items[:index] + compound.block_items + node.body.block_items[index:]\n\ndef add_dead_code(ast):\n v = deadCodeAdder()\n v.visit(ast)\n \n ","sub_path":"transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"342469486","text":"\"\"\"Unit Tests for all methods of class Person\nPylint Score: 4.55\nThe 2 objections are for self.name and self.p_id on line 25,\nthat the class TestPerson has no such attributes.\nSo, while testing with scenarios, this is a must error in pylint.\n\"\"\"\n\nimport testscenarios\nfrom Codes.person import Person\n\n\nclass TestPerson(testscenarios.TestWithScenarios):\n \"\"\"Class for unit-tests of person.py\"\"\"\n\n scenarios = [\n ('person1', {'name': 'Hassaan',\n 'p_id': 0}),\n\n ('person2', {'name': 'Nouman',\n 'p_id': 1}),\n\n ('person3', {'name': 'Waqas',\n 'p_id': 2}),\n ]\n\n def setUp(self):\n self.person = 
Person(self.name, self.p_id)\n\n    def tearDown(self):\n        self.person = None\n\n    def test_person_attributes(self):\n        \"\"\"Tests for checking person's attributes\"\"\"\n\n        self.assertIsInstance(self.person.name, str)\n        self.assertIsInstance(self.person.person_id, int)\n","sub_path":"Module5/5a/person_test.py","file_name":"person_test.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"549136843","text":"from random import choice as rchoice\nimport os\nimport yfinance as yf\n\n\ndef _get_proxies(path: str) -> list:\n    \"\"\"Convert a file to a line by line list equivalent\n\n    Arg:\n        path: filepath to read and convert to list\n\n    Example:\n        >>> _get_proxies('./proxies.txt')\n        ... ['192.162.1.1', '192.172.98.13', '192.183.32.11']\n    \"\"\"\n    with open(path, 'r') as file:\n        proxies = [row.strip('\\n') for row in file]\n    return proxies\n\n\ndef download_stocks(companies_path: str, dl_path: str, proxies_path: str = None, max_dl: int = 0):\n    \"\"\"Fetches stocks csv's on yahoo finance.\n\n    Args:\n        companies_path: Represents the filepath to the file listing the\n            companies name (ticker name). It's waiting for a csv, and\n            will read the first column.\n        dl_path: Filepath to indicate where to store the downloaded stocks.\n        proxies_path: Filepath to indicate the proxies to take\n        max_dl: Maximum number of files to download, 0 if there is no limit\n\n    Example:\n\n        This example will download at most 100 csv stock files on yahoo\n        finance and store them in `./store/`.\n\n        >>> download_stocks('./companies/list.csv', './store', max_dl=100)\n    \"\"\"\n    assert max_dl >= 0, 'Argument max_dl must be >= 0.'\n\n    if proxies_path is not None:\n        proxies = _get_proxies(proxies_path)\n\n    downloads = 0\n    with open(companies_path, 'r') as file:\n        file.readline()\n        for i, row in enumerate(file):\n\n            symbol = row.split(',')[0].strip('\\n')\n            stock_name = os.path.join(dl_path, symbol + '.csv')\n\n            if os.path.exists(stock_name):\n                continue\n\n            if proxies_path is not None:\n                proxy = rchoice(proxies)\n                print(f'{i} - Downloading {symbol} on proxy {proxy} ...')\n                stock = yf.download(symbol, proxy=proxy)\n            else:\n                print(f'{i} - Downloading {symbol} on proxy 0.0.0.0 ...')\n                stock = yf.download(symbol)\n\n            if stock.shape[0] > 2:\n                stock.to_csv(stock_name)\n                downloads += 1\n            else:\n                print(f'Couldn\\'t download stock {symbol}')\n\n            # stop once exactly max_dl files have been fetched\n            if max_dl != 0 and downloads == max_dl:\n                break\n\n    print(f'Downloaded {downloads} stocks.')\n
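\n# Hypothetical invocation sketch (the paths are made up): route requests\n# through proxies listed one per line in proxies.txt and stop after five\n# successful downloads:\n# download_stocks('./companies/list.csv', './store',\n#                 proxies_path='./proxies.txt', max_dl=5)\n","sub_path":"back/irma/predictor/src/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"512920949","text":"from django.urls import path\nfrom . 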
import views\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('login', views.LoginForm.as_view(), name='login'),\n path('signup', views.SignUp.as_view(), name='signup'),\n path('main//', views.mainPage.as_view(), name='main'),\n path('getlist//', views.getList.as_view(), name='getlist'),\n path('getuser//', views.getuser.as_view(), name='getuser'),\n path('getCatalogue/', views.getCatalogue.as_view(), name='catalogue'),\n path('search//', views.searchTitle.as_view(), name='search'),\n path('getbook//', views.getBook.as_view(), name='getbook'),\n path('addbook//', views.addTolist.as_view(), name='addbook'),\n path('removebook//', views.removeFromList.as_view(), name='removebook'),\n path('getAuthorName/', views.getAuthor.as_view(), name='getAuthor'),\n path('borrow//', views.borrow.as_view(), name='borrow'),\n path('userprofile//', views.getUserProfile.as_view(), name='userprofile'),\n path('getBorrowList//', views.getBorrowedBooks, name='borrowed books'),\n path('addMembership///', views.addMembership.as_view(), name='addMember')\n]\n","sub_path":"library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"454677927","text":"\"\"\"Commands module for EVENTS UTILITIES.\nAccess: Role-based\"\"\"\n\nimport asyncio\n\nimport discord\n\nfrom petal.commands import core\nfrom petal.menu import Menu\n\n\nclass CommandsEvent(core.Commands):\n auth_fail = \"This command requires the `{role}` role.\"\n role = \"xPostRole\"\n\n async def cmd_event(\n self, src, _message: str = \"\", _channels: str = \"\", _nomenu: bool = False, **_\n ):\n \"\"\"Post a message announcing the start of an event.\n\n Define a message which will be sent to one or more predefined channels. The message may include mass pings by way of including tags `{{e}}` and `{{h}}` for substitution.\n Destination channels may be selected conversationally or by way of a reaction-based menu.\n\n Options:\n `--message=` :: Define the message to send ahead of time. This will skip the step where Petal asks you what you want the message to say.\n `--nomenu` :: Forsake the Reaction UI and determine destination channels conversationally.\n \"\"\"\n channels_list = []\n channels_dict = {}\n msg = \"\"\n for chan in self.config.get(\"xPostList\"):\n channel = self.client.get_channel(chan)\n if channel is not None:\n msg += (\n str(len(channels_list))\n + \". (\"\n + channel.name\n + \" [{}]\".format(channel.server.name)\n + \")\\n\"\n )\n channels_list.append(channel)\n channels_dict[channel.server.name + \"/#\" + channel.name] = channel\n else:\n self.log.warn(\n chan + \" is not a valid channel. I'd remove it if I were you.\"\n )\n\n # Get channels to send to.\n if _nomenu:\n # Do it only conversationally.\n while True:\n await self.client.send_message(\n src.author,\n src.channel,\n \"Hi there, \"\n + src.author.name\n + \"! Please select the number of \"\n + \"each server you want to post \"\n + \"to. (dont separate the numbers)\",\n )\n\n await self.client.send_message(src.author, src.channel, msg)\n\n chans = await self.client.wait_for_message(\n channel=src.channel, author=src.author, timeout=20\n )\n\n if chans is None:\n return (\n \"Sorry, the request timed out. 
Please make sure you\"\n + \" type a valid sequence of numbers.\"\n )\n if self.validate_channel(channels_list, chans.content):\n break\n else:\n await self.client.send_message(\n src.author,\n src.channel,\n \"Invalid channel choices. You may try again immediately.\",\n )\n post_to = []\n for i in chans.content:\n print(channels_list[int(i)])\n post_to.append(channels_list[int(i)])\n else:\n # Use the ReactionUI.\n menu = Menu(\n self.client,\n src.channel,\n \"Where shall the message be posted?\",\n user=src.author,\n )\n selection = await menu.get_multi(list(channels_dict))\n if not selection:\n return \"No target channels selected; Post canceled.\"\n post_to = [channels_dict[c] for c in selection]\n\n await self.client.send_message(\n src.author,\n src.channel,\n \"What do you want to send? (remember: {e} = `@ev` and {h} = `@here`)\",\n )\n\n msgstr = (\n _message\n or (\n await self.client.wait_for_message(\n channel=src.channel, author=src.author, timeout=120\n )\n ).content\n ).format(e=\"@everyone\", h=\"@here\")\n\n embed = discord.Embed(\n title=\"Message to post\", description=msgstr, colour=0x0ACDFF\n )\n\n embed.add_field(name=\"Channels\", value=\"\\n\".join([c.mention for c in post_to]))\n\n await self.client.embed(src.channel, embed)\n await self.client.send_message(\n src.author,\n src.channel,\n \"If this is ok, type confirm. \"\n + \" Otherwise, wait for it to timeout \"\n + \" and try again\",\n )\n\n msg2 = await self.client.wait_for_message(\n channel=src.channel, author=src.author, content=\"confirm\", timeout=10\n )\n if msg2 is None:\n return \"Event post timed out\"\n\n posted = []\n for i in post_to:\n posted.append(await self.client.send_message(src.author, i, msgstr))\n await asyncio.sleep(2)\n\n await self.client.send_message(\n src.author, src.channel, \"Messages have been posted\"\n )\n\n subkey, subname = self.get_event_subscription(msgstr)\n\n if subkey is None:\n await self.client.send_message(\n src.author,\n src.channel,\n \"I was unable to auto-detect any game titles in your post. \"\n + \"No subscribers will be notified for this event.\",\n )\n else:\n tempm = await self.client.send_message(\n src.author,\n src.channel,\n \"I auto-detected a possible game in your announcement: **\"\n + subname\n + \"**. Would you like to notify subscribers? 
[y/N]\",\n )\n n = await self.client.wait_for_message(\n channel=tempm.channel, author=src.author, timeout=20\n )\n if not n:\n return \"Timed out.\"\n elif n.content.lower() not in (\"y\", \"yes\"):\n return \"Subscribers will not be notified.\"\n else:\n response = await self.notify_subscribers(src.channel, posted[0], subkey)\n todelete = \"[{}]\".format(subkey)\n for post in posted:\n content = post.content\n # print(content)\n # print(todelete)\n if todelete in content:\n # print(\"replacing\")\n content = content.replace(todelete, \"\")\n # print(\"replaced: \" + content)\n await self.client.edit_message(post, content)\n\n return response\n\n\n# Keep the actual classname unique from this common identifier\n# Might make debugging nicer\nCommandModule = CommandsEvent\n","sub_path":"petal/commands/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"30757502","text":"from httpx import AsyncClient, BasicAuth\nfrom typing import List\nfrom uuid import UUID\n\nfrom app.connectors.mailgun.models import InvitasjonUploadUrl, InvitasjonEmail\n\n\nclass MailgunClient:\n def __init__(self, domain: str, secret: str, tusd_url: str, testmode: bool = False):\n \"\"\"Initialize a new MailgunClient\"\"\"\n self.url = f'https://api.eu.mailgun.net/v3/{domain}/messages'\n self.secret = secret\n self.domain = domain\n self.tusd_url = tusd_url\n self.testmode = testmode\n\n async def send_invitasjon(self, to: List[str], arkivuttrekk_obj_id: UUID, arkivuttrekk_tittel: str, invitasjon_ekstern_id: UUID):\n \"\"\"Send an invitasjon email to a list of recipients\"\"\"\n\n upload_url = InvitasjonUploadUrl(arkivuttrekk_obj_id, self.tusd_url, invitasjon_ekstern_id).as_base64_url()\n email = InvitasjonEmail(self.domain, to, arkivuttrekk_obj_id, arkivuttrekk_tittel, upload_url)\n\n async with AsyncClient(auth=BasicAuth('api', self.secret)) as client:\n email_data = email.as_data()\n\n if self.testmode:\n email_data['o:testmode'] = 'true'\n\n resp = await client.post(self.url, data=email_data)\n\n return resp\n","sub_path":"mottak-arkiv-service/app/connectors/mailgun/mailgun_client.py","file_name":"mailgun_client.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"188607018","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nImplemented by Michael Meckl.\n\"\"\"\n\nimport sys\nfrom argparse import ArgumentParser\nimport DIPPID\nfrom PyQt5 import QtWidgets, QtCore, uic\nfrom game_widget import Direction, Velocity\n\n# import os\n# CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n# ui_file = os.path.join(CURRENT_DIR, \"dippid_game.ui\")\n# form, base = uic.loadUiType(ui_file)\n\n\nclass DippidGame(QtWidgets.QWidget):\n\n ALL_CAPABILITIES = [\"accelerometer\", \"gyroscope\", \"gravity\", \"button_1\", \"button_2\", \"button_3\", \"button_4\"]\n\n def __init__(self, port=5700):\n super(DippidGame, self).__init__()\n self.sensor = DIPPID.SensorUDP(port)\n\n # self.setupUi(self)\n self.ui = uic.loadUi(\"dippid_game.ui\", self)\n self._show_introduction()\n\n def _show_introduction(self):\n self.ui.stackedWidget.setCurrentIndex(0)\n self.ui.btn_start_game.setFocusPolicy(QtCore.Qt.NoFocus) # prevent auto-focus of the start button\n self.ui.btn_start_game.setEnabled(False) # disable the start game button until the sensor device connected!\n\n # check the connection to the device 
repeatedly until successful connection; without the connection\n # the game can't be played so it would be useless to allow a user to start it\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self._check_connected_status)\n self.timer.start(10) # wait for 10 ms until next try\n\n self.ui.btn_start_game.clicked.connect(self._show_game)\n\n def _check_connected_status(self):\n if self._is_connected():\n self.ui.connected_status.setStyleSheet(\"QLabel { font-weight: bold; color : green;}\")\n self.ui.connected_status.setText(\"Connected\")\n self.ui.btn_start_game.setEnabled(True)\n else:\n self.ui.connected_status.setStyleSheet(\"QLabel { font-weight: bold; color : red;}\")\n self.ui.connected_status.setText(\"Not connected\")\n self.ui.btn_start_game.setEnabled(False)\n\n def _is_connected(self) -> bool:\n # TODO find a better method to test connection!\n # (if we have been connected once it won't recognize a connection loss as the capabilities are already there!)\n # check if all capabilities have been registered (if all work the sensor is obviously sending data)\n capabilities_ready = all(self.sensor.has_capability(capability) for capability in DippidGame.ALL_CAPABILITIES)\n return True if capabilities_ready else False\n\n def _show_game(self):\n self.timer.stop() # stop the qtimer that checks whether we are connected or not (we have to be if we are here)\n\n index = self.ui.stackedWidget.currentIndex() + 1\n # switch widget index to the element in the stack at the given index (i.e. move to this page)\n self.ui.stackedWidget.setCurrentIndex(index)\n self._start_game()\n\n def _start_game(self):\n self.ui.game_widget.start(level_finished_callback=self._update_level,\n points_changed_callback=self._update_points)\n\n # the callbacks need to be registered AFTER checking connected status and starting the game, otherwise we can't\n # be sure about the connected status as they register themselves as capabilities as well (and would fire before\n # the game even started)\n self._register_sensor_callbacks()\n\n def _update_level(self, level: int):\n self.ui.level.setText(str(level))\n\n def _update_points(self, new_points: int):\n self.ui.points.setText(str(new_points))\n\n def _register_sensor_callbacks(self):\n # self.sensor.register_callback('button_1', self._handle_button_press)\n # self.sensor.register_callback('accelerometer', self._handle_acceleration)\n self.sensor.register_callback('gravity', self._handle_position_change)\n self.sensor.register_callback('gyroscope', self._handle_angle_acceleration)\n\n def _handle_angle_acceleration(self, data):\n if data[\"x\"] > 2.5:\n # the mobile device was moved rapidly around the x-axis!\n self.ui.game_widget.switch_lane(direction=Direction.UP)\n elif data[\"x\"] < -2.5:\n self.ui.game_widget.switch_lane(direction=Direction.DOWN)\n\n def _handle_position_change(self, data):\n if data[\"x\"] <= -9.0:\n # the mobile device is tilted in x-direction!\n self.ui.game_widget.move_character_forward(velocity=Velocity.FAST)\n elif data[\"x\"] <= -5.0:\n self.ui.game_widget.move_character_forward(velocity=Velocity.NORMAL)\n\n def _handle_button_press(self, data):\n try:\n # print(\"Button data: \", data)\n if int(data) == 0:\n print('button released')\n else:\n print('button pressed')\n except Exception as e:\n sys.stderr.write(f\"Something went wrong when trying to cast button data: {e}\")\n\n # def closeEvent(self, event: QtGui.QCloseEvent):\n # self.sensor.disconnect() # stop sensor before closing!\n # event.accept()\n\n\ndef main():\n # 
parse command line input and print out some helpful information\n parser = ArgumentParser(description=\"A small game that can be played with mobile phone movement recognized via the\"\n \" DIPPID protocol.\")\n parser.add_argument(\"-p\", \"--port\", help=\"The port on which the mobile device sends its data via DIPPID\", type=int,\n default=5700, required=False)\n args = parser.parse_args()\n port = args.port\n\n app = QtWidgets.QApplication(sys.argv)\n dippid_game = DippidGame(port=port)\n dippid_game.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dippid_game.py","file_name":"dippid_game.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"57398020","text":"import shutil\n\nimport mlflow\nimport pytest\nfrom cookiecutter.main import cookiecutter\nfrom kedro import __version__ as kedro_version\nfrom kedro.framework.cli.starters import TEMPLATE_PATH\nfrom kedro.framework.hooks.manager import get_hook_manager\nfrom kedro.framework.session.session import _deactivate_session\n\nfrom kedro_mlflow.framework.cli.cli import TEMPLATE_FOLDER_PATH\nfrom kedro_mlflow.framework.cli.cli_utils import write_jinja_template\n\n\n@pytest.fixture(autouse=True)\ndef cleanup_mlflow_after_runs():\n # A test function will be run at this point\n yield\n while mlflow.active_run():\n mlflow.end_run()\n\n\n@pytest.fixture(autouse=True)\ndef cleanup_kedro_session():\n # A test function will be run at this point\n yield\n _deactivate_session()\n\n\n@pytest.fixture(autouse=True)\ndef clear_hook_manager():\n yield\n hook_manager = get_hook_manager()\n plugins = hook_manager.get_plugins()\n for plugin in plugins:\n hook_manager.unregister(plugin)\n\n\n@pytest.fixture\ndef kedro_project(tmp_path):\n # TODO : this is also an integration test since this depends from the kedro version\n config = {\n \"output_dir\": tmp_path,\n \"kedro_version\": kedro_version,\n \"project_name\": \"This is a fake project\",\n \"repo_name\": \"fake-project\",\n \"python_package\": \"fake_project\",\n \"include_example\": True,\n }\n\n cookiecutter(\n str(TEMPLATE_PATH),\n output_dir=config[\"output_dir\"],\n no_input=True,\n extra_context=config,\n )\n\n shutil.rmtree(\n tmp_path / \"fake-project\" / \"src\" / \"tests\"\n ) # avoid conflicts with pytest\n\n return tmp_path / \"fake-project\"\n\n\n@pytest.fixture\ndef kedro_project_with_mlflow_conf(kedro_project):\n write_jinja_template(\n src=TEMPLATE_FOLDER_PATH / \"mlflow.yml\",\n is_cookiecutter=False,\n dst=kedro_project / \"conf\" / \"local\" / \"mlflow.yml\",\n python_package=\"fake_project\",\n )\n\n return kedro_project\n\n\n@pytest.fixture\ndef kedro_project_with_tcl(tmp_path):\n # TODO: find a better way to inject dynamically\n # the templated config loader without modifying the template\n\n config = {\n \"output_dir\": tmp_path,\n \"kedro_version\": kedro_version,\n \"project_name\": \"A kedro project with a templated config loader\",\n \"repo_name\": \"kedro-project-with-tcl\",\n \"python_package\": \"kedro_project_with_tcl\",\n \"include_example\": True,\n }\n\n cookiecutter(\n str(TEMPLATE_PATH),\n output_dir=config[\"output_dir\"],\n no_input=True,\n extra_context=config,\n )\n\n shutil.rmtree(\n tmp_path / config[\"repo_name\"] / \"src\" / \"tests\"\n ) # avoid conflicts with pytest\n\n hooks_py = \"\"\"\nfrom typing import Any, Dict, Iterable, Optional\n\nfrom kedro.config import TemplatedConfigLoader\nfrom kedro.framework.hooks import 
hook_impl\nfrom kedro.io import DataCatalog\nfrom kedro.pipeline import Pipeline\nfrom kedro.versioning import Journal\n\n\nclass ProjectHooks:\n @hook_impl\n def register_pipelines(self) -> Dict[str, Pipeline]:\n return {\"__default__\": Pipeline([])}\n\n @hook_impl\n def register_config_loader(self, conf_paths: Iterable[str]) -> TemplatedConfigLoader:\n return TemplatedConfigLoader(\n conf_paths,\n globals_pattern=\"*globals.yml\",\n globals_dict={}\n )\n\n @hook_impl\n def register_catalog(\n self,\n catalog: Optional[Dict[str, Dict[str, Any]]],\n credentials: Dict[str, Dict[str, Any]],\n load_versions: Dict[str, str],\n save_version: str,\n journal: Journal,\n ) -> DataCatalog:\n return DataCatalog.from_config(\n catalog, credentials, load_versions, save_version, journal\n )\n\"\"\"\n\n def _write_py(filepath, txt):\n filepath.write_text(txt)\n\n kedro_project_with_tcl = tmp_path / config[\"repo_name\"]\n\n _write_py(\n kedro_project_with_tcl / \"src\" / config[\"python_package\"] / \"hooks.py\", hooks_py\n )\n\n return kedro_project_with_tcl\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"35305621","text":"from rest_framework import generics, status\nfrom rest_framework.response import (Response)\nfrom rest_framework.filters import SearchFilter, OrderingFilter\nfrom django.db import IntegrityError\n\nfrom rest_framework.permissions import (IsAuthenticated,\n AllowAny)\n\nfrom .serializers import (\n ProfileSerializer, FollowSerializer, FollowersSerializer)\nfrom .models import (Profile, Follow)\nfrom authors.apps.authentication.models import User\n\n\nclass ViewAllProfiles(generics.ListAPIView):\n \"\"\"\n this view will enable you to view all profiles in\n the profiles table.\n \"\"\"\n serializer_class = ProfileSerializer\n filter_backends = (SearchFilter, OrderingFilter)\n search_fields = ('bio', 'user__email', 'user__username', 'image')\n ordering_fields = ('bio', 'user__username')\n queryset = Profile.objects.all()\n\n def get_serializer_context(self, *args, **kwargs):\n \"\"\"\n pass the view context to the serializer\n \"\"\"\n return {\"request\": self.request}\n\n\nclass ProfileView(generics.RetrieveUpdateAPIView):\n '''\n class view to update or get a user profile\n '''\n serializer_class = ProfileSerializer\n queryset = Profile.objects.all()\n lookup_field = \"username\"\n\n def get(self, request, username, *args, **kwargs):\n \"\"\"Retrieve a single profile\"\"\"\n try:\n\n profile = Profile.objects.get(\n user__username__iexact=username)\n return Response(data={\"profile\":\n self.serializer_class(\n profile,\n context={'request': request}).data},\n status=status.HTTP_200_OK)\n except Profile.DoesNotExist:\n return Response(data={\"error\": \"Profile not found\"},\n status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, username, *args, **kwargs):\n \"\"\"update a single profile\"\"\"\n # update the profile with the fields provided\n try:\n profile = Profile.objects.get(\n user__username__iexact=username)\n # check if the username in the profile\n # matches the username of the requester\n if profile.user.username != request.user.username:\n data = {'error':\n 'You are not allowed to edit or delete this profile'}\n return Response(data, status.HTTP_403_FORBIDDEN)\n serializer = self.serializer_class(\n instance=profile,\n data=request.data,\n partial=True, context={'request': request})\n 
serializer.is_valid(raise_exception=True)\n            serializer.save()\n            return Response({'profile': serializer.data}, status.HTTP_200_OK)\n        except Profile.DoesNotExist:\n            return Response(data={\"error\": \"Profile not found\"},\n                            status=status.HTTP_404_NOT_FOUND)\n\n\nclass FollowersView(generics.RetrieveAPIView):\n    '''\n    class to view user followers\n    '''\n    permission_classes = (AllowAny,)\n    serializer_class = FollowersSerializer\n    queryset = Follow.objects.all()\n    lookup_field = \"username\"\n\n    def get(self, request, username, *args, **kwargs):\n        \"\"\"Retrieve a user's followers\"\"\"\n        try:\n\n            Profile.objects.get(\n                user__username__iexact=username)\n\n        except Profile.DoesNotExist:\n            return Response(data={\n                \"error\":\n                \"A User with the username {} is not found\".format(username)},\n                status=status.HTTP_404_NOT_FOUND)\n        userfollowers = Follow.objects.filter(\n            following__username__iexact=username\n        )\n        return Response(data={\"followers\":\n                              self.serializer_class(userfollowers,\n                                                    many=True).data},\n                        status=status.HTTP_200_OK)\n\n\nclass FollowingView(generics.RetrieveAPIView):\n    permission_classes = (AllowAny,)\n    serializer_class = FollowSerializer\n    queryset = Follow.objects.all()\n    lookup_field = \"username\"\n\n    def get(self, request, username, *args, **kwargs):\n        \"\"\"Retrieve the profiles a user follows\"\"\"\n        try:\n\n            Profile.objects.get(\n                user__username__iexact=username)\n\n        except Profile.DoesNotExist:\n            return Response(data={\n                \"error\":\n                \"A User with the username {} is not found\".format(username)},\n                status=status.HTTP_404_NOT_FOUND)\n        userfollowers = Follow.objects.filter(\n            user__username__iexact=username\n        )\n        return Response(data={\"following\":\n                              self.serializer_class(userfollowers,\n                                                    many=True).data},\n                        status=status.HTTP_200_OK)\n\n\nclass FollowView(generics.CreateAPIView, generics.DestroyAPIView):\n    \"\"\"\n    class view to follow or unfollow a specific user.\n    \"\"\"\n    permission_classes = (IsAuthenticated,)\n    serializer_class = FollowSerializer\n    queryset = Follow.objects.all()\n    lookup_field = \"username\"\n\n    def post(self, request, username, *args, **kwargs):\n        try:\n\n            user = User.objects.get(\n                username__iexact=username)\n\n        except User.DoesNotExist:\n            return Response(data={\n                \"error\":\n                \"A User with the username {} is not found\".format(username)},\n                status=status.HTTP_404_NOT_FOUND)\n\n        data = {\n        }\n\n        serializer = self.serializer_class(\n            data=data\n        )\n        serializer.is_valid(raise_exception=True)\n        try:\n            serializer.save(following=user, user=self.request.user)\n        except IntegrityError:\n            return Response(data={\n                \"error\": \"You follow {} already\".format(username)\n            }, status=status.HTTP_409_CONFLICT)\n        return Response(data={\n            \"following\": serializer.data\n        }, status=status.HTTP_201_CREATED)\n\n    def delete(self, request, username, *args, **kwargs):\n\n        try:\n\n            User.objects.get(\n                username__iexact=username)\n\n        except User.DoesNotExist:\n            return Response(data={\n                \"error\":\n                \"A User with the username {} was not found\".format(username)},\n                status=status.HTTP_404_NOT_FOUND)\n        try:\n            follow = Follow.objects.get(\n                following__username__iexact=username,\n                user=self.request.user)\n            follow.delete()\n            return Response(data={\n                \"data\":\n                \"You have successfully unfollowed {}\".format(username)\n            })\n        except Follow.DoesNotExist:\n            return Response(data={\n                \"error\":\n                \"You had not followed {} so cannot unfollow\".format(username)\n            }, status=status.HTTP_404_NOT_FOUND)\n
","sub_path":"authors/apps/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"113540347","text":"#!/usr/bin/env python3\n\n\"\"\"\nA simple controller script to communicate with signboard via serial.\n\"\"\"\n\nimport serial # pySerial lib\nimport time\n\n\"\"\"\nclass\n- connect via serial\n- craft a signboard message w/ options\n- send a crafted message\n- send a regular message l1 vs l2\n- clear the display\n\nCreate a new instance with the Signboard Port \n - sb = Signboard(\"/dev/ttyUSB0\")\n\"\"\"\n\n# To do, \n# Define Defaults, validate defaults against user settings\n#\n\n#DRYRUN = True\nDRYRUN = False\nclass Signboard:\n\n\n\tdef __init__(self, port, charlimit=20, linelimit=2, messagelimit=256):\n\t\t\"\"\"\n\t\tBasic instance initialization\n\t\trequired inputs:\n\t\t - port: OS serial port (/dev/ttyUSB0, or COM1)\n\n\t\t optional inputs:\n\t\t - charlimit: # of characters display can handle\n\t\t - linelimit: # of lines the display can handle\n\t\t - messagelimit: Max # of ascii characters in a single message (including controlcodes)\n\t\t\"\"\"\n\n\t\tself.port = port\n\t\tself.baud = 9600\n\t\t# possibly add parity/stopbits/control (8N1 defaults) for now the pyserial defaults work perfectly.\n\n\t\tself.charlimit = charlimit\n\t\tself.linelimit = linelimit\n\t\tself.messagelimit = messagelimit\n\n\t\t#default attributes\n\t\tself.blinkrate = 128\n\t\tself.blink = \"-\"\n\t\tself.inverseblink = \"-\"\n\t\tself.hue_foreground = \"0\"\n\t\tself.hue_background = \"0\"\n\t\tself.font = \"1\"\n\t\tself.ESC=\"\\x1b\"\n\t\tself.scroll_rate = 2\n\t\tself.scroll_repeat = 0\n\t\t# possible font options:\n\n\n\tdef set_scroll_rate(self, i):\n\t\t\"\"\"\n\t\tThis argument specifies the scroll rate. Where 1 is the slowest setting and \n\t\t3 is the fastest setting. A value of zero or no number selects the previous \n\t\trate, or, if no previous rate is available, selects the default rate of 2 \n\t\t(medium). \n\t\t\"\"\"\n\t\tif i > 3:\n\t\t\tprint(\"Error: invalid scroll rate: %s\" % (i))\n\t\telse:\n\t\t\tself.scroll_rate = i\n\n\tdef set_scroll_repeat(self, i):\n\t\t\"\"\"\n\t\tThis argument specifies the number of times the scrolling text should \n\t\trepeat. Acceptable values are from 1 through 3, and represent the actual \n\t\tnumber of repeats. A value of zero or no number will cause the text to \n\t\tscroll continuously until it is explicitly cleared or a new \n\t\tmessage is received by the display (also called an infinite scroll). \n\t\t\"\"\"\n\t\tif i > 3:\n\t\t\tprint(\"Error: invalid scroll repeat: %s\" % (i))\n\t\telse:\n\t\t\tself.scroll_repeat = i\n\n\tdef set_hue(self,f=0,b=0):\n\t\t\"\"\"\n\t\tf - foreground\n\t\tb - background\n\t\t1 Selects dimmest. \n\t\t2 Selects medium brightness. \n\t\t3 Selects brightest (power up setting). \n\t\t4 Selects sparkle. \n\t\t\"\"\"\n\t\tif f > 4 or b > 4:\n\t\t\tprint(\"Error: invalid hue option f=%s, b=%s\" % (f,b))\n\t\t\treturn False\n\t\telse:\n\t\t\tself.hue_foreground = f\n\t\t\tself.hue_background = b\n\n\t\t\tself._write(\"%s%s;%sH\" % (self.ESC, self.hue_foreground, self.hue_background))\n\t\t\treturn True\n\n\tdef set_font(self, f):\n\t\t\"\"\"\n\t\tSet the font for the next message\n\t\t1 - 8x6 pixels 2 lines of 20 characters. \n\t\t2 - 8x8 pixels 2 lines of 15 characters. \n\t\t3 - 16x12 pixels 1 line of 10 characters. \n\t4 - 16x15 pixels 1 line of 8 characters. \n\t5 - 16x8 pixels 1 line of 15 characters. \n\t6 - 16x10 pixels 1 line of 12 characters. \n\t7 - 8x6 pixels 2 lines of 20 characters. * JIS8 / Katakana\n\t8 - 8x6 pixels 2 lines of 20 characters. * Slavic\n\t9 - 8x6 pixels 2 lines of 20 characters. * Cyrillic \n\t\t\"\"\"\n\t\tif int(f) > 9:\n\t\t\tprint(\"Error: invalid font selection: %s\" % (f))\n\t\t\treturn False\n\t\telse:\n\t\t\tself._write(\"%s%sf\" %(self.ESC, f))\n\t\t\treturn True\n\n\tdef connect(self):\n\t\t\"\"\"\n\t\tcreates the connection to the serial device.\n\t\t\"\"\"\n\t\tself.serialport = serial.Serial(self.port)\n\t\tself._write(\"\\x1b 24A\")\n\t\t#self.name = serialport.name\n\t\treturn True\n\n\tdef close(self):\n\t\t\"\"\"\n\t\tcloses the connection to the serial device.\n\t\t\"\"\"\n\t\tself.serialport.close()\n\t\treturn True\n\n\tdef _write(self, s):\n\t\t\"\"\"\n\t\tInternal function for sending data across the port, this should only be called by other methods\n\t\tthat have set up their data first. This handles the ascii/binary encoding required to communicate\n\t\twith the signboard.\n\n\t\tinput: str\n\t\t\"\"\"\n\n\t\tif len(s) <= self.messagelimit:\n\t\t\tif DRYRUN:\n\t\t\t\tprint(\"%s\" % (s))\n\t\t\telse:\n\t\t\t\tself.serialport.write(b'%s' % (s.encode('ascii')))\n\t\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"Error: Message exceeds char limit of %s\" % (self.messagelimit))\n\t\t\treturn False\n\n\tdef print_msg(self, s, l, type=\"static\"):\n\t\t\"\"\"\n\t\tDisplays message: s on line: l\n\t\tYou can pre-setup some options, using the following methods, they all have defaults\n\t\tset_hue\n\t\tset_scroll_rate\n\t\tset_scroll_repeat\n\t\tset_font\n\n\n\t\tOptional arguments:\n\t\ttype = \"static\" - display a static 20-character message\n\t\ttype = \"scroll\" - scrolls a larger message (up to 256 characters including control codes)\n\t\t\"\"\"\n\n\t\t# check to see if we're requesting something outside the line bounds.\n\t\tif l > self.linelimit:\n\t\t\tprint(\"Error: max line limit is %s\" % (self.linelimit))\n\t\t\treturn False\n\n\t\t# check to see if we're requesting something outside the char limit PER line.\n\t\tif len(s) > self.charlimit and type == \"static\":\n\t\t\tprint(\"Error: message exceeds the character limit of %s\" % (self.charlimit))\n\t\t\treturn False\n\n\t\tif type == \"static\":\n\t\t\tmessage = \"\\x1b%s;1C%s\\r\" % (l,s)\n\n\t\tif type == \"scroll\":\n\t\t\tmessage = \"\\x1b%s \\x1bS %s \\r\" % (l,s)\n\n\t\t# figure out formatting/font settings and pre-pend them to the message.\n\t\tself._write(message)\n\t\treturn True\n\n\n\tdef clear(self):\n\t\t\"\"\"\n\t\tclears the display \n\t\t\"\"\"\n\t\t#self._write(\"\\x1b2i\\r\".encode('ascii')) # this didn't work\n\n\t\t#okay this should be simpler, we can derive the spaces from the maxlengths.\n\t\tself.print_msg(\" \",1,\"static\")\n\t\tself.print_msg(\" \",2,\"static\")\n\t\treturn True\n
urls.append(basic_url + str(i))\n start_urls = urls\n\n def parse(self, response):\n major_list = response.xpath('//table[@class=\"tbL2\"]/tr')\n for major in major_list:\n if major.xpath('td/a/@href').extract_first() != None:\n major_url = major.xpath('td/a/@href').extract_first()\n major_name = major.xpath('td/a/text()').extract_first()\n major_id = major.xpath('td[2]/text()').extract_first()\n major_dl = major.xpath('td[3]/text()').extract_first()\n major_xl = major.xpath('td[4]/text()').extract_first()\n\n item = MajorItem()\n item['name'] = major_name\n item['id'] = major_id\n item['dl'] = major_dl\n item['xl'] = major_xl\n # print(major_url)\n yield scrapy.Request(str(major_url),\n headers=self.settings['HESDERS'],\n callback=self.parse_major,\n meta={'item': item},\n dont_filter=True)\n\n def parse_major(self, response):\n item = response.meta['item']\n degree = response.xpath('//div[@class=\"leftWrap clearfix\"]/div[1]/div[1]/h2[2]/text()').extract_first()\n time = response.xpath('//div[@class=\"leftWrap clearfix\"]/div[1]/div[1]/h2[3]/text()').extract_first()\n lessons = response.xpath('//div[@class=\"leftWrap clearfix\"]/div[1]/div[3]/p/text()').extract_first()\n target = response.xpath('//div[@class=\"leftWrap clearfix\"]/div[2]/div[3]/p/text()').extract_first()\n requirements = response.xpath('//div[@class=\"leftWrap clearfix\"]/div[2]/div[4]/p/text()').extract_first()\n ability = response.xpath('//div[@class=\"leftWrap clearfix\"]/div[2]/div[5]/p/text()').extract_first()\n\n item['degree'] = degree\n item['time'] = time\n item['lessons'] = lessons\n item['target'] = target\n item['requirements'] = requirements\n item['ability'] = ability\n\n # print(degree)\n # print(time)\n # print(lessons)\n # print(target)\n # print(requirements)\n # print(ability)\n\n yield item\n","sub_path":"sina_edu/spiders/major_spider.py","file_name":"major_spider.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"66815252","text":"import numpy as np\r\nimport sys\r\n\r\n# train_input=sys.argv[1]\r\n# index_to_word=sys.argv[2]\r\n# index_to_tag=sys.argv[3]\r\n# hmmprior=sys.argv[4]\r\n# hmmemit=sys.argv[5]\r\n# hmmtrans=sys.argv[6]\r\n\r\ntrain_input = \"trainwords.txt\"\r\nindex_to_word = \"index_to_word.txt\"\r\nindex_to_tag = \"index_to_tag.txt\"\r\nhmmprior = \"hmmprior.txt\"\r\nhmmemit = \"hmmemit.txt\"\r\nhmmtrans = \"hmmtrans.txt\"\r\nsequence = 10000\r\n\r\n\r\n## store data tag and word into dictionary\r\ndef getDictionary(filename):\r\n with open(filename, \"r\") as doc:\r\n i = 0 # index start from 0\r\n dictionary = {}\r\n for line in doc:\r\n line = line.rstrip()\r\n dictionary[line] = i\r\n i = i + 1\r\n return dictionary\r\n\r\n\r\n## read train file as input\r\ndef read_train(filename):\r\n with open(filename, \"r\") as doc:\r\n file = []\r\n i = 1\r\n for line in doc: # for each line\r\n if (i <= sequence):\r\n subline = []\r\n newline = line.rstrip()\r\n word = newline.split(\" \") # store each word in the line\r\n for word in word: # for each word element\r\n text = word.split(\"_\")[0]\r\n tag = word.split(\"_\")[1]\r\n element = [text, tag]\r\n subline.append(element)\r\n file.append(subline)\r\n print(i)\r\n i = i + 1\r\n return file\r\n\r\n\r\ndef getPrior(trainfile, dictionary):\r\n prior = [0] * len(dictionary)\r\n for line in trainfile:\r\n first_word = line[0]\r\n tag = first_word[1]\r\n index = dictionary[tag]\r\n prior[index] = prior[index] + 1\r\n new_prior = [x + 1 for x in 
prior]\r\n    return new_prior\r\n\r\n\r\ndef getHmmPrior(prior):\r\n    SUM = sum(prior)\r\n    hmm_prior = []\r\n    for line in prior:\r\n        hmm_prior.append(line / SUM)\r\n    return hmm_prior\r\n\r\n\r\ndef getTrans(trainfile, dictionary):\r\n    result = [[0] * len(dictionary)] * len(dictionary)\r\n    result = np.array(result)  # np.array copies, so the aliased rows from [[0]*n]*n are safe to update\r\n    for line in trainfile:\r\n        length = len(line)\r\n        for i in range(0, length - 1):\r\n            front_word = line[i]\r\n            front_tag = front_word[1]\r\n            front_index = dictionary[front_tag]\r\n            back_word = line[i + 1]\r\n            back_tag = back_word[1]\r\n            back_index = dictionary[back_tag]\r\n            result[front_index][back_index] = result[front_index][back_index] + 1\r\n    new_result = np.add(result, 1)\r\n    return new_result\r\n\r\n\r\ndef getHmmTrans(trans_list):\r\n    result = []\r\n    for line in trans_list:\r\n        SUM = sum(line)\r\n        subline = []\r\n        for element in line:\r\n            subline.append(element / SUM)\r\n        result.append(subline)\r\n    return result\r\n\r\n\r\ndef getEmit(trainfile, tag_dic, word_dic):\r\n    result = [[0] * len(word_dic)] * len(tag_dic)\r\n    result = np.array(result)  # np.array copies, so the aliased rows from [[0]*n]*n are safe to update\r\n    for line in trainfile:\r\n        for word in line:\r\n            text = word[0]\r\n            tag = word[1]\r\n            text_index = word_dic[text]\r\n            tag_index = tag_dic[tag]\r\n            result[tag_index][text_index] = result[tag_index][text_index] + 1\r\n    new_result = np.add(result, 1)\r\n    return new_result\r\n\r\n\r\ndef getHmmEmit(emit_list):\r\n    result = []\r\n    for line in emit_list:\r\n        SUM = sum(line)\r\n        subline = []\r\n        for element in line:\r\n            subline.append(element / SUM)\r\n        result.append(subline)\r\n    return result\r\n\r\n\r\n# export files into txt\r\ndef export(listname, exportname):\r\n    with open(exportname, \"w\") as file:\r\n        for line in listname:\r\n            content = \"\"\r\n            for i in range(0, len(line)):\r\n                if i < len(line) - 1:\r\n                    content = content + str(line[i]) + \" \"\r\n                else:\r\n                    content = content + str(line[i])\r\n            file.write(str(content) + '\\n')\r\n            # print(content)\r\n\r\n\r\ndef export2(listname, exportname):\r\n    with open(exportname, \"w\") as file:\r\n        for line in listname:\r\n            file.write(str(line) + '\\n')\r\n            # print(line)\r\n\r\n\r\ntag_dic = getDictionary(index_to_tag)\r\nword_dic = getDictionary(index_to_word)\r\ntrain_file = read_train(train_input)\r\n\r\n# get the prior\r\nprior_list = getPrior(train_file, tag_dic)\r\nHmm_prior = getHmmPrior(prior_list)\r\n\r\n# get the trans\r\ntrans_list = getTrans(train_file, tag_dic)\r\nHmm_trans = getHmmTrans(trans_list)\r\n\r\n# get the emit\r\nemit_list = getEmit(train_file, tag_dic, word_dic)\r\nHmm_emit = getHmmEmit(emit_list)\r\n\r\n# export 3 files\r\nexport2(Hmm_prior, hmmprior)\r\nexport(Hmm_trans, hmmtrans)\r\nexport(Hmm_emit, hmmemit)\r\n
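\r\n# Worked example of the add-one (Laplace) smoothing used above:\r\n# raw first-tag counts [3, 0, 1] become [4, 1, 2] after getPrior,\r\n# so getHmmPrior yields [4/7, 1/7, 2/7] and no prior is ever zero.\r\n","sub_path":"HW7-HiddenMarkov/learnhmmplot.py","file_name":"learnhmmplot.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"14138275","text":"import praw\n\nreddit = praw.Reddit(client_id = '6O2l6qiy-aVOuA',\n\tclient_secret = None,\n\tuser_agent = 'reddit is very fun by u/bots-',\n\tredirect_uri = 'http://example.com')\n\nreddit.read_only = True\n\nsub = reddit.subreddit('all')\nprint('What subreddit would you like to visit?')\ninp = input()\n\nif inp.strip() != '':\n\tsub = reddit.subreddit(inp)\n\nposts = sub.hot(limit = 10)\n\npostlist = []\n\nfor post, num in zip(posts, range(1,11)):\n\tprint(str(num)+'. '+post.title)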
\n\tpostlist.append(post)\n\nprint(\"Which one's comments would you like to read?\")\ncommNum = int(input()) - 1\n\npostRead = postlist[commNum]\n\nprint('--------------')\ncomms = postRead.comments\ncomms.replace_more()\n\nerrcount = 0\n\nfor i in comms:\n\ttry:\n\t\tif i.is_root:\n\t\t\tprint(i.body)\n\n\t\t\tfor reply in i.replies:\n\t\t\t\tprint('>>>>>'+reply.body)\n\t\t\t\tprint('--')\n\t\t\t\tprint('\\n')\n\t\t\t\n\t\t\tprint('------------------')\n\t\t\tprint('\\n\\n')\n\texcept Exception:\n\t\terrcount += 1\n\nprint(errcount)","sub_path":"Python/redditapp.py","file_name":"redditapp.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"320343002","text":"import os\nimport webapp2\n\n# rendering of HTML files\nimport jinja2\n\n\n# read regular expressions\nimport re\n\n\n# for hashing user id and pass\nimport random\nimport hashlib\nimport hmac\nimport string\n\n\n# import db to save info\nfrom google.appengine.ext import db\n\n\n# for adding a time delay for DB to update\nimport time\n\n\n# import models for use\nfrom comments import Comments\n\nfrom blogs import blog_post\n\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\n# autoescape to autoescape html characters submitted\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),\n                               autoescape=True)\n\n# * - positional arguments\n# ** - keyword arguments\n\n\n# make salt: random string to add with name and password to make hash\ndef make_salt():\n    return ''.join(random.choice(string.letters) for i in range(0, 5))\n\n\n# making hash of name pass and salt. salt stored as well\ndef make_pass_hash(name, pw, salt=None):\n    if not salt:\n        salt = make_salt()\n    h = hashlib.sha256(name+pw+salt).hexdigest()\n    return '%s|%s' % (salt, h)\n\n\n# check if password is valid\ndef valid_pw(name, password, h):\n    salt = h.split('|')[0]\n    return h == make_pass_hash(name, password, salt)\n\n\n# def make_pass_hash(pw):\n#     h = hashlib.sha256(pw).hexdigest()\n#     return '%s' % (h)\n\n\nsecret = 'hello'\n\n\ndef make_secure_cookie(pw):\n    return '%s|%s' % (pw, hmac.new(secret, pw).hexdigest())\n\n\ndef check_secure_cookie(secure_val):\n    val = secure_val.split('|')[0]\n    if secure_val == make_secure_cookie(val):\n        return val\n
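\n# Illustrative round trip (not in the original file; the salt is random,\n# so the exact hash differs between runs):\n#   h = make_pass_hash('alice', 'hunter2')   # e.g. 'kqZxp|9c1f...'\n#   valid_pw('alice', 'hunter2', h)          # True: re-hashes with the stored salt\n#   valid_pw('alice', 'wrong', h)            # False: the digests differ\n\n\nUSER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n\n\ndef valid_username(username):\n    return username and USER_RE.match(username)\n\n\nPASS_RE = re.compile(r\"^.{3,20}$\")\n\n\ndef valid_password(password):\n    return password and PASS_RE.match(password)\n\n\nEMAIL_RE = re.compile(r'^[\\S]+@[\\S]+\\.[\\S]+$')\n\n\ndef valid_email(email):\n    return not email or EMAIL_RE.match(email)\n\n\nclass Handler(webapp2.RequestHandler):\n    def write(self, *template, **params):\n        self.response.out.write(*template, **params)\n\n    def render_str(self, template, **params):\n        t = jinja_env.get_template(template)\n        return t.render(params)\n\n    def render(self, template, **args):\n        self.write(self.render_str(template, **args))\n\n# This function uses the hash function on the password and sets\n# the cookie to that\n    def set_cookie(self, user_id, userid):\n        cookie_val = make_secure_cookie(userid)\n        username = str(user_id)\n        self.response.headers.add_header('Set-Cookie', '%s = %s; Path = /'\n                                         % (username, cookie_val))\n\n    def read_secure_cookie(self, username):\n        cookie_val = self.request.cookies.get(username)\n        return cookie_val and check_secure_cookie(cookie_val)\n\n\n# Implement an initialize function that 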
checks cookie for user id.\n# Once checked and found also checks db. If both are found ....\n# Other functions using handler now have access to User model in db\n# and its properties\n\n def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))\n\n def login(self, user):\n self.set_cookie('user_id', str(user.key().id()))\n\n def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path = /')\n\n\n# Import User models, after loading all the functions that the models use.\nfrom users import User\n\n\nclass BlogMainPage(Handler):\n # Populates main blog page with ten most recent posts\n def get(self):\n if self.user:\n user = self.user\n posts = db.GqlQuery('Select * From blog_post ORDER BY created \\\n Desc limit 10')\n self.render('all-blog-entries.html', posts=posts, user=user)\n else:\n posts = db.GqlQuery('Select * From blog_post ORDER BY created \\\n Desc limit 10')\n self.render('all-blog-entries-unauth.html', posts=posts)\n\n\n# Handling the like POST of users\nclass LikePost(Handler):\n def get(self):\n user = self.user\n uid = user.key().id()\n post_id = self.request.get(\"post_id\")\n post = blog_post.get_by_id(int(post_id))\n # check user to make sure they are not trying to like their own post\n if post.author == user.username:\n return self.redirect('/errorhandler/2/blog')\n\n # Check to see if user already liked post with liked_post classmethod\n # First if statement initializes user.like datastore\n if not user.like:\n post.likes = post.likes+1\n user.like = str(post_id) + \",\"\n elif User.liked_post(uid, post_id):\n user.like = user.like.replace(str(post_id) + \",\", \"\")\n post.likes = post.likes - 1\n else:\n user.like = user.like + str(post_id) + ','\n post.likes = post.likes + 1\n\n user.put()\n post.put()\n\n self.redirect('/redirect/1/blog')\n\n\n# New post handler.\nclass NewPost(Handler):\n def get(self):\n if self.user:\n self.render('blog-entry-page.html')\n else:\n self.redirect('/login')\n\n def post(self):\n if not self.user:\n return self.redirect('/login')\n author = self.user.username\n title = self.request.get('title')\n entry = self.request.get('entry')\n if title and entry:\n # Save blog post\n p = blog_post(title=title, entry=entry,\n author=author, likes=0)\n p.put()\n self.redirect('/blog/%s' % str(p.key().id()))\n else:\n error = 'Please enter Title and Post'\n self.render('blog-entry-page.html', error=error,\n title=title, entry=entry)\n\n\n# Page displaying user blog posts. 
Users can also comment on blog posts.\nclass PostPage(Handler):\n def get(self, post_id):\n key = db.Key.from_path('blog_post', int(post_id))\n post = db.get(key)\n # Check to see if post exists and handles error if it does not\n if post:\n author = blog_post.get_by_id(int(post_id)).author\n postId = int(post_id)\n userId = self.user.key().id()\n comments = Comments.all().filter(\n \"postId =\", postId).order(\n \"created\")\n self.render('new_post_page.html', post=post, author=author,\n comments=comments, userId=userId)\n else:\n self.redirect('/errorhandler/2/blog')\n\n\n# Handling Post requests for comments on user blog posts\n# post_id comes from the URL\n def post(self, post_id):\n if not self.user:\n return self.redirect('/login')\n key = db.Key.from_path('blog_post', int(post_id))\n post = db.get(key)\n # Check to see if post exists and handles error if it does not\n if post:\n postId = int(post_id)\n userId = self.user.key().id()\n author = self.user.username\n comment = self.request.get(\"comment\")\n if comment:\n comment = Comments(postId=postId, userid=userId, author=author,\n comment=comment)\n comment.put()\n time.sleep(1)\n comments = Comments.all().filter(\n \"postId =\", postId).order(\n \"created\")\n self.render(\"new_post_page.html\",\n post=post,\n author=author,\n userId=userId,\n comments=comments)\n else:\n comments = Comments.all().filter(\n \"postId =\", postId).order(\n \"created\")\n self.render(\"new_post_page.html\",\n post=post,\n author=author,\n comments=comments,\n userId=userId,\n error=\"Please enter comment before submitting\")\n else:\n self.redirect('/errorhandler/2/blog')\n\n\n# This class serves the display content for editing a comment\nclass EditComment(Handler):\n def get(self, commentId):\n comment = Comments.get_by_id(int(commentId))\n # Check if comment else handle error.\n # Also check that user accessing created comment\n if comment:\n if not self.user:\n self.redirect('/login')\n if self.user.username != comment.author:\n self.redirect('/unauth/2/blog')\n else:\n self.render(\"edit-comment.html\", comment=comment)\n else:\n self.redirect('/errorhandler/2/blog')\n\n def post(self, commentId):\n comment = Comments.get_by_id(int(commentId))\n # Check person trying to edit comment. 
If not creator redirect\n if self.user and self.user.username == comment.author:\n # Check if comment else handle error\n if comment:\n # getting post info for the redirect URL\n post = comment.postId\n purpose = self.request.get(\"purpose\")\n # get data from request\n entry = self.request.get(\"comment\")\n if \"delete\" in purpose:\n if not self.user:\n self.redirect('/login')\n else:\n comment.delete()\n time.sleep(1)\n self.redirect('/blog/%s' % (post))\n else:\n if not self.user:\n self.redirect('/login')\n else:\n if entry:\n comment.comment = entry\n comment.put()\n time.sleep(1)\n self.redirect('/blog/%s' % (post))\n else:\n self.redirect('/errorhandler/2/blog')\n else:\n self.redirect('/unauth/2/blog')\n\n\n# Editing and deleting blog posts\nclass EditPage(Handler):\n def get(self, post_id):\n key = db.Key.from_path('blog_post', int(post_id))\n post = db.get(key)\n # Check to see if blog post exists, else redirect\n if post:\n # Check to see user owns blog post else redirect unauth\n if self.user.username == post.author:\n self.render('edit-content.html', post=post)\n else:\n self.redirect('/unauth/2/blog')\n else:\n self.redirect('/errorhandler/2/blog')\n\n\n# Post request depends on whether user is deleting or editing comment\n# post_id comes from the URL\n def post(self, post_id):\n post = blog_post.get_by_id(int(post_id))\n # Check to see if blog post exists, else redirect\n if post:\n # Check to see user owns blog post else redirect unauth\n if self.user.username == post.author:\n purpose = self.request.get(\"purpose\")\n title = self.request.get(\"title\")\n entry = self.request.get(\"entry\")\n\n if \"delete\" in purpose:\n # Check to see if valid user\n if not self.user:\n self.redirect('/login')\n else:\n post.delete()\n self.redirect('/redirect/1/blog')\n else:\n # Check to see if valid user\n if not self.user:\n self.redirect('/login')\n else:\n if title and entry:\n post.title = title\n post.entry = entry\n post.put()\n self.redirect('/redirect/1/blog')\n else:\n self.redirect('/unauth/2/blog')\n else:\n self.redirect('/errorhandler/2/blog')\n\n\n# Registering users and handling logging in and out\nclass MainPage(Handler):\n def get(self):\n self.redirect('/signup')\n\n\nclass sign_up(Handler):\n def get(self):\n self.render('user_signup.html')\n\n def post(self):\n error_present = False\n self.username = self.request.get('username')\n self.password = self.request.get('password')\n verify = self.request.get('verify')\n self.email = self.request.get('email')\n params = dict(username=self.username,\n email=self.email)\n\n if not valid_username(self.username):\n params['error_username'] = 'That was not a valid username.'\n error_present = True\n\n if not valid_password(self.password):\n params['error_password'] = \"That was not a valid password.\"\n error_present = True\n\n elif self.password != verify:\n params['error_verify'] = \"Your passwords didn't match.\"\n error_present = True\n\n if not valid_email(self.email):\n params['error_email'] = \"That was not a valid email.\"\n error_present = True\n\n if error_present:\n self.render('user_signup.html', **params)\n else:\n # Register takes in sign up and after\n # getting info runs done function located in Register\n self.done()\n\n def done(self, *a, **kw):\n raise NotImplementedError\n\n\nclass Register(sign_up):\n def done(self):\n username = self.username\n password = self.password\n email = self.email\n u = User.check_name(username)\n if u:\n error = 'That username already exists'\n self.render('user_signup.html', 
error_username=error)\n else:\n # self.set_cookie(self.username, self.password)\n u = User.register(username, password, email)\n u.put()\n\n self.login(u)\n self.redirect('/welcome')\n\n\nclass WelcomePage(Handler):\n def get(self):\n if self.user:\n username = self.user.username\n self.render('welcome-page.html', username=username)\n else:\n self.redirect('/login')\n\n\nclass Login(Handler):\n def get(self):\n self.render('login-form.html')\n\n def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n u = User.login(username, password)\n if u:\n self.login(u)\n self.redirect('/welcome')\n else:\n error = 'Invalid login'\n self.render('login-form.html', error=error)\n\n\nclass Logout(Handler):\n def get(self):\n self.logout()\n self.redirect('/signup')\n\n\n# Redirect delay page that allows for the DB to update. Please note the three\n# entries in URL corresponding to input for get requests\nclass RedirectDelay(Handler):\n def get(self, time, page):\n page = '/' + page\n self.render('redirectdelay.html', time=time, page=page)\n\n\n# Error handler for incorrect Blog and comment post requests\nclass Errorhandler(Handler):\n def get(self, time, page):\n page = '/' + page\n self.render('errorhandler.html', time=time, page=page)\n\n\n# Error handler for unauthorized user edit\nclass Unauth(Handler):\n def get(self, time, page):\n page = '/' + page\n self.render('unauthorized.html', time=time, page=page)\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/welcome', WelcomePage),\n ('/signup', Register),\n ('/login', Login),\n ('/logout', Logout),\n ('/blog', BlogMainPage),\n ('/blog/newpost', NewPost),\n ('/likepost', LikePost),\n ('/edit/([0-9]+)', EditPage),\n ('/editcomment/([0-9]+)', EditComment),\n ('/errorhandler/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)', Errorhandler),\n ('/unauth/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)', Unauth),\n ('/redirect/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)', RedirectDelay),\n ('/blog/([0-9]+)', PostPage)\n\n], debug=True)\n","sub_path":"user-auth-blog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"90945448","text":"'''\nLattice paths\nStarting in the top left corner of a 2×2 grid, and only being able to move to the right and down,\nthere are exactly 6 routes to the bottom right corner.\nHow many such routes are there through a 20×20 grid?\n'''\n\nn = int(input())\nd = {1: [1 for i in range(n)]}\nfor i in range(2, n + 1):\n for j in range(n):\n d[i] = d.get(i, []) + [sum(d[i - 1][j:])]\n\nsumma = 1\nfor i in d.values():\n summa += sum(i)\nprint(summa)\n\n# second way\n# But long execution:\n\n'''\nn = int(input())\ndef summa_1(num):\n return num\n\nrows = summa_1(n)\n\nfor i in range(2, n + 1):\n exec(f'def summa_{str(i)}(num):\\n s = 0\\n for i in range(num):\\n s += summa_{str(i-1)}(num - i)\\n return s')\n exec(f'rows += (summa_{str(i)}(n))')\nprint(rows + 1)\n\n'''\n","sub_path":"task15.py","file_name":"task15.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"336165025","text":"################################################################################\n# app.py:\n# This file handles the user interface and databinding for the application. 
\n################################################################################\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom PyQt5.QtCore import Qt\n\nimport sys\nimport alg\nfrom fileManager import Algorithms, Style, ImageThread\nimport os\nimport subprocess\n\n# A QT Dialog for importing in new styles\nclass StyleLoader(QDialog):\n def __init__(self, parent = None):\n super().__init__(parent)\n self.initUI()\n\n # Initialize the UI and various paramters\n def initUI(self):\n layout = QVBoxLayout(self)\n\n # Select Name\n nameWrapper = QHBoxLayout()\n nameLabel = QLabel(\"Name the Style\")\n self.nameEdit = QLineEdit(self)\n self.nameEdit.setText(\"NewStyle\")\n self.name = \"NewStyle\"\n self.nameEdit.editingFinished.connect(self.updateName)\n self.updateName()\n nameWrapper.addWidget(nameLabel)\n nameWrapper.addWidget(self.nameEdit)\n layout.addLayout(nameWrapper)\n\n\n # Add Description\n descrWrapper = QHBoxLayout()\n descrLabel = QLabel(\"Add a short description\")\n self.descrEdit = QLineEdit(self)\n self.descrEdit.setText(\"Description\")\n self.descr = \"Description\"\n self.descrEdit.editingFinished.connect(self.updateDescr)\n descrWrapper.addWidget(descrLabel)\n descrWrapper.addWidget(self.descrEdit)\n layout.addLayout(descrWrapper)\n\n # Style Image Selection\n imgWrapper = QHBoxLayout()\n self.imgSelect = QPushButton(\"Select the style image\")\n self.imgSelect.clicked.connect(self.updateImage)\n self.imgPath = QLabel()\n self.styleImage = \"\"\n imgWrapper.addWidget(self.imgSelect)\n imgWrapper.addWidget(self.imgPath)\n layout.addLayout(imgWrapper)\n\n # Select algorithm parameters\n algWrapper = QGridLayout()\n self.iterLabel = QLabel(\"Iterations: \")\n self.iterSelect = genSlider(5)\n self.iterSelect.setOrientation(Qt.Horizontal)\n self.iterSelect.sliderMoved.connect(self.updateIter)\n self.updateIter(1)\n algWrapper.addWidget(self.iterLabel, 0, 0)\n algWrapper.addWidget(self.iterSelect, 0, 2)\n\n self.resLabel = QLabel(\"Resolution: \")\n self.resSelect = genSlider(6)\n self.resSelect.sliderMoved.connect(self.updateRes)\n self.updateRes(1)\n algWrapper.addWidget(self.resLabel, 1, 0)\n algWrapper.addWidget(self.resSelect, 1, 2)\n layout.addLayout(algWrapper)\n\n # Select the style directory\n dirWrapper = QHBoxLayout()\n self.dirSelect = QPushButton(\"Select the output/style directory\")\n self.dirSelect.clicked.connect(self.updateStyleDir)\n self.dirPath = QLabel()\n self.styleDir = \"\"\n dirWrapper.addWidget(self.dirSelect)\n dirWrapper.addWidget(self.dirPath)\n layout.addLayout(dirWrapper)\n\n\n # Confirmation Buttons\n buttons = QDialogButtonBox(\n QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)\n buttons.accepted.connect(self.accept)\n buttons.rejected.connect(self.reject)\n buttons.button(QDialogButtonBox.Ok).setEnabled(False)\n layout.addWidget(buttons)\n self.buttons = buttons\n\n # Pop up a file selection dialog\n def updateImage(self):\n fileTypes = \"Images (*.jpg *.png)\"\n self.styleImage = QFileDialog.getOpenFileName(self, \n \"Choose an Image\", \"../styles\", fileTypes)[0]\n self.imgPath.setText(self.styleImage)\n self.checkCompleteness()\n\n # Grab text entry fields\n def updateName(self):\n self.name = self.nameEdit.text()\n\n def updateDescr(self):\n self.descr = self.descrEdit.text()\n\n # Grab the parameters from a slider\n def updateIter(self, value):\n self.iterations = value * 100\n self.iterLabel.setText(\"Iterations: %d\" % self.iterations)\n\n def updateRes(self, value):\n self.resolution = value * 128\n 
self.resLabel.setText(\"Resolution: %d\" % self.resolution)\n\n # Grab an output directory\n def updateStyleDir(self):\n self.styleDir = QFileDialog.getExistingDirectory(self, \n \"Choose a folder\", \"../styles\")\n self.dirPath.setText(self.styleDir)\n self.checkCompleteness()\n\n # Make sure all the required fields are filled in\n def checkCompleteness(self):\n if self.styleImage != \"\" and self.styleDir != \"\":\n self.buttons.button(QDialogButtonBox.Ok).setEnabled(True)\n else:\n self.buttons.button(QDialogButtonBox.Ok).setEnabled(False)\n \n # Return a new style object with the members of the class\n def getStyle(self):\n return Style(name = self.name, descr = self.descr, \n styleImage = self.styleImage, \n alg = Algorithms(self.iterations, self.resolution),\n computed = False, styleDir = self.styleDir)\n \n @staticmethod\n def getNewStyle(parent = None):\n dialog = StyleLoader(parent)\n result = dialog.exec_()\n if result == QDialog.Accepted:\n return dialog.getStyle()\n else:\n return None\nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"StyleDev\")\n folders = [\"../styles/starrynight\", \"../styles/sketch\", \"../styles/picasso\"]\n self.styles = [Style.styleFromFolder(folder) for folder in folders]\n self.initUI()\n\n def initUI(self):\n self.setGeometry(200,200,800,800)\n layoutGrid = QGridLayout(self)\n self.setLayout(layoutGrid)\n\n # Initialize UI Elements, then populate them with updateStyle()\n # Main Preview Image\n self.img = QLabel()\n layoutGrid.addWidget(self.img, 0, 0, 5, 1)\n\n # If necessary, progress bar\n self.progressBar = QProgressBar()\n self.progressBar.setMinimum(0)\n self.progressBar.setMaximum(100)\n layoutGrid.addWidget(self.progressBar, 6, 0)\n\n # Style Selection Menu \n self.styleMenu = QComboBox()\n self.populateStyles()\n self.styleMenu.currentIndexChanged.connect(self.updateStyle)\n layoutGrid.addWidget(self.styleMenu, 0, 1)\n\n # ISO/Game Image Selection\n self.isoSelect = QPushButton(\"Game Image Selection\")\n self.isoSelect.clicked.connect(self.updateISO)\n self.isoPath = \"\"\n layoutGrid.addWidget(self.isoSelect, 1, 1)\n\n # Select the style directory\n self.dolphinSelect = QPushButton(\"Select Dolphin Installation Path\")\n self.dolphinSelect.clicked.connect(self.updateDolphinPath)\n self.dolphinPath = \"\"\n layoutGrid.addWidget(self.dolphinSelect, 2, 1)\n\n # Info Box\n self.info = QLabel()\n layoutGrid.addWidget(self.info, 3, 1) \n\n\n # Import New Style\n self.importButton = QPushButton(\"Import New Style...\")\n self.importButton.clicked.connect(self.importStyle)\n layoutGrid.addWidget(self.importButton, 4, 1)\n\n # Compute Button\n self.computeButton = QPushButton(\"Calculate Style\")\n self.computeButton.clicked.connect(self.computeActiveStyle)\n self.computeThread = None\n layoutGrid.addWidget(self.computeButton, 5, 1)\n\n # Launch Button\n self.launchButton = QPushButton(\"Start Game!\")\n self.launchButton.clicked.connect(self.startGame)\n layoutGrid.addWidget(self.launchButton, 6, 1)\n \n\n # Populate the interface\n self.curStyle = self.styles[0]\n self.updateStatus()\n self.updateStyle(0)\n\n # Show the window \n self.show()\n\n # Populate the style dropdown\n def populateStyles(self):\n self.styleMenu.clear()\n for style in self.styles:\n self.styleMenu.addItem(style.icon, style.name)\n return\n\n # Change the style currently being displayed\n def updateStyle(self, value):\n # Update the preview image\n style = self.styles[value]\n self.img.setPixmap(style.displayImage)\n\n 
self.info.setText(str(style))\n self.curStyle = style\n self.updateStatus()\n\n # Launch a new thread for computation of the current style\n def computeActiveStyle(self):\n if self.computeThread == None:\n style = self.curStyle\n self.computeButton.setText(\"Cancel Calculation\")\n self.computeThread = ImageThread(style, self)\n self.computeThread.finished.connect(self.done)\n self.computeThread.start()\n else:\n self.updateStatus()\n self.progressBar.setValue(0)\n self.computeThread.terminate()\n self.computeThread = None\n\n # Handle the thread finishing\n def done(self):\n self.thread = None\n self.updateStatus()\n return\n\n # Retrieve an ISO path with a file dialog\n def updateISO(self):\n fileTypes = \"GameCube Files (*.iso *.wbfs)\"\n self.isoPath = QFileDialog.getOpenFileName(self, \n \"Browse to the game\", \"../\", fileTypes)[0]\n self.updateStatus()\n\n # Retrieve a dolphin directory with a file dialog\n def updateDolphinPath(self):\n self.dolphinPath = QFileDialog.getExistingDirectory(self, \n \"Choose the Dolphin Directory\", \"../\")\n self.updateStatus()\n\n # Handle creating a new StyleLoader\n def importStyle(self):\n newStyle = StyleLoader.getNewStyle()\n if newStyle != None:\n self.styles.append(newStyle)\n self.populateStyles()\n\n # Disables/enables buttons as appropriate\n def updateStatus(self):\n computed = self.curStyle.computed\n if computed and self.isoPath != \"\" and self.dolphinPath != \"\":\n self.launchButton.setText(\"Start Game!\")\n self.launchButton.setEnabled(True)\n else:\n self.launchButton.setText(\"Need paths set or style to be computed\")\n self.launchButton.setEnabled(False)\n # Compute Button Stuff\n if computed:\n self.progressBar.hide()\n self.computeButton.setEnabled(False)\n self.computeButton.setText(\"Style Already Computed!\")\n else:\n self.progressBar.show()\n self.progressBar.setValue(0)\n self.computeButton.setEnabled(True)\n self.computeButton.setText(\"Calculate Style\")\n\n # Launch the game with modified assets\n def startGame(self):\n self.curStyle.load(self)\n subprocess.run([self.dolphinPath + \"/Dolphin.exe\", \"-e\", self.isoPath])\n# Generate a QSlider with the given number of steps\ndef genSlider(steps):\n slider = QSlider()\n slider.setOrientation(Qt.Horizontal)\n slider.setRange(1, steps)\n slider.setSingleStep(1)\n slider.setPageStep(1)\n return slider\n\ndef main():\n app = QApplication([])\n app.setWindowIcon(QIcon(\"icon.png\"))\n window = MainWindow()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tp3/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"54104393","text":"#!/usr/bin/python\r\n\r\nimport sys, getopt\r\nimport pandas as pd\r\n\r\ndef main(argv):\r\n inputfile = ''\r\n outputfile = ''\r\n try:\r\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\r\n except getopt.GetoptError:\r\n print ('sum_var.py -i -o ')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print ('sum_var.py -i -o ')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--ifile\"):\r\n inputfile = arg\r\n elif opt in (\"-o\", \"--ofile\"):\r\n outputfile = arg\r\n print ('Input file is \"', inputfile)\r\n data = pd.read_csv(inputfile,sep = \"\\t\", header=0, na_values = \".\")\r\n data[\"SUM\"]=data[\"nHet\"] +data[\"nHomAlt\"]\r\n data[\"SUM\"].to_csv(\"Sum_variants\", sep=\"\\t\", header=True, index = False)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
main(sys.argv[1:])","sub_path":"orig/sum_var.py","file_name":"sum_var.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"432246815","text":"from app.api import api\nfrom app.models import Post, Comment, User\nfrom app import db\nfrom flask import jsonify, request, url_for, redirect\nfrom app.api.errors import bad_request\n\n\n# 根据 id 取得 Post\n@api.route('/posts/', methods=['GET'])\ndef get_post(id):\n print('---------this----------')\n return jsonify(Post.query.get_or_404(id).to_dict())\n\n\n@api.route('/posts/', methods=['GET'])\ndef get_posts(username):\n user = User.query.filter_by(username=username).first_or_404()\n page = request.args.get('page', 1, type=1)\n per_page = min(request.args.get('per_page', 10, type=int), 100)\n # posts = user.posts.order_by(Post.timestamp.desc()).paginate(page, per_page, 'api.get_posts')\n posts = Post.to_collection_dict(user.posts.order_by(Post.timestamp.desc()), page, per_page, 'api.get_posts', username=username)\n return jsonify(posts)\n\n\n# 取得所有 Post\n@api.route('/posts', methods=['GET'])\ndef get_all_posts():\n print('---------that----------')\n page = request.args.get('page', 1, type=1)\n per_page = min(request.args.get('per_page', 10, type=int), 100)\n posts = Post.to_collection_dict(Post.query.order_by(Post.timestamp.desc()), page, per_page, 'api.get_all_posts')\n return jsonify(posts)\n\n\n# 创建一条 Post\n@api.route('/posts', methods=['POST'])\ndef new_post():\n # Flask提供request.get_json()方法从请求中提取JSON并将其作为Python结构返回\n data = request.get_json() or {}\n print(data)\n if 'body' not in data or 'user_id' not in data:\n return bad_request('must include body fields')\n post = Post()\n post.from_dict(data, new_post=True)\n # print('000000000000000000000000')\n # print(post)\n db.session.add(post)\n db.session.commit()\n response = jsonify(post.to_dict())\n response.status_code = 201\n response.headers['Location'] = url_for('api.get_post', id=post.id)\n return response\n\n\n# 修改 Post\n@api.route('/posts/', methods=['PUT'])\ndef update_post(id):\n post = Post.query.get_or_404(id)\n data = request.get_json() or {}\n post.from_dict(data, new_post=False)\n db.session.commit()\n return jsonify(post.to_dict())\n\n\n@api.route('posts/', methods=['DELETE'])\ndef delete_post(id):\n post = Post.query.get_or_404(id)\n db.session.delete(post)\n db.session.commit()\n\n\n# 取得 Post 下的评论\n@api.route('/posts//comments', methods=['GET'])\ndef get_comments(id):\n print('---------this----------')\n post = Post.query.get_or_404(id)\n page = request.args.get('page', 1, type=int)\n per_page = min(request.args.get('per_page', 10, type=int), 100)\n data = Post.to_collection_dict(post.comments.order_by(Comment.timestamp.desc()), page, per_page, 'api.get_comments', id=id)\n return jsonify(data)\n\n\n# 创建一条评论\n@api.route('/posts/', methods=['POST'])\ndef new_comment(id):\n data = request.get_json() or {}\n print(data)\n if 'body' not in data or 'author_id' not in data:\n return bad_request('must include body fields')\n comment = Comment()\n comment.from_dict(data)\n comment.post_id = id\n # print('000000000000000000000000')\n # print(post)\n db.session.add(comment)\n db.session.commit()\n return redirect(url_for('api.get_comments', id=comment.post_id))\n","sub_path":"app/api/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"499802176","text":"import 
random\nimport string\nimport json\nimport httplib2\nfrom functools import wraps\nfrom models import Base, User, Book\nfrom flask import Flask, jsonify, request, render_template, url_for, redirect, flash\nfrom flask import session as login_session\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nfrom flask import make_response\nimport os\n\nengine = create_engine('postgresql://catalog:udacity@localhost/catalog')\n\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"super_secret_key\"\n\ndef check_login(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if \"username\" in login_session:\n return f(*args, **kwargs)\n flash(\"whoops, you need to be logged in to do that!\")\n return redirect(url_for(\"login\"))\n return wrapper\n\ndef check_object_owner(username):\n if username == login_session[\"username\"]:\n return True\n return False\n\n\n@app.route('/login')\ndef login():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n@app.route('/fbconnect', methods=['POST'])\ndef fbconnect():\n if request.args.get(\"state\") != login_session['state']:\n response = make_response(json.dumps(\"Invalid state parameter\"), 401)\n response.headers['Content-Type'] = \"application/json\"\n return response\n access_token = request.data\n\n # Exchange client token for server-side token\n app_id = json.loads(open('/var/www/html/fb_client_secrets.json', \"r\").read())[\"web\"][\"app_id\"]\n app_secret = json.loads(open('/var/www/html/fb_client_secrets.json', \"r\").read())[\"web\"][\"app_secret\"]\n url = (\"https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s\") % (app_id,app_secret,access_token)\n h = httplib2.Http()\n result = h.request(url, \"GET\")[1]\n data = json.loads(result)\n\n token = \"access_token=\"+data[\"access_token\"]\n\n url = (\"https://graph.facebook.com/v2.8/me?%s&fields=name,id,email\") % (token)\n h = httplib2.Http()\n result = h.request(url, \"GET\")[1]\n\n data = json.loads(result)\n\n login_session['provider'] = \"facebook\"\n login_session[\"username\"] = data[\"name\"]\n login_session[\"email\"] = data[\"email\"]\n login_session[\"facebook_id\"] = data[\"id\"]\n\n stored_token = token.split(\"=\")[1]\n login_session['access_token'] = stored_token\n\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session[\"user_id\"] = user_id\n\n output = \"\"\n output += \"
<h1>Welcome, \"\n    output += login_session[\"username\"]\n    output += \"!</h1>
\"\n flash(\"Now logged in as %s\" % login_session['username'])\n return output\n\n@app.route(\"/fbdisconnect/\")\ndef fbdisconnect():\n facebook_id = login_session[\"facebook_id\"]\n access_token = login_session[\"access_token\"]\n url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (facebook_id,access_token)\n h = httplib2.Http()\n result = h.request(url, 'DELETE')[1]\n return \"you have been logged out\"\n\n@app.route('/logout')\ndef logout():\n if 'provider' in login_session:\n fbdisconnect()\n del login_session['facebook_id']\n del login_session['username']\n del login_session['email']\n del login_session['user_id']\n del login_session['provider']\n\n flash(\"You have been logged out.\")\n return redirect(url_for(\"showAllBooks\"))\n else:\n flash(\"Whoops, we can't log you out because you weren't logged in!\")\n return redirect(url_for('showAllBooks'))\n\n@app.route('/')\n@app.route('/library/')\ndef showAllBooks():\n books = session.query(Book).all()\n genres = [b.genre for b in session.query(Book.genre).distinct()]\n return render_template('home.html', books=books, genres=genres)\n\n\n@app.route(\"/library/\")\ndef showGenreBooks(book_genre):\n books = session.query(Book).filter_by(genre=book_genre)\n return render_template('genre.html', genre=book_genre, books=books)\n\n\n@app.route('/library//')\ndef showBook(book_id):\n book = session.query(Book).filter_by(id=book_id).one()\n user = session.query(User).filter_by(id=book.user_id).one()\n username = user.username\n return render_template('item.html', book=book, username=username)\n\n\n@app.route('/library//edit', methods=['GET', 'POST'])\n@check_login\ndef editBook(book_id):\n book = session.query(Book).filter_by(id=book_id).one()\n user = session.query(User).filter_by(username=login_session['username']).one()\n if user.id == book.user_id:\n if request.method == 'POST':\n book.title = request.form['title']\n book.author = request.form['author']\n book.description = request.form['description']\n book.genre = request.form['genre']\n session.commit()\n flash(\"Item successfully edited\")\n return redirect(url_for(\"showBook\", book_id=book.id))\n return render_template('edit.html', book=book)\n else:\n flash(\"Sorry, you can't edit a book you didn't create.\")\n return redirect(url_for(\"showBook\", book_id=book.id))\n\n\n@app.route('/library//delete', methods=['GET', 'POST'])\n@check_login\ndef deleteBook(book_id):\n book = session.query(Book).filter_by(id=book_id).one()\n user = session.query(User).filter_by(username=login_session['username']).one()\n if user.id == book.user_id:\n if request.method == 'POST':\n user = session.query(User).filter_by(username=login_session['username']).one()\n session.delete(book)\n session.commit()\n flash(\"Item successfully deleted\")\n return redirect(url_for(\"showAllBooks\"))\n return render_template('delete.html', book=book)\n else:\n flash(\"Sorry, you can't delete a book you didn't create.\")\n return redirect(url_for(\"showBook\", book_id=book.id))\n\n\n\n@app.route('/library/add', methods=['GET', 'POST'])\n@check_login\ndef addBook():\n if request.method == 'POST':\n newBook = Book(title=request.form['title'],\n author=request.form['author'],\n description=request.form['description'],\n genre=request.form['genre'])\n user = session.query(User).filter_by(username=login_session[\"username\"]).one()\n newBook.user_id = user.id\n session.add(newBook)\n session.commit()\n flash(\"Success!\")\n return redirect(url_for(\"showAllBooks\"))\n return 
render_template('additem.html')\n\n\n@app.route('/library/json')\ndef jsonifyBooks():\n books = session.query(Book).all()\n return jsonify(books=[book.serialize for book in books])\n\ndef getUserID(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None\n\ndef createUser(loginsession):\n newUser = User(username=loginsession['username'], email=loginsession['email'])\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=loginsession['email']).one()\n return user.id\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"417659525","text":"# coding: utf-8\n\n\"\"\"\n Bungie.Net API\n\n These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality. # noqa: E501\n\n OpenAPI spec version: 2.3.6\n Contact: support@bungie.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass DestinyInsertPlugsActionRequest(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'action_token': 'str',\n 'item_instance_id': 'int',\n 'plug': 'DestinyInsertPlugsRequestEntry',\n 'character_id': 'int',\n 'membership_type': 'int'\n }\n\n attribute_map = {\n 'action_token': 'actionToken',\n 'item_instance_id': 'itemInstanceId',\n 'plug': 'plug',\n 'character_id': 'characterId',\n 'membership_type': 'membershipType'\n }\n\n def __init__(self, action_token=None, item_instance_id=None, plug=None, character_id=None, membership_type=None): # noqa: E501\n \"\"\"DestinyInsertPlugsActionRequest - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._action_token = None\n self._item_instance_id = None\n self._plug = None\n self._character_id = None\n self._membership_type = None\n self.discriminator = None\n\n if action_token is not None:\n self.action_token = action_token\n if item_instance_id is not None:\n self.item_instance_id = item_instance_id\n if plug is not None:\n self.plug = plug\n if character_id is not None:\n self.character_id = character_id\n if membership_type is not None:\n self.membership_type = membership_type\n\n @property\n def action_token(self):\n \"\"\"Gets the action_token of this DestinyInsertPlugsActionRequest. # noqa: E501\n\n Action token provided by the AwaGetActionToken API call. # noqa: E501\n\n :return: The action_token of this DestinyInsertPlugsActionRequest. # noqa: E501\n :rtype: str\n \"\"\"\n return self._action_token\n\n @action_token.setter\n def action_token(self, action_token):\n \"\"\"Sets the action_token of this DestinyInsertPlugsActionRequest.\n\n Action token provided by the AwaGetActionToken API call. # noqa: E501\n\n :param action_token: The action_token of this DestinyInsertPlugsActionRequest. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._action_token = action_token\n\n @property\n def item_instance_id(self):\n \"\"\"Gets the item_instance_id of this DestinyInsertPlugsActionRequest. # noqa: E501\n\n The instance ID of the item having a plug inserted. Only instanced items can have sockets. # noqa: E501\n\n :return: The item_instance_id of this DestinyInsertPlugsActionRequest. # noqa: E501\n :rtype: int\n \"\"\"\n return self._item_instance_id\n\n @item_instance_id.setter\n def item_instance_id(self, item_instance_id):\n \"\"\"Sets the item_instance_id of this DestinyInsertPlugsActionRequest.\n\n The instance ID of the item having a plug inserted. Only instanced items can have sockets. # noqa: E501\n\n :param item_instance_id: The item_instance_id of this DestinyInsertPlugsActionRequest. # noqa: E501\n :type: int\n \"\"\"\n\n self._item_instance_id = item_instance_id\n\n @property\n def plug(self):\n \"\"\"Gets the plug of this DestinyInsertPlugsActionRequest. # noqa: E501\n\n The plugs being inserted. # noqa: E501\n\n :return: The plug of this DestinyInsertPlugsActionRequest. # noqa: E501\n :rtype: DestinyInsertPlugsRequestEntry\n \"\"\"\n return self._plug\n\n @plug.setter\n def plug(self, plug):\n \"\"\"Sets the plug of this DestinyInsertPlugsActionRequest.\n\n The plugs being inserted. # noqa: E501\n\n :param plug: The plug of this DestinyInsertPlugsActionRequest. # noqa: E501\n :type: DestinyInsertPlugsRequestEntry\n \"\"\"\n\n self._plug = plug\n\n @property\n def character_id(self):\n \"\"\"Gets the character_id of this DestinyInsertPlugsActionRequest. # noqa: E501\n\n\n :return: The character_id of this DestinyInsertPlugsActionRequest. # noqa: E501\n :rtype: int\n \"\"\"\n return self._character_id\n\n @character_id.setter\n def character_id(self, character_id):\n \"\"\"Sets the character_id of this DestinyInsertPlugsActionRequest.\n\n\n :param character_id: The character_id of this DestinyInsertPlugsActionRequest. # noqa: E501\n :type: int\n \"\"\"\n\n self._character_id = character_id\n\n @property\n def membership_type(self):\n \"\"\"Gets the membership_type of this DestinyInsertPlugsActionRequest. # noqa: E501\n\n\n :return: The membership_type of this DestinyInsertPlugsActionRequest. # noqa: E501\n :rtype: int\n \"\"\"\n return self._membership_type\n\n @membership_type.setter\n def membership_type(self, membership_type):\n \"\"\"Sets the membership_type of this DestinyInsertPlugsActionRequest.\n\n\n :param membership_type: The membership_type of this DestinyInsertPlugsActionRequest. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._membership_type = membership_type\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DestinyInsertPlugsActionRequest):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"bungie_sdk_python/Model/Destiny/Requests/Actions/destiny_insert_plugs_action_request.py","file_name":"destiny_insert_plugs_action_request.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"487899748","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef evaluateLabels(y, ypred, visualize=True):\n\n classLabels = np.unique(y)\n conf = np.zeros((len(classLabels), len(classLabels)))\n for tc in xrange(len(classLabels)):\n for pc in xrange(len(classLabels)):\n conf[tc, pc] = np.sum(np.logical_and(y==classLabels[tc], \n ypred==classLabels[pc]).astype(float))\n \n acc = np.sum(np.diag(conf))/y.shape[0]\n\n if visualize:\n plt.figure()\n plt.imshow(conf, cmap='gray')\n plt.ylabel('true labels')\n plt.xlabel('predicted labels')\n\n return (acc, conf)\n","sub_path":"mini-project-5/code/evaluateLabels.py","file_name":"evaluateLabels.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"8699913","text":"import os.path\nimport sys\nimport json\nimport pyglet\nfrom gtts import gTTS\nfrom pint import UnitRegistry \nfrom pprint import PrettyPrinter\nimport speech_recognition as sr\n\np = PrettyPrinter(indent=4)\nureg = UnitRegistry()\nQ = ureg.Quantity\n\ntry:\n import apiai\nexcept ImportError:\n sys.path.append(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)\n )\n import apiai\n\nCLIENT_ACCESS_TOKEN = 'c118e7572df94b9da10d399f29c4ee36' #the integrated one\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n print(\"Say something!\")\n audio = r.listen(source)\n speech = r.recognize_google(audio)\n \nprint(\"You said :\"+ speech)\n\n\n\ndef main():\n ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\n\n request = ai.text_request()\n\n request.lang = 'en' # optional, default value equal 'en'\n\n request.session_id = \"\"\n\n request.query = speech\n response = request.getresponse()\n\n answer = json.loads(response.read())[\"result\"][\"contexts\"][0][\"parameters\"]\n number = float(answer[\"amount\"])\n '''\n p.pprint(answer)\n print(type(number))\n print(type(answer[\"amount\"]))\n print(answer[\"unit-from.original\"])\n print(answer[\"unit-to.original\"])\n '''\n #a = Q(10,'kilogram')\n 
#print(a.to('lb'))\n\n a = Q(number,answer[\"unit-from.original\"])\n result = a.to(answer[\"unit-to.original\"])\n print(result)\n\n\n tts = gTTS(text=str(result), lang=\"en\")\n tts.save(\"hello.mp3\")\n ppath = os.path.abspath(\"hello.mp3\")\n music = pyglet.resource.media(\"hello.mp3\")\n music.play()\n pyglet.app.run()\n os.remove(\"hello.mp3\") #remove temperory file\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"unitConversions.py","file_name":"unitConversions.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"189110654","text":"def Form_Show(sender, packet):\r\n uipart = sender.GetUIPartByName('GLReport')\r\n uipart.Edit()\r\n uipart.LastNumber = 1\r\n\r\ndef ValidateNumber (Column, bound):\r\n master = Column.Master\r\n Column.First()\r\n while not Column.Eof:\r\n column_no = Column.column_no\r\n if column_no > bound:\r\n Column.Edit()\r\n Column.column_no = column_no - 1\r\n Column.Next()\r\n\r\ndef Column_AfterNewRecord(Sender):\r\n master = Sender.Master\r\n last_number = master.LastNumber \r\n Sender.column_no = last_number\r\n\r\ndef Column_BeforeDelete(Sender):\r\n ValidateNumber(Sender, Sender.column_no)\r\n\r\ndef Column_AfterDelete(Sender):\r\n master = Sender.Master\r\n last_number = master.LastNumber\r\n master.LastNumber = last_number - 1\r\n\r\ndef Column_BeforePost (Sender):\r\n master = Sender.Master\r\n master.uistate = Sender.state\r\n\t\r\ndef Column_AfterPost(Sender):\r\n master = Sender.Master\r\n if master.uistate == 3:\r\n last_number = master.LastNumber \r\n master.LastNumber = last_number + 1\r\n\r\ndef column_no_period_exit (Sender):\r\n form = Sender.ownerform\r\n glreport = form.GetUIPartByName('GLReport')\r\n \r\n column_no_period = glreport.column_no_period\r\n last_column_no = glreport.LastNumber\r\n if not(column_no_period > 0) or (column_no_period > last_column_no):\r\n glreport.Edit()\r\n glreport.column_no_period = 0\r\n\r\ndef bSave_Click(sender):\r\n form = sender.OwnerForm\r\n glreport = form.GetUIPartByName('GLReport')\r\n\r\n form.CommitBuffer()\r\n form.PostResult()\r\n form.ResetAndClearData()\r\n\r\n glreport.Edit()\r\n glreport.LastNumber = 1\r\n","sub_path":"dialogs/fs_generator/fs01_entry_gl_report_intr.py","file_name":"fs01_entry_gl_report_intr.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"175993884","text":"import difflib\nfrom Bio import SeqIO\nfrom itertools import product\n\n# doesnt work properly on large cases for some reason\ndef _get_overlap(s1, s2):\n s = difflib.SequenceMatcher(None, s1, s2)\n pos_a, pos_b, size = s.find_longest_match(0, len(s1), 0, len(s2)) \n return s1[pos_a:pos_a+size]\n\ndef get_overlap(s1, s2):\n i = len(s2)\n while i > 0:\n if s2[:i] == s1[-i:]:\n return s1[-i:]\n if s2[-i:] == s1[:i]:\n return s1[:i]\n i -= 1\n return \"\"\n\ndef get_edge_list(records):\n res = {}\n for i,j in product(range(len(records)), repeat=2):\n if i == j:\n continue\n s1 = str(records[i].seq)\n s2 = str(records[j].seq)\n ovrlp = get_overlap(s1, s2)\n if len(ovrlp) >= min(len(s1)/2 -2, len(s2)/2 -2):\n if s1.startswith(ovrlp):\n res[f\"{records[j].name}:{records[i].name}\"] = ovrlp\n else:\n res[f\"{records[i].name}:{records[j].name}\"] = ovrlp\n return res\n\ndef get_path(edges):\n starts = {x.split(\":\")[0]:x for x in edges.keys()}\n ends = [x.split(\":\")[1] for x in edges.keys()]\n skew = [x for x in 
starts.keys() if x not in ends]\n if len(skew) != 1:\n print(f\"Warning: graph is not a straight path! {skew}\")\n skew = skew[0]\n return skew, starts\n\ndef main():\n records = list(SeqIO.parse(\"rosalind_long.txt\", \"fasta\"))\n edges = get_edge_list(records)\n records = {x.name : str(x.seq) for x in records}\n start, path = get_path(edges)\n result = records[start]\n while True:\n edge = path.get(start, False)\n if not edge:\n break\n next = edge.split(\":\")[1]\n bridge = edges[edge]\n result += records[next].replace(bridge, \"\")\n start = next\n with open(\"out.txt\", \"w\") as o:\n print(result, file=o)\n\nif __name__ == \"__main__\":\n main()","sub_path":"Bioinformatics Stronghold/43_long.py","file_name":"43_long.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"324761240","text":"import socket\nimport tqdm\nimport os\n\n#https://www.thepythoncode.com/article/send-receive-files-using-sockets-python\n#From the Python Code: I've initialized some parameters we gonna use, notice that I've used \"0.0.0.0\" as the server IP address, this means all IPv4 addresses on the local machine. You may wonder, why we don't just use our local IP address or \"localhost\" or \"127.0.0.1\" ? Well, if the server has two IP addresses, let's say \"192.168.1.101\" on a network, and \"10.0.1.1\" on another, and the server listens on \"0.0.0.0\", it will be reachable at both of those IPs.\n# device's IP address\nSERVER_HOST = '0.0.0.0'\nSERVER_PORT = 5000\n# receive 4096 bytes each time\nBUFFER_SIZE = 4096\nSEPARATOR = ''\n\n# create the server socket\n# TCP socket\ns = socket.socket()\n\n# bind the socket to our local address\ns.bind((SERVER_HOST, SERVER_PORT))\n\n# enabling our server to accept connections\n# 5 here is the number of unaccepted connections that\n# the system will allow before refusing new connections\ns.listen(5)\n#print('[*] Listening as, ' + string(SERVER_HOST) + \",\" + stringString(SERVER_PORT))\n#print(\"[*] Listening as {{SERVER_HOST}}:{{SERVER_PORT}}\")\nprint(\"[*] Listening ip/port\",(SERVER_HOST,SERVER_PORT))\n#print(\"[*] Listening port\",SERVER_PORT)\n\n# accept connection if there is any\nclient_socket, address = s.accept() \n# if below code is executed, that means the sender is connected\nprint('[+]connected.',address)\n\n# receive the file infos\n# receive using client socket, not server socket\nreceived = client_socket.recv(BUFFER_SIZE).decode()\nfilename, filesize = received.split(SEPARATOR)\n# remove absolute path if there is\nfilename = os.path.basename(filename)\n# convert to integer\nfilesize = int(filesize)\n\n# start receiving the file from the socket\n# and writing to the file stream\nprogress = tqdm.tqdm(range(filesize), filename, unit='B', unit_scale=True, unit_divisor=1024)\nwith open(filename, 'wb') as f:\n while True:\n # read 1024 bytes from the socket (receive)\n bytes_read = client_socket.recv(BUFFER_SIZE)\n if not bytes_read: \n # nothing is received\n # file transmitting is done\n break\n # write to the file the bytes we just received\n f.write(bytes_read)\n # update the progress bar\n progress.update(len(bytes_read))\n\n# close the client socket\nclient_socket.close()\n# close the server 
socket\ns.close()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sccsServerLocalNetworkPythonGear-sendingOnlyWIP/sccsServer1.py","file_name":"sccsServer1.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"372090428","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nimport tweepy\nimport os\nimport datetime\n\n\nclass Command(BaseCommand):\n\n\thelp = 'Retweets from important twitter handles.'\n\n\tdef handle(self, *args, **options):\n\t\tauth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)\n\t\tauth.set_access_token(settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_SECRET_TOKEN)\n\n\t\tapi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n\t\tprevious_tweet_time = None\n\t\ttwitter_handles = [\n\t\t\t\"UP_ESC\", \n\t\t\t\"upparser\", \n\t\t\t\"UPCAPES\", \n\t\t\t\"upcs\"\n\t\t]\n\n\t\tbanned_twitter_handles = [\n\t\t\t\"ce_reps\",\n\t\t\t\"me_reps\", \n\t\t\t\"GEreps\",\n\t\t\t\"MMMreps\",\n\t\t\t\"iereps\",\n\t\t\t\"chereps\",\n\t\t\t\"eeerepsofficial\",\n\t\t\t\"upacm\", \n\t\t\t\"UPCURSOR\",\n\t\t\t\"thecssummit\",\n\t\t\t\"officialupcsi\",\n\t\t\t\"ph_sentinel\",\n\t\t\t\"ICEupdiliman\",\n\t\t\t\"official_DMMME\",\n\t\t\t\"upeeei\",\n\t\t\t\"pjnalzaro\"\n\t\t]\n\n\t\tfor account_name in twitter_handles:\n\t\t\tstatuses = []\n\t\t\tfor status in tweepy.Cursor(api.user_timeline, id=account_name, user_id=account_name).items(8):\n\t\t\t\tstatuses.append(status.id)\n\t\t\tstatuses.reverse()\n\n\t\t\t# Reversing so that earliest tweet is evaluated first.\n\t\t\tfor index in range(0, len(statuses)):\n\t\t\t\tstatus = api.get_status(statuses[index], tweet_mode=\"extended\")\n\n\t\t\t\t# First tweet buffer is just for reference of previous tweet time.\n\t\t\t\tif index > 0:\n\t\t\t\t\tretweeted = False\n\t\t\t\t\tlacks_time = False\n\t\t\t\t\tis_reply = bool(status.in_reply_to_status_id)\n\n\t\t\t\t\t #If tweet date is > 10 days from now\n\t\t\t\t\ttoo_old = ((datetime.datetime.now() - status.created_at).days > 10)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# If the source retweet is already retweeted.\n\t\t\t\t\t\tif (status.retweeted_status.retweeted):\n\t\t\t\t\t\t\tretweeted = True\n\t\t\t\t\t\t# If not yet retweeted, ignore tweet if retweet source is self or banned accounts.\n\t\t\t\t\t\telif (status.retweeted_status.user.screen_name == api.me().screen_name) or (status.retweeted_status.user.screen_name in banned_twitter_handles):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\texcept AttributeError: # Not a Retweet from Page\n\t\t\t\t\t\tif (status.retweeted):\n\t\t\t\t\t\t\tretweeted = True\n\n\t\t\t\t\t# Checking if time between tweets are at least 90 minutes.\n\t\t\t\t\t# This is to avoid live tweets being retweeted.\n\t\t\t\t\tif previous_tweet_time:\n\t\t\t\t\t\telapsed_minutes = (status.created_at - previous_tweet_time).total_seconds()//60\n\t\t\t\t\t\tif elapsed_minutes < 90:\n\t\t\t\t\t\t\tlacks_time = True \n\n\t\t\t\t\tif not (retweeted or is_reply or lacks_time or too_old):\n\t\t\t\t\t\tstatus.retweet()\n\n\t\t\t\tprevious_tweet_time = status.created_at\n\n\t\tself.stdout.write(self.style.SUCCESS('Retweeting task complete.'))\n\t\treturn\n","sub_path":"main/management/commands/twitter_retweet.py","file_name":"twitter_retweet.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"650918481","text":"import sys\nprint(\"This program calculates the geometric and arithmetic mean of a set of \"\n \"positive numbers\\n\")\nno_of_entries = int(input(\"How many numbers do you want to input? \"))\ncount = 0\nnum_list = []\nproduct = 1\nwhile count < no_of_entries:\n num_inputs = eval(input(\"Enter a number: \"))\n if num_inputs < 0:\n print(\"Enter a positive number!\")\n sys.exit()\n else:\n num_list.append(num_inputs)\n product *= num_inputs\n addition = sum(num_list)\n arith_mean = addition/no_of_entries\n geo_mean = product ** (1/no_of_entries)\n count += 1\n\nprint(\"The arithmetic mean is\", arith_mean)\nprint(\"The geometric mean is\", geo_mean)\n","sub_path":"Geo-arith-mean.py","file_name":"Geo-arith-mean.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"262635309","text":"import json\r\n\r\nimport tornado.web\r\nimport aiohttp\r\n\r\nfrom scripts.base_handler import BaseHandler\r\nimport config\r\n\r\n\r\nclass DiscordOauth2LoginHandler(BaseHandler):\r\n\tasync def get(self):\r\n\t\ttry:\r\n\t\t\tself.get_query_argument(\"code\")\r\n\t\texcept tornado.web.MissingArgumentError:\r\n\t\t\tself.set_status(400)\r\n\t\t\tself.write(\"code argument is missing\")\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\r\n\t\tif self.get_secure_cookie(\"token\") is None:\r\n\t\t\tasync with aiohttp.ClientSession() as session:\r\n\r\n\t\t\t\tdata = {\r\n\t\t\t\t\t'client_id': config.client_id,\r\n\t\t\t\t\t'client_secret': config.client_secret,\r\n\t\t\t\t\t'grant_type': 'authorization_code',\r\n\t\t\t\t\t'code': self.get_query_argument('code'),\r\n\t\t\t\t\t'redirect_uri': config.redirect_uri,\r\n\t\t\t\t\t'scope': 'identify',\r\n\t\t\t\t}\r\n\t\t\t\tasync with session.post(\"https://discordapp.com/api/v6/oauth2/token\", data=data) as response:\r\n\t\t\t\t\tif response.status == 200:\r\n\t\t\t\t\t\tjson_response = await response.json()\r\n\t\t\t\t\t\taccess_token = json_response[\"access_token\"]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.set_status(400)\r\n\t\t\t\t\t\tself.write(await response.read())\r\n\t\t\t\t\t\tself.finish()\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\theaders = {\"Authorization\": f\"Bearer {access_token}\"}\r\n\t\t\t\tasync with session.get(\"https://discordapp.com/api/users/@me\", headers=headers) as response:\r\n\t\t\t\t\tif response.status == 200:\r\n\t\t\t\t\t\tjson_response = await response.json()\r\n\t\t\t\t\t\tuser_id = int(json_response[\"id\"])\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.set_status(400)\r\n\t\t\t\t\t\tself.write(await response.read())\r\n\t\t\t\t\t\tself.finish()\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\tguild = self.bot.get_guild(config.guild_id)\r\n\t\t\tif guild.get_member(user_id) is None:\r\n\t\t\t\tself.set_status(400)\r\n\t\t\t\tself.set_header('Content-Type', 'text/html')\r\n\t\t\t\tself.write(\"You must be in the MDL guild\")\r\n\t\t\t\tself.finish()\r\n\t\t\t\treturn\r\n\r\n\t\t\tself.set_secure_cookie(\"token\", str(user_id))\r\n\r\n\t\t# redirect\r\n\t\ttry:\r\n\t\t\tstate = self.get_query_argument(\"state\")\r\n\t\t\tstate_dict = json.loads(state)\r\n\t\t\tif \"from\" in state_dict:\r\n\t\t\t\tself.redirect(state_dict[\"from\"])\r\n\r\n\t\texcept tornado.web.MissingArgumentError:\r\n\t\t\tself.set_status(200)\r\n\t\t\tself.write(\"Unable to redirect\")\r\n\t\t\tself.finish()\r\n\r\n\t\texcept json.decoder.JSONDecodeError:\r\n\t\t\tself.set_status(200)\r\n\t\t\tself.write(\"Unable to 
redirect\")\r\n\t\t\tself.finish()\r\n","sub_path":"backend/routes/discord_oauth2_login.py","file_name":"discord_oauth2_login.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"100454005","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom filenames import *\nfrom public import *\nfrom tolist import *\n\n@public\ndef pycfiles(path=None,pyc=True,shebang=True,submodules=True,relpath=False,include=\"*.pyc\",exclude=None,mindepth=None,maxdepth=None,sorted=True):\n include = tolist(include)+[\"*.py\"]\n return filenames(\n path = path,\n git = False, # exclude .git\n submodules=submodules,\n gitignore=gitignore,\n relpath = relpath,\n include=include,\n exclude=exclude,\n mindepth=mindepth,\n maxdepth=maxdepth,\n sorted=sorted\n )\n\n\n","sub_path":"py_modules/pycfiles.py","file_name":"pycfiles.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"322498127","text":"from rest_framework import serializers\nfrom djoser.serializers import UserCreateSerializer\nfrom drf_extra_fields.fields import Base64ImageField\nfrom . import models\n\n\nclass UserSerializer(UserCreateSerializer):\n imagen = Base64ImageField(default='imagenesUsuarios/usuario.png')\n\n class Meta(UserCreateSerializer.Meta):\n model = models.User\n fields = (\n 'id',\n 'password',\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n 'saldo',\n 'imagen',\n )\n\n\nclass NotificacionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Notificacion\n fields = '__all__'\n\n\nclass IdeaSerializer(serializers.ModelSerializer):\n imagen = Base64ImageField()\n\n class Meta:\n model = models.Idea\n fields = (\n 'id',\n 'nombre',\n 'descripcion',\n 'monto_objetivo',\n 'monto_actual',\n 'intereses',\n 'fecha_publicada',\n 'fecha_limite',\n 'fecha_reembolso',\n 'estado', #Poner como read only y agregar campo is_activa por separado\n 'imagen',\n 'usuario',\n 'categoria',\n 'imagenUsuario',\n )\n read_only_fields = ['monto_actual','imagenUsuario','usuario']\n\n def validate(self, data):\n instance = models.Idea(**data)\n instance.clean()\n return data\n\n\nclass CategoriaSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Categoria\n fields = (\n 'id',\n 'nombre'\n )\n\n\nclass InversionSerializer(serializers.ModelSerializer):\n reembolso = serializers.ReadOnlyField()\n estadoIdea = serializers.ReadOnlyField()\n usuario = serializers.ReadOnlyField(source='usuario.username')\n\n class Meta:\n model = models.Inversion\n fields = (\n 'id',\n 'fecha_inversion',\n 'monto_invertido',\n 'reembolso',\n 'usuario',\n 'idea',\n 'estadoIdea'\n ) \n\n def validate(self, data):\n instance = models.Inversion(**data)\n instance.clean()\n return data","sub_path":"backend/apps/inversion/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"43479596","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport operator\nsys.path.append('/Users/bigticket0501/Developer/PyMOR/code/plot_helpers/')\nimport setup\nimport reader\nimport checker\n\nsetup.style(1)\ncolors = setup.color(0)\nsetup.text()\n\nprint(\"---------------------------------------------\")\nprint(\"This is the name of the program:\", sys.argv[0])\nprint(\"Argument List:\", 
str(sys.argv))\nos.chdir(str(sys.argv[1]))\nmodel = str(sys.argv[2])\nN = str(sys.argv[3])\nT0 = int(sys.argv[4])\nsc = str(sys.argv[5])\nmode = str(sys.argv[6])\nprint(\"---------------------------------------------\")\n\ntarget_dir = '/vel_dual_norm/'\nsetup.checkdir(target_dir)\n\nsearch_dir = './'+model+'_info/vel_dual_norm'\nif model == 'all':\n root, filenames = setup.gtfpath(search_dir, '^.*_'+N+'nb_.*$')\nelse:\n root, filenames = setup.gtfpath(search_dir, '^.*_'+N+'nb_ic_h10_(?!.*-90|.*-80|.*-70).*$')\nif T0 == 1:\n files_dict = setup.create_dict(filenames, '^.*_ic_h10_(-?\\d+)_.*$')\nelif T0 >= 1:\n files_dict = setup.create_dict(filenames, '^.*_zero_h10_(-?\\d+)_.*$')\ndict_final = sorted(files_dict.items(), key=operator.itemgetter(0))\n\nerri = []\nangles = []\nfor angle, fnames in dict_final:\n for fname in fnames:\n data = reader.reader(fname)\n if not data:\n data = 1e8\n dual_norm = np.array(data).astype(np.float64)\n erri.append(float(dual_norm))\n angles.append(int(angle)+90)\n\nanchor = setup.find_anchor()\ndata = np.column_stack((angles, erri))\ndata = data[data[:, 0].argsort()]\nylims = [1e-4, 1e-1]\n\n# scaled the dual norm with argv[4]\nif sc == 'rom':\n # scaled with the rom_norm at training points\n sc_dir = './rom_norm/'\n scale = np.loadtxt(sc_dir+'rom_u_h1norm_N'+N+'_'+mode+'.dat')\n ylb = r'$\\frac{\\triangle_u(\\theta_g)}{\\|\\langle \\bf{u}_{ROM}(\\theta_g) \\rangle_g\\|_{H^1}}$'\nelif sc == 'fom':\n # scaled with the fom_norm at anchor point\n sc_dir = './fom_norm/'\n angles = np.loadtxt(sc_dir+'angle.dat')\n idx = np.where(angles == int(anchor))\n print('Scaled with fom norm anchored at:', angles[idx])\n tmp = np.loadtxt(sc_dir+'fom_u_h1norm.dat')\n scale = tmp[idx]\n ylb = r'$\\frac{\\triangle_u(\\theta_g)}{\\|\\langle \\bf{u}_{FOM}(\\theta^*_g) \\rangle_g\\|_{H^1}}$'\nelif sc == 'romabserr':\n # scaled with the rom_abserr, the result is so called the effectivity\n sc_dir = './vel_mabserr/'\n scale = np.loadtxt(sc_dir+'vel_rom_abserr_N'+N+'_'+mode+'.dat')\n print(scale)\n ylb = r'$\\frac{\\triangle_u(\\theta_g)}{\\|\\langle \\bf{u}_{FOM}-u_{ROM} \\rangle_g\\|_{H^1}}$'\nelif sc == 'eta':\n # scaled with the effectivity\n sc_dir = './vel_mabserr/'\n abserr = np.loadtxt(sc_dir+'vel_rom_abserr_N'+N+'_'+mode+'.dat')\n angle = np.loadtxt('./vel_dual_norm/angle_list_'+mode+'.dat')\n dual = np.loadtxt('./vel_dual_norm/vel_erri_N'+N+'_'+mode+'.dat')\n anchor = setup.find_anchor()\n idx = np.where(angle == int(anchor))\n print(idx)\n ylb = r'$\\frac{\\triangle_u(\\theta_g)}{\\eta_u(\\theta^*_g)}$'\n effect = dual/abserr\n scale = effect[idx]\n print(scale)\n ylims = [1e-1, 1e3]\nelif sc == 'eta_rom':\n # scaled with the rom_abserr\n # This is essentially the effectivity\n sc_dir = './vel_mabserr/'\n abserr = np.loadtxt(sc_dir+'vel_rom_abserr_N'+N+'_'+mode+'.dat')\n angle = np.loadtxt('./vel_dual_norm/angle_list_'+mode+'.dat')\n dual = np.loadtxt('./vel_dual_norm/vel_erri_N'+N+'_'+mode+'.dat')\n anchor = setup.find_anchor()\n idx = np.where(angle == int(anchor))\n print(idx)\n ylb = r'$\\frac{\\triangle_u(\\theta_g)\\|\\langle \\bf{u}_{ROM}(\\theta^*_g) \\rangle_g\\|}{\\eta_u(\\theta^*_g)}$'\n effect = dual/abserr\n sc_dir = './rom_norm/'\n rom_norm = np.loadtxt(sc_dir+'rom_u_h1norm_N'+N+'_'+mode+'.dat')\n print(abserr)\n print(dual)\n print(effect)\n print(rom_norm)\n scale = effect[idx]*rom_norm[idx]\n print(scale)\n ylims = [1e-2, 1e3]\nelif sc == 'eta_rom_all':\n # scaled with the rom_abserr\n # This is essentially the effectivity\n sc_dir = './vel_mabserr/'\n 
abserr = np.loadtxt(sc_dir+'vel_rom_abserr_N'+N+'_'+mode+'.dat')\n angle = np.loadtxt('./vel_dual_norm/angle_list_'+mode+'.dat')\n dual = np.loadtxt('./vel_dual_norm/vel_erri_N'+N+'_'+mode+'.dat')\n anchor = setup.find_anchor()\n idx = np.where(angle == int(anchor))\n print(idx)\n ylb = r'$\\frac{\\triangle_u(\\theta_g)\\|\\langle \\bf{u}_{ROM}(\\theta_g) \\rangle_g\\|}{\\eta_u(\\theta^*_g)}$'\n effect = dual/abserr\n sc_dir = './rom_norm/'\n rom_norm = np.loadtxt(sc_dir+'rom_u_h1norm_N'+N+'_'+mode+'.dat')\n print(abserr)\n print(dual)\n print(effect)\n print(rom_norm)\n scale = effect[idx]*rom_norm\n print(scale)\n ylims = [1e-2, 1e3]\nelif sc == 'domain':\n # scaled with the domain length\n angle = np.loadtxt('./vel_dual_norm/angle_list_'+mode+'.dat')\n dual = np.loadtxt('./vel_dual_norm/vel_erri_N'+N+'_'+mode+'.dat')\n anchor = setup.find_anchor()\n idx = np.where(angle == int(anchor))\n ylb = r'$\\frac{\\triangle_u(\\theta_g)}{|V|^{1/2}}$'\n domain = 40*np.sin(angle*np.pi/180)\n scale = np.sqrt(domain)\n ylims = [1e-2, 1e3]\n\n\n# scaled the erri\ndata[:, 1] = data[:, 1]/scale\nprint(data[:, 1])\n\nsolver = checker.rom_checker(fname, '^.*_(.*)rom_.*$')\n\nfig, ax = plt.subplots(1, tight_layout=True)\nplot_params = {'c': 'k', 'marker': 'o', 'mfc': 'None',\n 'label': solver+' with '+r'$N='+N+'$'}\n#ax.set(ylabel=ylb, xlabel=r'$\\theta_g$', ylim=ylims,\n# xticks=np.linspace(0, 180, 19, dtype=int))\nax.set(ylabel=ylb, xlabel=r'$\\theta_g$',\n xticks=np.linspace(0, 180, 19, dtype=int))\n\nax.set_xticklabels(ax.get_xticks(), rotation=45)\nax.plot(data[:, 0], data[:, 1], **plot_params)\nax.legend(loc=0)\n\nprint(\"---------------------------------------------\")\nfig.savefig('.'+target_dir+'vel_dual_norm_N'+N+'_sc_'+sc+'.png')\nnp.savetxt('.'+target_dir+'vel_erri_N'+N+'_sc_'+sc+'.dat', data[:, 1])\nprint(\"---------------------------------------------\")\n\n","sub_path":"postprocessing/vel_dual_norm_scaled_wparam.py","file_name":"vel_dual_norm_scaled_wparam.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"217914455","text":"import random\n\na = []\nfor i in range(20):\n a.append(random.randint(1,200))\n\ndef select_1(l):\n length = len(l)\n for i in range(length - 1):\n min = i\n max = length - 1\n for j in range(i+1,length):\n if l[j] < l[min]:\n min = j\n if l[j] > l[max]:\n max = j\n l[min], l[i] = l[i], l[min]\n l[max], l[length-1] = l[length-1], l[max]\n\n\nselect_1(a)\nprint(a)\n","sub_path":"practice/practice_4/select_1.py","file_name":"select_1.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"325429945","text":"import imp\nimport os\nimport sys\nfrom mock import patch\nfrom gppylib.test.unit.gp_unittest import GpTestCase,run_tests\n\nclass GpCheckPerf(GpTestCase):\n def setUp(self):\n gpcheckcat_file = os.path.abspath(os.path.dirname(__file__) + \"/../../../gpcheckperf\")\n self.subject = imp.load_source('gpcheckperf', gpcheckcat_file)\n\n def tearDown(self):\n super(GpCheckPerf, self).tearDown()\n\n @patch('gpcheckperf.getPlatform', return_value='darwin')\n @patch('gpcheckperf.run')\n def test_get_memory_on_darwin(self, mock_run, mock_get_platform):\n mock_run.return_value = [1, 'hw.physmem: 1234']\n actual_result = self.subject.getMemory()\n self.assertEquals(actual_result, None)\n\n mock_run.return_value = [0, 'hw.physmem: 0']\n actual_result = self.subject.getMemory()\n 
self.assertEquals(actual_result, None)\n\n mock_run.return_value = [0, 'hw.physmem: 1234']\n actual_result = self.subject.getMemory()\n self.assertEquals(actual_result, 1234)\n\n @patch('gpcheckperf.getPlatform', return_value='linux')\n @patch('gpcheckperf.run')\n def test_get_memory_on_linux(self, mock_run, mock_get_platform):\n mock_run.return_value = [1, 'MemTotal: 10 kB']\n actual_result = self.subject.getMemory()\n self.assertEquals(actual_result, None)\n\n mock_run.return_value = [0, 'MemTotal: 0 kB']\n actual_result = self.subject.getMemory()\n self.assertEquals(actual_result, None)\n\n mock_run.return_value = [0, 'MemTotal: 10 kB']\n actual_result = self.subject.getMemory()\n self.assertEquals(actual_result, 10240)\n\n @patch('gpcheckperf.getPlatform', return_value='abc')\n def test_get_memory_on_invalid_platform(self, mock_get_platform):\n actual_result = self.subject.getMemory()\n self.assertEquals(actual_result, None)\n\n @patch('gpcheckperf.getMemory', return_value=None)\n def test_parseCommandLine_when_get_memory_fails(self, mock_get_memory):\n sys.argv = [\"gpcheckperf\", \"-h\", \"locahost\", \"-r\", \"d\", \"-d\", \"/tmp\"]\n with self.assertRaises(SystemExit) as e:\n self.subject.parseCommandLine()\n\n self.assertEqual(e.exception.code, '[Error] could not get system memory size. Instead, you can use the -S option to provide the file size value')\n\n @patch('gpcheckperf.getMemory', return_value=123)\n def test_parseCommandLine_when_get_memory_succeeds(self, mock_get_memory):\n sys.argv = [\"gpcheckperf\", \"-h\", \"locahost\", \"-r\", \"d\", \"-d\", \"/tmp\"]\n self.subject.parseCommandLine()\n self.assertEqual(self.subject.GV.opt['-S'], 246.0)\n\n @patch('gppylib.commands.unix.isScpEnabled', return_value=False)\n @patch('gpcheckperf.gpsync', return_value=(False, None))\n @patch('gpcheckperf.getHostList', return_value=['localhost'])\n def test_scp_not_enabled(self, mock_hostlist, mock_gpsync, mock_isScpEnabled):\n src = '%s/lib/multidd' % os.path.abspath(os.path.dirname(__file__) + \"/../../../\")\n target = '/tmp/gpcheckperf_$USER/multidd'\n sys.argv = [\"gpcheckperf\", \"-h\", \"locahost\", \"-r\", \"d\", \"-d\", \"/tmp\"]\n\n self.subject.main()\n mock_gpsync.assert_called_with(src, target)\n\n @patch('gppylib.commands.unix.isScpEnabled', return_value=True)\n @patch('gpcheckperf.gpscp', return_value=(False, None))\n @patch('gpcheckperf.getHostList', return_value=['localhost'])\n def test_scp_enabled(self, mock_hostlist, mock_gpscp, mock_isScpEnabled):\n src = '%s/lib/multidd' % os.path.abspath(os.path.dirname(__file__) + \"/../../../\")\n target = '=:/tmp/gpcheckperf_$USER/multidd'\n sys.argv = [\"gpcheckperf\", \"-h\", \"locahost\", \"-r\", \"d\", \"-d\", \"/tmp\"]\n\n self.subject.main()\n mock_gpscp.assert_called_with(src, target)\n\n def test_gpsync_failed_to_copy(self):\n src = '%s/lib/multidd' % os.path.abspath(os.path.dirname(__file__) + \"/../../../\")\n target = '=:tmp/'\n self.subject.GV.opt['-h'] = ['localhost', \"invalid_host\"]\n with self.assertRaises(SystemExit) as e:\n self.subject.gpsync(src, target)\n self.assertIn('[Error] command failed for host:invalid_host', e.exception.code)\n\nif __name__ == '__main__':\n run_tests()\n","sub_path":"gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckperf.py","file_name":"test_unit_gpcheckperf.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"254864236","text":"#Calulator_pwiese.Saechao\n#Jarrod Saechao\n#Nov 7 2017\n#This 
program is a calculator\n#Python 3.6.2\n\ndef main():\n ops = operation()\n num_1 = get_num()\n num_2 = get_num() \n\n if ops == '+':\n answer = addition(num_1, num_2)\n\n elif ops == '*':\n answer = multiplication(num_1, num_2)\n\n elif ops == '-':\n answer = subtraction(num_1, num_2)\n\n elif ops == '/':\n answer = division(num_1, num_2)\n \n else:\n print(\"invalid operator\")\n return\n show_answer(answer)\n\ndef operation():\n op = input('Pick an operation ')\n return op\n\ndef get_num():\n banana = int(input(\"Gimme a number \"))\n return banana\n\n\ndef addition(king, queen):\n answer = king + queen\n return answer\n\n\ndef multiplication(apple,banana):\n answer = apple * banana\n return answer\n\ndef subtraction(num1,num2):\n potato = num1 + num2\n return potato\n\ndef division(jes,pyt):\n if pyt == 0:\n print('You can not do that')\n return 0\n money = jes / pyt\n return money\n\ndef show_answer(answer):\n print(answer,'is the answer')\nmain()\n\n \n\n \n\n \n \n\n \n","sub_path":"calculatorSaechaoJ.py","file_name":"calculatorSaechaoJ.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"1647768","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\nimport logging.config\nfrom srgutil.interfaces import IMozLogging\n\n\nclass Logging(IMozLogging):\n _log_config = {\n # Note that the formatters.json.logger_name must match\n # loggers. key\n 'version': 1,\n 'formatters': {\n 'json': {\n '()': 'dockerflow.logging.JsonLogFormatter',\n 'logger_name': 'srg'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'json'\n },\n },\n 'loggers': {\n 'srg': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n },\n }\n }\n\n def __init__(self, ctx):\n self._ctx = ctx\n self._logger_prefix = ''\n self._apply_config()\n\n def set_config(self, cfg):\n self._log_config = cfg\n\n def set_prefix(self, prefix):\n self._log_config['formatters']['json']['logger_name'] = prefix\n self._log_config['loggers'][prefix] = {'handlers': ['console'], 'level': 'DEBUG'}\n self._apply_config()\n\n def get_prefix(self):\n return self._logger_prefix\n\n def _apply_config(self):\n self._logger_prefix = self._log_config['formatters']['json']['logger_name']\n logging.config.dictConfig(self._log_config)\n\n def get_logger(self, name):\n return logging.getLogger(\"%s.%s\" % (self._logger_prefix, name))\n","sub_path":"srgutil/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"431960849","text":"def letterCheck(string):\r\n dic = {}\r\n numbers = [0]*10\r\n for l in string:\r\n dic.setdefault(l,0)\r\n dic[l] += 1\r\n\r\n if \"Z\" in dic:\r\n if dic[\"Z\"] > 0:\r\n dic[\"E\"] -= dic[\"Z\"]\r\n dic[\"R\"] -= dic[\"Z\"]\r\n dic[\"O\"] -= dic[\"Z\"]\r\n numbers[0] += dic[\"Z\"]\r\n dic[\"Z\"] = 0\r\n if \"U\" in dic:\r\n if dic[\"U\"] > 0:\r\n dic[\"O\"] -= dic[\"U\"]\r\n dic[\"F\"] -= dic[\"U\"]\r\n dic[\"R\"] -= dic[\"U\"]\r\n numbers[4] += dic[\"U\"]\r\n dic[\"U\"] = 0\r\n if \"X\" in dic:\r\n if dic[\"X\"] > 0:\r\n dic[\"I\"] -= dic[\"X\"]\r\n dic[\"S\"] -= dic[\"X\"]\r\n numbers[6] += dic[\"X\"]\r\n dic[\"X\"] = 0\r\n if \"G\" in dic:\r\n if dic[\"G\"] > 0:\r\n dic[\"E\"] -= dic[\"G\"]\r\n 
dic[\"I\"] -= dic[\"G\"]\r\n dic[\"T\"] -= dic[\"G\"]\r\n dic[\"H\"] -= dic[\"G\"]\r\n numbers[8] += dic[\"G\"]\r\n dic[\"G\"] = 0\r\n if \"W\" in dic:\r\n if dic[\"W\"] > 0:\r\n dic[\"T\"] -= dic[\"W\"]\r\n dic[\"O\"] -= dic[\"W\"]\r\n numbers[2] += dic[\"W\"]\r\n dic[\"W\"] = 0\r\n if \"S\" in dic:\r\n if dic[\"S\"] > 0:\r\n dic[\"E\"] -= dic[\"S\"]*2\r\n dic[\"V\"] -= dic[\"S\"]\r\n dic[\"N\"] -= dic[\"S\"]\r\n numbers[7] += dic[\"S\"]\r\n dic[\"S\"] = 0\r\n if \"V\" in dic:\r\n if dic[\"V\"] > 0:\r\n dic[\"E\"] -= dic[\"V\"]\r\n dic[\"I\"] -= dic[\"V\"]\r\n dic[\"F\"] -= dic[\"V\"]\r\n numbers[5] += dic[\"V\"]\r\n dic[\"V\"] = 0\r\n if \"I\" in dic:\r\n if dic[\"I\"] > 0:\r\n dic[\"E\"] -= dic[\"I\"]\r\n dic[\"N\"] -= dic[\"I\"]*2\r\n numbers[9] += dic[\"I\"]\r\n dic[\"I\"] = 0\r\n if \"R\" in dic:\r\n if dic[\"R\"] > 0:\r\n dic[\"E\"] -= dic[\"R\"]*2\r\n dic[\"H\"] -= dic[\"R\"]\r\n dic[\"T\"] -= dic[\"R\"]\r\n numbers[3] += dic[\"R\"]\r\n dic[\"R\"] = 0\r\n if \"O\" in dic:\r\n numbers[1] += dic[\"O\"]\r\n output_str = \"\"\r\n for i in range(0,10):\r\n output_str += str(i)*numbers[i]\r\n return output_str\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n fil = open('input.txt','r')\r\n output = open('output.txt','w')\r\n cases = fil.readline()\r\n print(cases)\r\n for i in range(int(cases)):\r\n string = fil.readline()\r\n output.write(\"Case #\"+str(i+1)+\": \" +letterCheck(string)+\"\\n\")\r\n output.close()\r\n fil.close()\r\nmain()","sub_path":"codes/CodeJamCrawler/16_2_1/EirikKillern/ProblemA.py","file_name":"ProblemA.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"434974607","text":"\"\"\"\nA short and concise description goes here.\n\nAuthor: Shuo Yang\nEmail: imsure95@gmail.com\n\"\"\"\n\n#!/usr/bin/env python3\n\nimport math\n\ntotal_students = 100\ntotal_bars = 49\nopt_dist = []\n\ndef print_optdist( rank ):\n \"\"\"Print out the optimal distribution proposed by the\n student with rank 'rank'.\"\"\"\n print( \"\\nOptimal bar distribution proposed by student with rank {0}:\"\n .format(rank+1) )\n for i in range( rank, total_students ):\n print( \"\\t{0} bars ---> rank {1}\".format(opt_dist[i], i+1) )\n\ndef optimal_bar_distribution( top_rank, num_students, num_bars ):\n \"\"\"The function takes the top rank among students\n and produce the optimal distribution of the total\n number of bars proposed by the top rank student, \n and store the optimal distribution in the global list\n 'distribution'.\"\"\"\n if top_rank == num_students-1: \n # Base case, this is the lowest ranked student of all students,\n # so just fill the table with the total number of bars.\n opt_dist[ top_rank ] = num_bars\n print_optdist( top_rank )\n return\n\n # Initialize the total number of bars distributed\n # to the top ranked student.\n bars2toprank = num_bars\n next_rank = top_rank + 1\n\n # Compute the optimal distribution proposed by the student whose\n # rank is lower than top ranked student by 1.\n optimal_bar_distribution( next_rank, num_students, num_bars )\n\n # Now we have the optimal distribution proposed by\n # the second ranked student, top ranked student can figure out\n # his/her optimal distribution based on this.\n for i in range( next_rank, num_students ):\n if opt_dist[ i ] > 0:\n opt_dist[ i ] = 0\n else: # == 0\n opt_dist[ i ] = 1\n bars2toprank -= 1\n\n opt_dist[ top_rank ] = bars2toprank\n print_optdist( top_rank )\n\n\n## The main function.\ndef main():\n # Initialize the distribution 
table\n for i in range( 0, total_students ):\n opt_dist.append( 0 )\n\n if total_bars >= math.floor( (total_students+1) / 2 ):\n optimal_bar_distribution( 0, total_students, total_bars )\n else:\n print( \"Need at least {0} more bars!\".\n format(math.floor( (total_students+1) / 2 ) - total_bars) )\n\nif __name__ == '__main__':\n main()\n","sub_path":"study-notes/cs545/questionnaire/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"483514698","text":"# INCREASING THE TRAINING SET AND DESCREASING THE TESTING SET TO SEE IF THE WE CAN GET 100% ACCURACY WITH OUR MODEL\n# TAKING 99.9985% OF THE DATASET TO TRAIN MY MODEL\n# TAKING 0.0015% OF THE DATASET TO TEST (APPROX 2 PATIENTS WILL BE TESTED OUT OF 700)\n# RESULT : 100% ACCURACY IS ACHIEVED\n\nimport time #Provides various time related functions.\nimport sys #provides functions that interact with interpreter.\nimport pandas as pd #high performance library for data structures.\nimport pylab as pl #matplotlib based interface for plotting.\nimport numpy as np #Math library enables to compute efficiently and effectively.\nimport scipy.optimize as opt #It is a solver for nonlinear problems eg curve fitting.\nfrom sklearn import preprocessing #Provides several utility functions to transform a raw dataset to more suitable representation.\nfrom sklearn.model_selection import train_test_split #Split arrays and matrices to random train and test data set.\nimport matplotlib.pyplot as plt #library used for plotting graphs.\ntimer=2\ncell_char = pd.read_csv(\"cell_samples.csv\") #Read through the csv file line by line.\n\ncell_char.head() #Return results for n rows.\n\nax = cell_char[cell_char['Class'] == 4][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='red', label='malignant'); #For plotting scatterpoints on the scatterplot.\ncell_char[cell_char['Class'] == 2][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='lime', label='benign', ax=ax); #For plotting scatterpoints on the scatterplot.\n\ncell_char.dtypes #Returns data type of each column.\n\ncell_char = cell_char[pd.to_numeric(cell_char['BareNuc'], errors='coerce').notnull()] #take column and convert to numeric and coerce when specified. 
Does this only when value is not null.\ncell_char['BareNuc'] = cell_char['BareNuc'].astype('int') #Converts the column to int type.\n\ncell_char.dtypes #Returns data type of each column.\n\nfeature_df = cell_char[['Clump', 'UnifSize', 'UnifShape', 'MargAdh', 'SingEpiSize', 'BareNuc', 'BlandChrom', 'NormNucl', 'Mit']] #feature_dataframe is initialised here.\n\nX = np.asarray(feature_df) #feature dataframe is stored as array inside X.\n\n\ncell_char['Class'] = cell_char['Class'].astype('int') #Converts column class to type int.\ny = np.asarray(cell_char['Class']) #class column is stored as array as y.\n\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.0015, random_state=4) #splitting training and testing data set with training being approximately 0.75% of total dataset.\n\nprint(\"\\n\")\ngen = ' ********************* CANCER DETECTION MODEL *********************'\nfor i in gen:\n print (i, end='')\n sys.stdout.flush()\n time.sleep(0.05)\n\nprint(\"\\n\")\n\ngen= ' ******************************************************************'\nfor i in gen:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(0.05)\n\ntime.sleep(timer)\nprint(\"\\n\\n\")\nprint ('Train set:', X_train.shape, y_train.shape) #printing number of training data sets used.\nprint ('Test set :', X_test.shape, y_test.shape) #printing number of testing data sets used.\n\nprint(\"\\n\")\ntime.sleep(1)\nfrom sklearn import svm #provides variety of kernel functions that uses subsets of training points for decision functions.\n\nclf = svm.SVC(gamma='auto',kernel='rbf') #Specifies kernel type to be used in support vector classification.\nclf.fit(X_train, y_train) #training the model using fit method.\npredi = clf.predict(X_test) #learns the link between training and testing data and returns the label for an unlabeled tests.\n\nfrom sklearn.metrics import classification_report, confusion_matrix #for making classification report and generating confusion matrix.\nimport itertools #Functions to create iterators for efficient looping.\n\nprint(\"\\n\")\ndef plot_confusion_matrix(cm, classes, normalize=False,title='Confusion matrix',cmap=plt.cm.Reds): #plots confusion matrix with required colormapping and title.\n\n\n print('Confusion matrix :-')\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap) #plots confusion matrix.\n plt.title(title) # gives title to it.\n plt.colorbar() # Generates colormap bar.\n\n check_p = np.arange(len(classes)) #for setting current location of tick.\n\n plt.xticks(check_p, classes) #plot tick on x axis with label.\n plt.yticks(check_p, classes) #plot tick on y axis with label.\n\n fmt = 'd' #format type integer\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): #looping over data dimensions and creating text annotations.\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout() #automatic adjustment of figure.\n plt.ylabel('True label') #label at y axis.\n plt.xlabel('Predicted label') #label at x axis.\n\ncnf_matrix = confusion_matrix(y_test, predi, labels=[2,4]) # Compute confusion matrix\nnp.set_printoptions(precision=2)\n\nprint (classification_report(y_test, predi)) #print classification report.\n\n\nplt.figure() # Plot non-normalized confusion matrix\n\nplot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion matrix') #plot confusion matrix.\nprint(\"\\n\")\nprint('THE WEIGHTED 
MEAN OF PRECISION AND RECALL IS :-')\n\nfrom sklearn.metrics import f1_score #import f1_score.\nprint(f1_score(y_test, predi, average='weighted'))\nprint(\"\\n\\n\")\naccuracy = f1_score(y_test, predi, average='weighted')*100\nprint(int(accuracy),'OUT OF EVERY 100 CASES IN OUR TEST SET GAVE THE CORRECT RESULTS OF THE DETECTION\\n\\n')\nprint('ACCURACY OF THE CANCER DETECTION MODEL IS :-')\nprint(round(accuracy,2),'%\\n\\n') #print f1_score.\nprint(\"\\n ************************** THANK YOU *******************************\\n\")\ngen= ' ********************************END*********************************\\n'\nfor i in gen:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(0.05)\nplt.show() #output the graphs.\n","sub_path":"cancer_det2.py","file_name":"cancer_det2.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"494733449","text":"from django.shortcuts import render, HttpResponse, render_to_response, HttpResponseRedirect\r\nfrom django.core.urlresolvers import reverse\r\nfrom django import forms\r\nfrom website.forms import MomentForm\r\nimport os\r\n# Create your views here.\r\n\r\n\r\n\r\ndef index(request):\r\n if request.method == 'POST':\r\n file = request.FILES.get('file')\r\n f = open(os.path.join('data', file.name), 'wb')\r\n for chunk in file.chunks():\r\n f.write(chunk)\r\n f.close()\r\n return HttpResponse('OK!')\r\n return render(request, 'index.html')\r\n\r\n\r\ndef disk(request):\r\n if request.method == \"POST\":\r\n uf = UserForm(request.POST, request.FILES)\r\n if uf.is_valid():\r\n return HttpResponse('Upload OK!')\r\n else:\r\n uf = UserForm()\r\n return render_to_response('disk.html', {'uf': uf})\r\n\r\ndef welcome(request):\r\n return HttpResponse('
欢迎你!
')\r\n\r\ndef moment_input(request):\r\n if request.method == 'POST':\r\n form = MomentForm(request.POST)\r\n if form.is_valid():\r\n moment = form.save()\r\n moment.save()\r\n return HttpResponseRedirect(reverse('welcome'))\r\n else:\r\n form = MomentForm()\r\n PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\n print(PROJECT_ROOT)\r\n print(form)\r\n return render(request, os.path.join(PROJECT_ROOT, 'templates', 'moment_input.html'),\r\n {'form': form})\r\n","sub_path":"drill/file_upload/website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"95781034","text":"import logging\n\nfrom setuptools import setup, find_packages\n\nreadme_file = 'README.md'\n\ntry:\n import pypandoc\n\n long_description = pypandoc.convert(readme_file, to='rst')\nexcept ImportError:\n logging.warning('pypandoc module not found, long_description will be the raw text instead.')\n with open(readme_file, encoding='utf-8') as fp:\n long_description = fp.read()\n\nsetup(\n name='wxpy',\n version='0.0.6',\n packages=find_packages(),\n package_data={\n '': ['*.md'],\n },\n include_package_data=True,\n install_requires=[\n 'itchat>=1.2.26'\n ],\n url='https://github.com/youfou/wxpy',\n license='Apache 2.0',\n author='Youfou',\n author_email='youfou@qq.com',\n description='微信个人号 API,基于 itchat,告别满屏 dict,更有 Python 范儿',\n long_description=long_description,\n keywords=[\n '微信',\n 'WeChat',\n 'API'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Operating System :: OS Independent',\n 'Topic :: Communications :: Chat',\n 'Topic :: Utilities',\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"530490732","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render\r\nimport re\r\nimport urllib.request\r\nimport requests\r\nfrom multiprocessing import Queue\r\n\r\ndef eq(request):\r\n page=requests.get('http://iiees.ac.ir', proxies=urllib.request.getproxies())\r\n content=page.content.decode('utf-8')\r\n location=re.findall(r'
\\s......:\\s(.+Province.+)(.+)&nbsp;&nbsp;(.+)',content,re.M)\r\n    magnitude=re.findall(r'\\s.....:([0-9].[0-9])
',content,re.M)\r\n location='Location: '+str(location[0])\r\n datetime=str(datetime).split()\r\n date=str(datetime[0])\r\n time=str(datetime[1])\r\n date='Date: '+date[3:len(date)-2]\r\n time='Time: '+time[2:len(time)-3]\r\n magnitude='Magnitude: '+str(magnitude[0])\r\n dic={'location':location,\r\n 'date':date,\r\n 'time':time,\r\n 'magnitude':magnitude,}\r\n return render(request, 'eq/index.html', dic)\r\n# Create your views here.\r\n","sub_path":"eq/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"180861860","text":"import pytest\nimport requests\n\nfrom pytest_bdd import scenarios, when, then\n\nANIME_API = 'https://api.jikan.moe/v3'\n\nscenarios('anime.feature', example_converters=dict(number=str, title=str))\n\n\n@pytest.fixture\n@when('the Anime API is queried with \"\"')\ndef anime_response(number):\n params = {'format': 'json'}\n response = requests.get(ANIME_API + '/anime/' + number, params=params)\n return response\n\n\n@then('the response shows title of \"\"')\ndef anime_response_title(anime_response, title):\n assert title == anime_response.json()['title']\n\n\n@then('the response status code is 200')\ndef anime_response_code(anime_response):\n assert anime_response.status_code == 200\n","sub_path":"src/feature/test_anime_names.py","file_name":"test_anime_names.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"457648199","text":"'''\nComputation lab 1, \nexercises NumpyIntro\n\n'''\n\nimport numpy as np \n\n\n# exercise 1\n# define matrix\n\nA = np.array([[3, -1, 4], [1, 5, -9]])\nB = np.array([[2, 6, -5, 3], [5, -8, 9, 7], [9, -3, -2, -3]])\n\n# def function to get matrix multiplication\ndef get_matmul(A,B):\n\n\ty = np.dot(A,B)\n\treturn y\n\n\nprint(get_matmul(A,B))\n\n\n# exercise 2\n\n\n\ndef get_eq1():\n\tC = np.array([[3, 1, 4], [1, 5, 9], [-5, 3, 1]])\n\tD = get_matmul(C,C)\n\teq1 = - get_matmul(D,C) + 9 * D - 15 * C\n\treturn eq1\n\nprint(get_eq1())\n\n\n# exercise 3\n\n\ndef get_triu(x):\n\ttriu = np.triu(x)\n\treturn triu\n\ndef get_tril(x):\n\ttril = np.tril(x)\n\treturn tril\n\n\nM = get_triu(np.ones((7, 7)))\nN = get_tril(np.full((7, 7), -6))+5\n\n\nprint(M)\nprint(N)\n\nMN = get_matmul(M, N)\nMNM = get_matmul(MN,M).astype(np.int64)\n\nprint(MNM)\n\n\n# exercise 4 \nz = np.arange(0, 30, 5)\nx = np.copy(z)\nmask = x > 0\nx[mask] = - x[mask]\nprint(x)\n\n\n# exercise 5\n\nW = np.arange(6).reshape((3,2))\nW1 = W.T\nT = np.full((3,3), 3)\nT1 = np.tril(T)\nJ = np.diag([-2, -2, -2])\nZ1 = np.zeros((3, 3))\nZ2 = np.zeros((2, 2))\nZ3 = np.zeros((2, 3))\nZ4 = np.zeros((3, 2))\n\nW2 = W1.T\nI1 = np.eye(3)\nYY1 = np.hstack((Z1,W2,I1))\nYY2 = np.hstack((W1,Z2,Z3))\nYY3 = np.hstack((T1,Z4,J))\n\nYYfinal = np.vstack((YY1, YY2, YY3))\n\nprint(W1)\nprint(T1)\nprint(J)\nprint(Z1)\nprint(W2)\nprint(YYfinal)\n\n\n\n# exercise 6\n\nH1 = np.arange(9).reshape((3,3))\nv = H1.sum(axis=1)\nvt = v.reshape((3,1))\nH3 = H1 / vt\ncheck = H3.sum(axis=1)\n\nprint(H1)\nprint(vt)\nprint(H3)\nprint(check)\n\n\n# exercise 7\n\ngrid = np.load(\"grid.npy\")\n#print(grid)\n\n# horizontal\ngrid[:,:-3]\ngrid[:,1:-2]\ngrid[:,2:-1]\ngrid[:,3:]\nprint(np.max(grid[:,:-3] * grid[:,1:-2] * grid[:,2:-1] * grid[:,3:]))\n\n\n\n# vertical \ngrid[:-3,:]\ngrid[1:-2,:]\ngrid[2:-1,:] \ngrid[3:,:]\nprint(np.max(grid[:-3,:] * grid[1:-2,:] * grid[2:-1,:] * grid[3:,:]))\n\n# diag 
1\ngrid[:-3,3:]\ngrid[1:-2,2:-1]\ngrid[2:-1,1:-2] \ngrid[3:,:-3]\nprint(np.max(grid[:-3,3:]* grid[1:-2,2:-1] * grid[2:-1,1:-2]* grid[3:,:-3]))\n\n# diag 2\n\ngrid[:-3,:-3]\ngrid[1:-2,1:-2]\ngrid[2:-1,2:-1] \ngrid[3:,3:]\nprint(np.max(grid[:-3,:-3] * grid[1:-2,1:-2] * grid[2:-1,2:-1] * grid[3:,3:]))\n\n\n\n\n","sub_path":"probsets/comp_1/numpyintro.py","file_name":"numpyintro.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"225799750","text":"import pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statistics\nfrom sklearn.mixture import GaussianMixture\nfrom scipy.stats import norm\n# import glob\n# import operator\n# import os\n# import h5py as h5\n# import pandas\n# import scipy.stats as sc\n# from scipy.stats import gaussian_kde\n\n\ndef rtd_hist(args):\n rt = args[\"rt\"]\n fname = args[\"fname\"]\n ptitle = args[\"ptitle\"]\n hist_weights = args[\"hist_weights\"]\n nbins = args[\"nbins\"]\n y_max = args[\"y_max\"]\n plt.hist(rt, bins=nbins, normed=True,\n fill=True,\n color=\"blue\",\n weights=hist_weights)\n plt.subplots_adjust(bottom=0.2)\n plt.subplots_adjust(left=0.2)\n plt.xlabel('Residence time (log10 day)') # , fontsize=14)\n\n plt.ylabel('Probability') # , fontsize=14)\n plt.title(ptitle, fontsize=12)\n plt.xlim(-1.5, 3)\n plt.ylim(0, y_max)\n plt.xticks(np.arange(-1, 4, 1),\n 10**(np.arange(-1, 4, 1)).astype(float))\n plt.yticks(np.arange(0, (y_max + 0.1), 0.2)) # , fontsize=12)\n fig = plt.gcf()\n fig.set_size_inches(5, 3)\n fig.savefig(fname, dpi=600)\n plt.clf()\n\n\ndef rtd_stage(args):\n rt = args[\"rt\"]\n rt_label = args[\"rt_label\"]\n rt_color = args[\"rt_color\"]\n fname = args[\"fname\"]\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax2 = ax1.twinx()\n ax1.plot(river_level[:, 0], river_level[:, 3],\n \"-\", c=\"b\", label=\"River level\")\n ax2.plot(release_times / 24, np.log10(rt / 24),\n \".\", c=rt_color, label=rt_label)\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n ax1.set_xlabel('Time (day)')\n ax1.set_ylabel('Rivel level (m)')\n ax2.set_ylabel(rt_label + '(log10 day)')\n ax2.yaxis.label.set_color(rt_color)\n ax2.set_yticks(np.arange(-1, 4, 1),\n 10**(np.arange(-1, 4, 1)).astype(float))\n ax2.spines['right'].set_color(rt_color)\n ax2.tick_params(axis=\"y\", colors=rt_color)\n ax1.legend(h1 + h2, l1 + l2, loc=2, frameon=False)\n ax2.set_ylim(-2, 3.5)\n fig = plt.gcf()\n fig.set_size_inches(9, 3)\n fig.subplots_adjust(bottom=0.2)\n fig.subplots_adjust(left=0.1)\n fig.savefig(fname, dpi=600)\n plt.clf()\n\n\n# input\ndata_dir = \"/Users/song884/particle_tracking/paper/data/\"\n# output\nresults_dir = \"/Users/song884/particle_tracking/paper/results/\"\nfigures_dir = \"/Users/song884/particle_tracking/paper/figures/\"\n\n# all the cases have same conductance boundary 2.5*10^-13 m\n# baseline case is facies-based homogenous model\ncase_name = \"baseline\"\n\n# read river stage time series\nriver_file = data_dir + case_name + \"/BC_2008_2015/DatumH_River_322.txt\"\nriver_level = np.loadtxt(river_file)\nriver_level = np.asarray(river_level)\nriver_level[:, 0] = river_level[:, 0] / 3600 / 24\n\n# load darcy flux data when & where particles were released\nflux_file = data_dir + case_name + \"/release_flux.pickle\"\npickle_file = open(flux_file, \"rb\")\nrelease_flux = pickle.load(pickle_file)\n\n# load coordinates where particles were released\ncoord_file = data_dir + case_name + 
\"/release_coord.pickle\"\npickle_file = open(coord_file, \"rb\")\nrelease_coord = pickle.load(pickle_file)\nrelease_coord = np.asarray(release_coord)\n\n# load all particles\n# pt_startT,particles inject time\n# pt_end,particels dicharge time\n# pt_startC,particels release cell\n# pt_dis,length of path\n# pt_status,particles status\npt_file = data_dir + case_name + \"/all.pickle\"\npickle_file = open(pt_file, \"rb\")\npt_results = pickle.load(pickle_file)\npt_startT = pt_results[\"startT\"]\npt_endT = pt_results[\"endT\"]\npt_startC = pt_results[\"startC\"]\npt_dis = pt_results[\"dis\"]\npt_status = pt_results['status']\npt_index = pt_results['index']\nflux_weight = pt_results['flux_weight']\n\n# days in year\nyears = [366, 365, 365, 365, 366, 365, 365, 365]\n\n# caculate max/min/mean/median residence time of particles\n# released in the same time\nrelease_times = np.unique(pt_startT)\nmax_rt = np.asarray([-1.0] * len(release_times))\nmin_rt = np.asarray([-1.0] * len(release_times))\nmean_rt = np.asarray([-1.0] * len(release_times))\nmedian_rt = np.asarray([-1.0] * len(release_times))\nfor irelease in range(len(release_times)):\n temp = ((pt_startT == release_times[irelease]) & (pt_status == 4))\n if(len(temp[temp == True]) > 0):\n max_rt[irelease] = np.max(pt_endT[temp] - pt_startT[temp])\n min_rt[irelease] = np.min(pt_endT[temp] - pt_startT[temp])\n mean_rt[irelease] = np.average(\n (pt_endT[temp] - pt_startT[temp]), weights=abs(flux_weight[temp]))\n median_rt[irelease] = statistics.median(\n pt_endT[temp] - pt_startT[temp])\n# mean_rt_file = figures_dir + case_name + \"_mean_rt.txt\"\n# np.savetxt(mean_rt_file, np.column_stack(\n# ((np.asarray(release_times), np.asarray(mean_rt)))), delimiter=\",\")\n\n# pt_cal_days: the index the day in a year\npt_cal_days = pt_startT / 24\nfor iyear in range(len(years)):\n pt_cal_days[pt_cal_days >= years[iyear]\n ] = pt_cal_days[pt_cal_days >= years[iyear]] - years[iyear]\n\n# plot all years\ntemp = (pt_status == 4)\nargs = {\"rt\": np.log10((pt_endT[temp] - pt_startT[temp]) / 24),\n \"hist_weights\": abs(flux_weight[temp]) / sum(abs(flux_weight[temp])),\n \"fname\": figures_dir + case_name + \"/rtd.png\",\n \"ptitle\": \"All particles in 6.8-yr tracking\",\n \"nbins\": 100,\n \"y_max\": 1\n }\nrtd_hist(args)\n\n\n# plot_seperate_year\nfor iyear in range(1, 7):\n temp = ((pt_status == 4)\n & (pt_startT > (24 * sum(years[0:iyear])))\n & (pt_startT < (24 * sum(years[0:(iyear + 1)]))))\n args = {\"rt\": np.log10((pt_endT[temp] - pt_startT[temp]) / 24),\n \"hist_weights\": abs(flux_weight[temp]) / sum(abs(flux_weight[temp])),\n \"fname\": figures_dir + case_name + \"/y\" + str(iyear) + \"_rtd.png\",\n \"ptitle\": \"Particles released in year \" + str(2008 + iyear),\n \"nbins\": 100,\n \"y_max\": 1\n }\n rtd_hist(args)\n\n# # plot rtd of seperate slice\n# indexs = np.unique(pt_index)\n# for i_index in indexs:\n# temp = ((pt_status == 4)\n# & (pt_index == i_index))\n# args = {\"rt\": np.log10((pt_endT[temp] - pt_startT[temp]) / 24),\n# \"hist_weights\": abs(flux_weight[temp]) / sum(\n# abs(flux_weight[temp])),\n# \"fname\": (figures_dir + case_name + \"/slice\"\n# + str(i_index) + \"_rtd.png\"),\n# \"ptitle\": (\"Particles released in Y= \" +\n# str(int(release_coord[i_index, 1])) + \" m\"),\n# \"nbins\": 100,\n# \"y_max\": 2\n# }\n# rtd_hist(args)\n\n# # level_vs_mean_residence time\nargs = {\"rt\": mean_rt,\n \"rt_label\": \"Mean residence time\",\n \"rt_color\": \"orange\",\n \"fname\": figures_dir + case_name + \"/level_vs_mean_rt.png\"\n 
}\nrtd_stage(args)\n\n# # level_vs_max_residence time\nargs = {\"rt\": max_rt,\n \"rt_label\": \"Maxium residence time\",\n \"rt_color\": \"r\",\n \"fname\": figures_dir + case_name + \"/level_vs_max_rt.png\"\n }\nrtd_stage(args)\n\n\n# # level_vs_in_residence time\nargs = {\"rt\": min_rt,\n \"rt_label\": \"Minimum residence time\",\n \"rt_color\": \"g\",\n \"fname\": figures_dir + case_name + \"/level_vs_min_rt.png\"\n }\nrtd_stage(args)\n\n# # level_vs_median_residence time\nargs = {\"rt\": median_rt,\n \"rt_label\": \"Median residence time\",\n \"rt_color\": \"purple\",\n \"fname\": figures_dir + case_name + \"/level_vs_median_rt.png\"\n }\nrtd_stage(args)\n\n\nfig_name = figures_dir + case_name + \"/2d_rtd.png\"\ntemp = (pt_status == 4)\nplt.hist2d(pt_startT[temp] / 365 / 24 + 2008,\n np.log10((pt_endT[temp] - pt_startT[temp]) / 24),\n bins=[100, 1000],\n vmin=0,\n vmax=1.4,\n normed=True,\n cmap=plt.cm.Reds,\n weights=abs(flux_weight[temp]) / sum(abs(flux_weight[temp])),\n )\nplt.xlabel('Time (year)') # , fontsize=14)\nplt.ylabel('Residence time (log10 day)') # , fontsize=14)\nplt.yticks(np.arange(-1, 4, 1), 10**(np.arange(-1, 4, 1)).astype(float))\ncbar = plt.colorbar(format=\"%.1e\")\ncbar.ax.set_ylabel(\"Probability Density (-)\",\n rotation=270, labelpad=20)\nplt.subplots_adjust(bottom=0.2)\nplt.subplots_adjust(left=0.2)\nplt.title(\"2D Histogram of Travel Time\", fontsize=12)\n# plt.yticks(np.arange(0, (y_max + 0.1), 0.2)) # , fontsize=12)\nfig = plt.gcf()\nfig.set_size_inches(10, 5)\nfig.savefig(fig_name, dpi=600)\nplt.clf()\nprint(\"Hello World!\")\n\nfig_name = figures_dir + case_name + \"/2d_rtd_riverStage.png\"\ntemp = (pt_status == 4)\nplt.hist2d(river_level[pt_startT.astype(int), 3][temp],\n np.log10((pt_endT[temp] - pt_startT[temp]) / 24),\n bins=[100, 100],\n normed=True,\n vmin=0,\n vmax=1.4,\n cmap=plt.cm.Blues,\n weights=abs(flux_weight[temp]) / sum(abs(flux_weight[temp])),\n )\nplt.xlabel('River Stage (m)') # , fontsize=14)\nplt.ylabel('Residence time (day)') # , fontsize=14)\nplt.yticks(np.arange(-1, 4, 1), 10**(np.arange(-1, 4, 1)).astype(float))\ncbar = plt.colorbar(format=\"%.1e\")\ncbar.ax.set_ylabel(\"Probability Density (-)\",\n rotation=270, labelpad=20)\nplt.subplots_adjust(bottom=0.2)\nplt.subplots_adjust(left=0.2)\nplt.title(\"2D Histogram of Travel Time\", fontsize=12)\nfig = plt.gcf()\nfig.set_size_inches(10, 5)\nfig.savefig(fig_name, dpi=600)\nplt.clf()\nprint(\"Hello World!\")\n\n\n# def gauss_function(x, amp, x0, sigma):\n# return amp * np.exp(-(x - x0) ** 2. / (2. 
* sigma ** 2.))\n# Construct function manually as sum of gaussians\n# gmm_y_sum = np.full_like(gmm_x, fill_value=0, dtype=np.float32)\n# for m, c, w in zip(gmm.means_.ravel(), gmm.covariances_.ravel(),\n# gmm.weights_.ravel()):\n# gmm_y_sum += gauss_function(x=gmm_x, amp=w, x0=m, sigma=np.sqrt(c))\n# Normalize so that integral is 1\n# gmm_y_sum /=\n# ax.plot(gmm_x, gmm_y_sum, color=\"black\", lw=4, label=\"Gauss_sum\")\n\n\nfor i_components in range(2, 5):\n fig_name = figures_dir + case_name + \"/gmm_\" + str(i_components) + \".png\"\n temp = (pt_status == 4)\n rtd = np.log10((pt_endT[temp] - pt_startT[temp]) / 24)\n rtd_weight = (abs(flux_weight[temp]) /\n sum(abs(flux_weight[temp])))\n rtd = np.random.choice(rtd, len(rtd) * 20, p=rtd_weight)\n gmm = GaussianMixture(n_components=i_components, covariance_type=\"full\")\n gmm_fit = gmm.fit(X=np.expand_dims(rtd, 1))\n gmm_x = np.linspace(-2, 3, 100)\n gmm_y = np.exp(gmm.score_samples(gmm_x.reshape(-1, 1)))\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[8, 5])\n ax.hist(rtd, bins=50, normed=True, alpha=0.5, color=\"#0070FF\")\n ax.plot(gmm_x, gmm_y, color=\"crimson\", lw=4, label=\"GMM\")\n for igauss in range(len(gmm.means_.ravel())):\n gmm_guass_y = norm.pdf(x=gmm_x,\n loc=gmm.means_.ravel()[igauss],\n scale=gmm.covariances_.ravel()[igauss]**0.5)\n ax.plot(gmm_x,\n gmm_guass_y * gmm.weights_.ravel()[igauss],\n linestyle=\"--\",\n lw=2,\n label=\"Mode\" + str(igauss + 1))\n print(np.trapz(gmm_guass_y, gmm_x))\n ax.set_ylabel(\"Probability density (-)\")\n ax.set_xlabel('Residience time (log10 day)')\n plt.legend()\n # plt.show()\n fig = plt.gcf()\n fig.set_size_inches(6, 4)\n fig.savefig(fig_name, dpi=600)\n fig.clf()\n\nfor iyear in range(1, 7):\n print(str(iyear) + \" year\")\n for i_components in range(2, 5):\n fig_name = (figures_dir + case_name +\n \"/y\" + str(iyear + 2008) +\n \"_gmm_\" + str(i_components) + \".png\")\n temp = ((pt_status == 4)\n & (pt_startT > (24 * sum(years[0:iyear])))\n & (pt_startT < (24 * sum(years[0:(iyear + 1)]))))\n rtd = np.log10((pt_endT[temp] - pt_startT[temp]) / 24)\n rtd_weight = (abs(flux_weight[temp]) /\n sum(abs(flux_weight[temp])))\n rtd = np.random.choice(rtd, len(rtd) * 20, p=rtd_weight)\n gmm = GaussianMixture(n_components=i_components,\n covariance_type=\"full\")\n gmm_fit = gmm.fit(X=np.expand_dims(rtd, 1))\n gmm_x = np.linspace(-2, 3, 100)\n gmm_y = np.exp(gmm.score_samples(gmm_x.reshape(-1, 1)))\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[8, 5])\n ax.hist(rtd, bins=50, normed=True, alpha=0.5, color=\"#0070FF\")\n ax.plot(gmm_x, gmm_y, color=\"crimson\", lw=4, label=\"GMM\")\n for igauss in range(len(gmm.means_.ravel())):\n gmm_guass_y = norm.pdf(x=gmm_x,\n loc=gmm.means_.ravel()[igauss],\n scale=gmm.covariances_.ravel()[igauss]**0.5)\n ax.plot(gmm_x,\n gmm_guass_y * gmm.weights_.ravel()[igauss],\n linestyle=\"--\",\n lw=2,\n label=\"Mode\" + str(igauss + 1))\n ax.set_ylabel(\"Probability density (-)\")\n ax.set_xlabel('Residience time (log10 day)')\n plt.title(str(2008 + iyear), fontsize=12)\n plt.legend()\n # plt.show()\n fig = plt.gcf()\n fig.set_size_inches(6, 4)\n fig.savefig(fig_name, dpi=600)\n fig.clf()\n\n\n# Draw legend\n# x = norm.pdf(x=gmm_x,\n# loc=gmm.means_.ravel()[igauss],\n# scale=gmm.covariances_.ravel()[igauss])\n# x = pt_cal_days[temp]\n# y = np.log10(\n# (pt_endT[temp] - pt_startT[temp]) / 24)\n# xy = np.vstack([x, y])\n# z = gaussian_kde(xy)(xy)\n# idx = z.argsort()\n# x, y, z = x[idx], y[idx], z[idx]\n# fig, ax = plt.subplot()\n# ax.scatter(x, y, c=z, s=50, 
edgecolor=\"\")\n\n\n# plt.scatter(release_times / 24, np.log10(max_rt), s=2)\n# plt.scatter(release_times / 24, np.log10(min_rt), s=2)\n\n# plt.scatter(np.log10(max_rt), np.log10(min_rt), s=2)\n# plt.hist(min_rt)\n\n# plt.scatter(pt_cal_days[temp], np.log10(\n# (pt_endT[temp] - pt_startT[temp]) / 24))\n\n# plt.scatter(pt_startT[temp], np.log10(\n# (pt_endT[temp] - pt_startT[temp]) / 24))\n\n\n# fig_name = (figures_dir + case_name +\n# \"/y\" + str(iyear + 2008) +\n# \"_gmm_\" + str(i_components) + \".png\")\n# temp = ((pt_status == 4)\n# & (pt_startT > (24 * sum(years[0:iyear])))\n# & (pt_startT < (24 * sum(years[0:(iyear + 1)]))))\n# rtd = np.log10((pt_endT[temp] - pt_startT[temp]) / 24)\n","sub_path":"before_git/mac_we27071/paper/pt_residence_time_v6.py","file_name":"pt_residence_time_v6.py","file_ext":"py","file_size_in_byte":14369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"237470544","text":"# Copyright 2014 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. module:: plugin_info\n :platform: Unix\n :synopsis: Class which describes the NeXus plugin description\n\n.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>\n\n\"\"\"\n\nimport h5py\nimport json\nimport os\nimport logging\n\nimport numpy as np\n\n\nNX_CLASS = 'NX_class'\n\n\nclass PluginList(object):\n \"\"\"\n Descriptor for plugin lists loaded from file\n \"\"\"\n\n def __init__(self):\n super(PluginList, self).__init__()\n self.plugin_list = []\n self.name = \"Default\"\n\n def populate_plugin_list(self, filename):\n plugin_file = h5py.File(filename, 'r')\n self.name = os.path.basename(filename)\n plugin_group = plugin_file['entry/process']\n for key in plugin_group.keys():\n plugin = {}\n plugin['name'] = plugin_group[key]['name'][0]\n plugin['id'] = plugin_group[key]['id'][0]\n plugin['data'] = json.loads(plugin_group[key]['data'][0])\n self.plugin_list.append(plugin)\n plugin_file.close()\n\n def save_list_to_file(self, filename):\n plugin_file = h5py.File(filename, 'w')\n entry_group = plugin_file.create_group('entry')\n entry_group.attrs[NX_CLASS] = 'NXentry'\n plugins_group = entry_group.create_group('plugin')\n plugins_group.attrs[NX_CLASS] = 'NXplugin'\n count = 0\n for plugin in self.plugin_list:\n plugin_group = plugins_group.create_group(\"%i\" % count)\n plugin_group.attrs[NX_CLASS] = 'NXnote'\n id_array = np.array([plugin['id']])\n plugin_group.create_dataset('id', id_array.shape, id_array.dtype,\n id_array)\n name_array = np.array([plugin['name']])\n plugin_group.create_dataset('name', name_array.shape,\n name_array.dtype, name_array)\n data_array = np.array([json.dumps(plugin['data'])])\n plugin_group.create_dataset('data', data_array.shape,\n data_array.dtype, data_array)\n count += 1\n plugin_file.close()\n\n def add_plugin_citation(self, filename, plugin_number, citation):\n logging.debug(\"Adding Citation to file %s\", filename)\n plugin_file = h5py.File(filename, 'a')\n plugin_entry = 
plugin_file['entry/process/%i' % plugin_number]\n citation.write(plugin_entry)\n plugin_file.close()\n\n def add_intermediate_data_link(self, filename, output_data, group_name):\n logging.debug(\"Adding link to file %s\", filename)\n plugin_file = h5py.File(filename, 'a')\n inter_entry = plugin_file['entry'].require_group('intermediate')\n inter_entry.attrs[NX_CLASS] = 'NXcollection'\n inter_entry[group_name] = output_data.external_link()\n plugin_file.close()\n\n def get_string(self):\n out_string = []\n count = 0\n for plugin in self.plugin_list:\n count += 1\n description = \"%2i) %s(%s)\" % (count, plugin['name'], plugin['id'])\n for key in plugin['data'].keys():\n description += \"\\n %20s : %s\" % (key, plugin['data'][key])\n out_string.append(description)\n return '\\n'.join(out_string)\n\n\nclass CitationInformation(object):\n \"\"\"\n Descriptor of Citation Information for plugins\n \"\"\"\n\n def __init__(self):\n super(CitationInformation, self).__init__()\n self.description = \"Default Description\"\n self.doi = \"Default DOI\"\n self.endnote = \"Default Endnote\"\n self.bibtex = \"Default Bibtex\"\n\n def write(self, hdf_group):\n citation_group = hdf_group.create_group('citation')\n citation_group.attrs[NX_CLASS] = 'NXcite'\n description_array = np.array([self.description])\n citation_group.create_dataset('description',\n description_array.shape,\n description_array.dtype,\n description_array)\n doi_array = np.array([self.doi])\n citation_group.create_dataset('doi',\n doi_array.shape,\n doi_array.dtype,\n doi_array)\n endnote_array = np.array([self.endnote])\n citation_group.create_dataset('endnote',\n endnote_array.shape,\n endnote_array.dtype,\n endnote_array)\n bibtex_array = np.array([self.bibtex])\n citation_group.create_dataset('bibtex',\n bibtex_array.shape,\n bibtex_array.dtype,\n bibtex_array)\n","sub_path":"savu/data/plugin_info.py","file_name":"plugin_info.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"349928749","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 16:07:21 2019\n\nFunction to compute cross correlation between two variables over a rolling window\n\n@author: Thomas Bury\n\"\"\"\n\n\n\n#---------------------------------\n# Import relevant packages\n#--------------------------------\n\n# For numeric computation and DataFrames\nimport numpy as np\nimport pandas as pd\n\n\n\n# For detrending time-series\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\n\n\n# Test cross_corr\n\n# Create a DataFrame of two time-series\ntVals = np.arange(0,10,0.1)\nxVals = 5 + np.random.normal(0,1,(len(tVals),2))\ndf_series = pd.DataFrame(xVals, index=tVals)\n\n\n\ndef cross_corr(df_series,\n roll_window=0.4,\n span=0.1,\n upto='Full'):\n '''\n Compute cross correlation between two time-series\n \t\n Args\n ----\n df_series: pd.DataFrame\n Time-series data to analyse. Indexed by time. Two columns\n roll_window: float\n Rolling window size as a proportion of the length of the time-series \n data.\n span: float\n Span of time-series data used for Lowess filtering. Taken as a \n proportion of time-series length if in (0,1), otherwise taken as \n absolute.\n upto: int or 'Full'\n Time up to which EWS are computed. Enter 'Full' to use\n the entire time-series. 
Otherwise enter a time value.\n \n Returns\n --------\n dict of pd.DataFrames:\n A dictionary with the following entries.\n **'EWS metrics':** A DataFrame indexed by time with columns corresopnding \n to each EWS.\n **'Kendall tau':** A DataFrame of the Kendall tau values for each EWS metric.\n '''\n \n \n \n # Initialise a DataFrame to store EWS data\n df_ews = pd.DataFrame()\n df_ews['State 1'] = df_series.iloc[:,0]\n df_ews['State 2'] = df_series.iloc[:,1]\n df_ews.index = df_series.index\n df_ews.index.rename('Time', inplace=True)\n \n \n \n # Portion of time-series for EWS computation\n if upto == 'Full':\n df_short_series = df_ews\n else: df_short_series = df_ews.loc[:upto]\n\n\n #------Data detrending--------\n \n # Compute the Lowess span as a proportion if given as absolute\n if not 0 < span <= 1:\n span = span/df_short_series.shape[0]\n else:\n span = span\n \n \n # Smooth time-series and compute residuals\n for var in [1,2]:\n smooth_data = lowess(df_short_series['State '+str(var)].values, \n df_short_series.index.values, frac=span)[:,1]\n smooth_series = pd.Series(smooth_data, index=df_short_series.index)\n \n residuals = df_short_series['State '+str(var)].values - smooth_data\n resid_series = pd.Series(residuals, index=df_short_series.index)\n \n # Add smoothed data and residuals to the EWS DataFrame\n df_ews['Trend '+str(var)] = smooth_series\n df_ews['Residuals '+str(var)] = resid_series\n \n \n # Compute the rolling window size (integer value)\n rw_size=int(np.floor(roll_window * df_series.shape[0]))\n \n \n # Compute cross correlation between residual time-series\n cross_cor = df_ews[['Residuals 1']].rolling(window=rw_size).corr(df_ews['Residuals 2'])['Residuals 1']\n \n # Add to EWS DataFrame\n df_ews['Cross correlation'] = cross_cor\n \n \n \n \n \n #------------Compute Kendall tau coefficients----------------#\n \n ''' In this section we compute the kendall correlation coefficients for each EWS\n with respect to time. Values close to one indicate high correlation (i.e. EWS\n increasing with time), values close to zero indicate no significant correlation,\n and values close to negative one indicate high negative correlation (i.e. 
EWS\n decreasing with time).'''\n \n \n # Put time values as their own series for correlation computation\n time_vals = pd.Series(df_ews.index, index=df_ews.index)\n\n \n # Find Kendall tau for each EWS and store in a DataFrame\n dic_ktau = {x:df_ews[x].corr(time_vals, method='kendall') for x in ['Cross correlation']} # temporary dictionary\n df_ktau = pd.DataFrame(dic_ktau, index=[0]) # DataFrame (easier for concatenation purposes)\n \n \n \n #-------------Organise final output and return--------------#\n \n # Ouptut a dictionary containing EWS DataFrame, power spectra DataFrame, and Kendall tau values\n output_dic = {'EWS metrics': df_ews, 'Kendall tau': df_ktau}\n \n return output_dic\n\n\n\n\n\n\n\n\n\n","sub_path":"models_ricker/ricker_seasonal/cross_corr.py","file_name":"cross_corr.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"369761185","text":"\"\"\"\nhttps://towardsdatascience.com/magic-methods-in-python-by-example-16b6826cae5c\n\"\"\"\n\n\nclass MycustomList(list):\n\n def __getitem__(self, index):\n if index == 0:\n raise ValueError\n index = index - 1\n return list.__getitem__(self, index)\n\n def __setitem__(self, index, value):\n if index == 0:\n raise ValueError\n index = index - 1\n return list.__setitem__(self,index,value)\n\n def __delitem__(self, index):\n index = index - 1\n return list.__delitem__(self, index)\n\n def __mul__(self, other):\n mul_list = [x * y for x, y in zip(self, other)]\n return MycustomList(mul_list)\n\nlis_one = MycustomList([1,2,3,4,5])\n\n#This will call __getitem__ to get index of that value\nprint(lis_one[2])\n\n\n#This will call __setitem__ method which sets first element in a list to 100\nlis_one[1] = 100\nprint(lis_one)\n\n#This will cal __delitem__ method which deletes value by indexing using del keyword.\ndel lis_one[1]\nprint(lis_one)\n\n#This will return a multiplication of two list objects one is self, which is lis_one and other is lis_two\nlis_one = MycustomList([1,2,3,4,5])\nprint(lis_one)\nlis_two = MycustomList([1,2,3,4,5])\nprint(lis_two)\nlis_three = lis_one * lis_two\nprint(lis_three)","sub_path":"MagicMethods/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"360778393","text":"#!/user/bin/python\n\n\"\"\"\nGiven a non-empty array of digits representing a non-negative integer, increment one to the integer.\n\nThe digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.\n\nYou may assume the integer does not contain any leading zero, except the number 0 itself.\n\n\n\nExample 1:\n\nInput: digits = [1,2,3]\nOutput: [1,2,4]\nExplanation: The array represents the integer 123.\nExample 2:\n\nInput: digits = [4,3,2,1]\nOutput: [4,3,2,2]\nExplanation: The array represents the integer 4321.\nExample 3:\n\nInput: digits = [0]\nOutput: [1]\n\n\nConstraints:\n\n1 <= digits.length <= 100\n0 <= digits[i] <= 9\n\"\"\"\n\nclass Solution:\n def plusOne(self, digits):\n result = []\n up = 1\n for d in reversed(digits):\n current = d + up\n if current < 10:\n result.append(current)\n up = 0\n else:\n this = (current) % 10\n up = current // 10\n result.append(this)\n if up > 0:\n result.append(up)\n return list(reversed(result))\n\n\ntestSuites = [\n ([1,2,3], [1,2,4]),\n ([4,3,2,1], [4,3,2,2]),\n ([0], [1]),\n ([8, 9], [9, 0]),\n ([9, 9], 
[1, 0, 0])\n]\n\ns = Solution()\nfor case in testSuites:\n ret = s.plusOne(case[0])\n print(ret)\n if ret == case[1]:\n print('case passed.')\n else:\n print(f'case {case[0]} failed.')\n","sub_path":"plus_one.py","file_name":"plus_one.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"99762834","text":"# -*- coding: utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\nimport subprocess, signal, os\nfrom django.conf import settings\nimport datetime\nfrom core.send_sms import sendsms\n\n\n\n\nclass Command(BaseCommand):\n\tdef handle(self, *args, **options):\n\t\tf = open('%s/get_transaction' % settings.BASE_DIR, 'r')\n\t\tt = f.read().split('\\n')[-2]\n\t\tf.close()\n\t\tt = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')\n\t\tif t + datetime.timedelta(hours = 2) < datetime.datetime.now():\n\t\t\tsendsms('+79284319761', 'повисли транзакции')\n\t\t\tp = subprocess.Popen(['ps', '-ax'], stdout=subprocess.PIPE)\n\t\t\tout, err = p.communicate()\n\t\t\tfor line in out.splitlines():\n\t\t\t\tif 'get_trans_brokers' in str(line):\n\t\t\t\t\tprint('kill')\n\t\t\t\t\tpid = int(line.split(None, 1)[0])\n\t\t\t\t\tos.kill(pid, signal.SIGKILL)\n\t\t\t\t\tsendsms('+79284319761', 'транзакции перезапущены')\n","sub_path":"billing/management/commands/checkloadtrans.py","file_name":"checkloadtrans.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"476444627","text":"import abc\nimport logging\nimport os\nimport shutil\n\nimport yaml\n\nfrom . import consts\nfrom .base import Runner, context, assert_is_dna_project, safe_name, as_path, C\n\n\nclass Generator(Runner, metaclass=abc.ABCMeta):\n def __init__(self,\n config: C,\n force: bool):\n super().__init__(config)\n self.name = self.config['NAME']\n self.target = self.config['_TARGET']\n self.arch_version = self.config['ARCH_VERSION']\n self.force = force\n\n self.simple_name = os.path.basename(self.name)\n\n\nclass Project(Generator):\n \"\"\"Archetype Adapter\n\n Generate a new project using archetype as base.\n \"\"\"\n def __init__(self, config: C, force: bool):\n super().__init__(config, force)\n self.simple_name = safe_name(self.simple_name)\n\n def create(self):\n if os.path.exists(self.target):\n if not self.force:\n raise ValueError(f'Project \"{self.target}\" already exists.')\n\n shutil.rmtree(self.target)\n\n logging.info(f'Creating project `{self.simple_name}`')\n\n (self\n .clone()\n .replace_with_actual_name())\n\n return self\n\n def clone(self):\n logging.info(f'Cloning archetype from `{consts.ARCH_REPO}`')\n os.system(f'git clone -q {consts.ARCH_REPO} {self.target} --depth 1')\n shutil.rmtree(os.path.join(self.target, '.git'))\n\n return self\n\n def replace_with_actual_name(self):\n logging.info(f'Replacing `archetype` occurrences by `{safe_name(self.simple_name)}`')\n\n roots = []\n\n for root, dirs, files in os.walk(self.target):\n target_root = self._rename_arch(root)\n\n if root != target_root:\n logging.debug(f' mk {target_root}')\n os.makedirs(target_root, exist_ok=True)\n\n for f in files:\n source, target = os.path.join(\n root, f), os.path.join(target_root, f)\n\n logging.debug(f' mk {target}')\n\n with open(source) as s:\n content = s.read()\n\n with open(target, 'w') as t:\n t.write(self._rename_arch(content))\n\n if source != target:\n logging.debug(f' rm {source}')\n os.chmod(target, 
os.stat(source).st_mode)\n os.remove(source)\n\n roots.append((root, target_root))\n\n logging.debug(f'Deleting dangling archetype folders')\n for original_root, target_root in roots:\n if original_root != target_root and os.path.exists(original_root):\n logging.debug(f' rm {original_root}')\n shutil.rmtree(original_root)\n\n return self\n\n def _rename_arch(self, content):\n return content.replace('archetype', self.simple_name)\n\n\nclass TemplateGenerator(Generator):\n DIRS: str\n SUFFIX: str = '.py'\n TEMPLATE_NAME: str = None\n\n @property\n def template_name(self):\n return self.TEMPLATE_NAME or self.__class__.__name__.lower()\n\n def create(self):\n assert_is_dna_project('.')\n\n dirs = self.DIRS.format(**self.config)\n path = as_path(self.name, dirs, self.SUFFIX)\n\n if os.path.exists(path) and not self.force:\n return logging.warning(f'{self.template_name} {self.name} already exists at `{path}`')\n\n logging.info(f'Creating job {self.name} at `{path}`')\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n t = self.load_template()\n\n for key, value in self.default_context():\n t = t.replace(key, value)\n\n with open(path, 'w') as f:\n logging.debug(f' mk {path}')\n f.write(t)\n\n def load_template(self):\n name, ext = self.template_name, self.SUFFIX\n\n name = name.replace(ext, '')\n file_path = os.path.join(consts.TEMPLATES_DIR, f'{name}{ext}')\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f'Cannot find template {name} at `{file_path}`.')\n\n with open(file_path) as f:\n return f.read()\n\n def default_context(self):\n return (('NAME_SIMPLE', self.simple_name),\n ('NAME_DOTTED', self.name.replace('/', '.')),\n ('ARCHETYPE_INITIAL', self.config['SERVICE'][:1].upper()),\n ('ARCHETYPE', self.config['SERVICE']),\n ('NAME', self.name))\n\n\nclass Notebook(TemplateGenerator):\n DIRS = 'notebooks'\n SUFFIX = '.ipynb'\n\n\nclass Job(TemplateGenerator):\n DIRS = 'jobs'\n\n\nclass JobProcessor(Job):\n ...\n\n\nclass JobFull(Job):\n ...\n\n\nclass Processor(TemplateGenerator):\n DIRS = 'ink/core/forge/{SERVICE}/processors'\n\n\nclass Test(TemplateGenerator):\n DIRS = 'tests/unit/{SERVICE}'\n SUFFIX = '_test.py'\n\n\nGENERATORS = {\n 'project': Project,\n 'notebook': Notebook,\n 'job': Job,\n 'job-processor': JobProcessor,\n 'job-full': JobFull,\n 'processor': Processor,\n 'test': Test,\n}\n\n\ndef adapter(args, verbose: bool = True):\n global GENERATORS\n\n if verbose: logging.info(f'Using {args.env} environment.')\n\n c = {} if args.artifact == 'project' else context(args.env)\n\n c.update(ARTIFACT=args.artifact,\n NAME=args.name,\n ARCH_VERSION=args.arch_version,\n _TARGET=args.to or args.name,\n _CORE_REPO=consts.CORE_REPO,\n _ARCH_REPO=consts.ARCH_REPO)\n\n logging.debug(yaml.dump({'config': c}))\n return GENERATORS[args.artifact](c, force=args.force)\n","sub_path":"ink/core/forge/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"135032601","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass conv2d(nn.Module) :\n def __init__(self,in_ch,out_ch,kernel_size,stride,padding):\n super(conv2d,self).__init__()\n self.block = nn.Sequential(\n nn.Conv2d(in_ch,out_ch,kernel_size=kernel_size,stride=stride,padding=padding,bias=False),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(True)\n )\n def forward(self,x) :\n x = self.block(x)\n return x\n\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_ch, out_ch, 
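# TemplateGenerator.create() above renders a template by chaining str.replace
# over the (placeholder, value) pairs from default_context(). A minimal
# sketch of that idea with made-up placeholders; note that default_context()
# puts NAME last so longer keys such as NAME_SIMPLE are substituted first:
def render(template, context):
    for key, value in context:
        template = template.replace(key, value)
    return template

print(render('class NAME_SIMPLE:  # generated from NAME',
             (('NAME_SIMPLE', 'Ingest'), ('NAME', 'jobs/ingest'))))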
stride=1, kernel_size=3, padding=1):\n        super(ResidualBlock, self).__init__()\n        self.cnn = conv2d(in_ch,out_ch,kernel_size,stride,padding)\n        if stride != 1 or in_ch != out_ch:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(out_ch)\n            )\n        else:\n            self.shortcut = nn.Sequential()\n\n    def forward(self, x):\n        out = self.cnn(x)\n        res = self.shortcut(x)  # shortcut branch uses the block input, not the conv output\n        return out + res\n\n\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n\n        self.block1 = nn.Sequential(\n            conv2d(1,32,5,2,0),\n            conv2d(32, 64, 3, 1,1),\n            conv2d(64, 32, 3, 1, 1)\n        )\n\n\n    def forward(self, x):\n        x = self.block1(x)\n        x = x.view(-1, 1600)\n        return x","sub_path":"modules/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"100503412","text":"'''\nCreated on 07.09.2013\n\n@author: hm\n'''\n\nimport logging, os.path, importlib\nfrom djinn.django.http import HttpResponse, HttpResponsePermanentRedirect\nlogger = logging.getLogger(\"sidu-manual\")\n\ndef dumpObj(obj):\n    '''Returns a string describing an object instance\n    @param obj: object to describe\n    @return: a string describing the object\n    '''\n    return repr(obj)\n\n\ndef decodeUrl(url):\n    '''Decodes the special characters in a URL into a normal string.\n    Special chars are %hh where hh is a 2 digit hexadecimal number.\n    @param url: the url to decode\n    @return: the decoded string\n    '''\n    rc = \"\"\n    url = url.replace(\"+\", \" \")\n    ix = last = 0\n    while ix >= 0 and last < len(url):\n        ix = url.find(\"%\", last)\n        if ix < 0:\n            rc += url[last:]\n        else:\n            rc += url[last:ix]\n            hexNumber = url[ix+1:ix+3]\n            cc = int(hexNumber, 16)\n            rc += chr(cc)\n            last = ix + 3\n    return rc\n\n\nclass WSGIHandler(object):\n    '''\n    Implements a simple replacement of Django which implements the\n    Web Server Gateway Interface (WSGI)\n    '''\n\n\n    def __init__(self, urlPatterns = None):\n        '''Constructor.\n        '''\n        if urlPatterns == None:\n            moduleName = os.environ[\"URL_MODULE\"]\n            module = importlib.import_module(moduleName)\n            urlPatterns = module.getPatterns()\n            if urlPatterns[0] == '':\n                urlPatterns = urlPatterns[1:]\n        self._urlPatterns = urlPatterns\n        self._blockSize = 0x10000\n        self._environ = None\n        self._request = None\n\n    def dumpUrl(self, urls):\n        '''Dumps the url list.\n        @return: a string describing the urls\n        '''\n        rc = \"\"\n        for item in urls:\n            rc += \"{:s}: {:s}\\n\".format(item._regExpr.pattern, item._name)\n        return rc\n\n    def findUrl(self, url):\n        '''Returns the first matching UrlInfo object.\n        @param url: the url to search\n        @return: None: not found\n        otherwise: an UrlInfo instance\n        '''\n        rc = None\n        if url.startswith(\"/\"):\n            url = url[1:]\n        for item in self._urlPatterns:\n            if item._regExpr.search(url):\n                rc = item\n                break\n        if rc == None:\n            raise Exception(\"URL not found: \" + url + \" urls\" + self.dumpUrl(self._urlPatterns))\n        return rc\n\n    def putCookies(self, cookies):\n        '''Write the cookies to the client.\n        @param cookies: a dictionary with the cookies\n        '''\n        pass\n\n    def writeContent(self, content):\n        '''Writes the content of the current page to the client.\n        @param content: the page content (normally HTML)\n        '''\n        pass\n\n    def findMime(self, filename):\n        '''Finds the MIME type of a filename.\n        @param filename: filename (from the url)\n        @return: the mime type\n        '''\n        node = os.path.basename(filename)\n        ix = node.rfind(\".\")  # rfind returns -1 when missing; rindex would raise ValueError\n        ext = \"\" if ix < 0 else node[ix+1:].lower()\n        
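        # Extension-to-MIME dispatch for the static assets served by
        # handleStaticFiles below; anything unrecognised falls back to
        # application/octet-stream.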
if ext == \"css\":\n rc = \"text/css\"\n elif ext == \"png\":\n rc = \"image/png\"\n elif ext == \"jpg\":\n rc = \"image/jpg\"\n elif ext == \"gif\":\n rc = \"image/gif\"\n elif ext == \"ico\":\n rc = \"image/x-icon\"\n elif ext == \"txt\" or ext == \"log\":\n rc = \"text/plain\"\n elif ext == \"htm\" or ext == \"html\":\n rc = \"text/html\"\n else:\n rc = \"application/octet-stream\"\n return rc\n \n def handleStaticFiles(self, url, documentRoot, startResponse):\n '''Handles a static file.\n @param url: the url of the static file, e.g. \"/static/std.css\"\n @param documentRoot: the base path of the application\n @param startResponse: a method which writes the HTTP header\n @return: a list with the file's content (in 64 kiByte blocks)\n '''\n fn = documentRoot + url\n if not os.path.exists(fn):\n # CONTENT_LENGTH will be added by the caller! \n headers = [(\"Content-Type\",\"text/plain\")]\n answer = \"file not found: \" + url + \"\\n\"\n startResponse.__call__(404, headers)\n rc = [answer]\n else:\n mime = self.findMime(url)\n rc = []\n fp = open(fn, \"r\")\n while True:\n # performance: put 64k blocks into the output\n part = fp.read(self._blockSize)\n if len(part) == 0:\n break\n else:\n rc.append(part)\n fp.close()\n headers = [(\"Content-Type\", mime)]\n startResponse.__call__(200, headers)\n return rc\n \n def handle(self, application, documentRoot, startResponse):\n '''Handles a HTTP request.\n @param application: the name of the application (is the virtual host)\n @param documentRoot: the base path of the application\n @param startResponse: a method which writes the HTTP header\n '''\n rc = None\n if not \"PATH_INFO\" in self._environ:\n logger.error(\"missing PATH_INFO\")\n raise Exception(\"djinn.handle(): no PATH_INFO found\")\n else:\n url = self._environ[\"PATH_INFO\"]\n if url == \"/favicon.ico\":\n url = \"/static/favicon.icon\"\n headers = []\n info = self.findUrl(url)\n if info == None:\n logger.error(\"Page not found: \" + url)\n raise Exception(\"djinn.handle(): no Page found: \" + url)\n else:\n handler = info._urlHandler\n self._request.documentRoot = documentRoot\n rc = handler.__call__(self._request)\n # rc is a HttpResponse or a HttpResponsePermanentRedirect\n if isinstance(rc, HttpResponse):\n # CONTENT_LENGTH will be added by the caller! \n self.content = rc.content\n header = (\"Content-Type\", \"text/html\")\n headers.append(header)\n elif isinstance(rc, HttpResponsePermanentRedirect):\n header = (\"Location\", rc.absUrl)\n headers.append(header)\n startResponse.__call__(rc.status, headers)\n return rc\n \n def __call__(self, environ, startResponse):\n '''the main method of the WSGI.\n @param environ: the parameters as a dictionary\n @param startResponse: a callable starting the HTTP response.\n def startResponse(status, headers)\n e.g. 
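# The hand-rolled table in findMime above covers the assets this app ships;
# the standard library's mimetypes module does the same lookup. A sketch of
# the equivalent helper:
import mimetypes

def find_mime_std(filename):
    mime, _encoding = mimetypes.guess_type(filename)
    return mime or "application/octet-stream"

assert find_mime_std("static/std.css") == "text/css"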
startResponse(\"200 OK\", [(\"LEN\", \"20\")]\n '''\n \n application = environ[\"HTTP_HOST\"]\n docRoot = environ[\"DOCUMENT_ROOT\"] if \"DOCUMENT_ROOT\" in environ else \"/usr/share/sidu-manual\"\n self._environ = environ\n self._request = WSGIRequest(environ)\n \n rc = self.handle(application, docRoot, startResponse)\n return rc\n \nclass WSGIRequest:\n '''Implements the request instance expected from WSGI applications.\n '''\n def __init__(self, environ):\n self._cookies = {}\n self.META = environ\n self.GET = {}\n self.POST = {}\n self.COOKIES = {}\n self.buildGET(environ)\n self.buildCookies(environ)\n\n def buildGET(self, environ):\n '''Builds the GET dictionary from an URL.\n @param environ: the data from the client\n '''\n self.GET = {}\n if \"QUERY_STRING\" in environ:\n queryString = environ[\"QUERY_STRING\"]\n items = queryString.split(\"&\")\n for varDef in items:\n if varDef == \"\":\n continue\n if varDef.find(\"=\") > 0:\n (name, value) = varDef.split(\"=\", 1)\n else:\n (name, value) = (varDef, \"\")\n name = decodeUrl(name)\n value = decodeUrl(value)\n self.GET[name] = value\n \n def buildCookies(self, environ):\n '''Fills the dictionary COOKIES.\n @param httpCookies: the http-url of the cookies\n '''\n self.COOKIES = {}\n if \"HTTP_COOKIE\" in environ:\n queryString = environ[\"HTTP_COOKIE\"]\n \n \n","sub_path":"djinn/wsgihandler.py","file_name":"wsgihandler.py","file_ext":"py","file_size_in_byte":8478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"412302586","text":"import curses\nimport traceback\nfrom collections import defaultdict, namedtuple\nfrom concurrent.futures import Future\nfrom enum import Enum\nfrom queue import Queue\nfrom loguru import logger\nfrom Activity import Activity\nfrom CentralDispatch import CentralDispatch, SerialDispatchQueue\nfrom EventTypes import StopApplication, ExceptionOccured, KeyStroke\nfrom activities.LogViewerActivity import LogViewerActivity\nfrom activities.ShowExceptionActivity import ShowExceptionActivity\n\nfrom loguru import logger\n\n\nclass Segue(Enum):\n PUSH = 0\n REPLACE = 1\n\n\nLabeledCallback = namedtuple(\"LabeledCallback\", [\"label\", \"callback\"])\n\n\nclass Application:\n def __init__(self, curses_screen):\n self.log_filename = \"application.log\"\n self.curses_screen = curses_screen\n self.event_subscribers = defaultdict(set)\n self.stack = []\n\n self.event_queue = Queue()\n\n self.shutdown_signal: Future = None\n self.main_thread: SerialDispatchQueue = None\n\n self.last_exception = None\n\n def handle_shutdown(self, shutdown_event):\n if shutdown_event.exception:\n try:\n raise shutdown_event.exception\n except Exception as e:\n logger.info(\"Shutdown because of error:\")\n logger.info(f\"{e.__class__.__name__}: {e}\")\n logger.info(traceback.format_exc())\n else:\n logger.info(\"Exited Normally\")\n\n def subscribe(self, event_type, activity, callback):\n self.event_subscribers[event_type].add(LabeledCallback(activity, callback))\n\n def unsubscribe_all(self, from_activity):\n for event_type, subscribers in self.event_subscribers.items():\n for labeled_callback in subscribers.copy():\n if labeled_callback.label == from_activity:\n self.event_subscribers[event_type].remove(labeled_callback)\n\n def setup_logger(self):\n logger.add(self.log_filename, format=\"{time:HH:mm:ss} {module} {message}\")\n\n def start(self, activity: Activity):\n self.setup_logger()\n curses.curs_set(0)\n CentralDispatch.default_exception_handler = 
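# WSGIRequest.buildGET above splits QUERY_STRING and percent-decodes by hand
# via decodeUrl, which converts %hh one byte at a time and therefore mangles
# multi-byte UTF-8 escapes. The standard library covers both steps (parse_qs
# drops blank values unless keep_blank_values=True):
from urllib.parse import parse_qs, unquote_plus

assert unquote_plus("caf%C3%A9+bar") == "café bar"
assert parse_qs("a=1&b=caf%C3%A9") == {"a": ["1"], "b": ["café"]}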
self._shutdown_app_exception_handler\n\n self.main_thread = CentralDispatch.create_serial_queue()\n self.subscribe(event_type=ExceptionOccured, activity=self, callback=self.on_exception)\n self.subscribe(event_type=KeyStroke, activity=self, callback=self.on_key_stroke)\n self.shutdown_signal = CentralDispatch.future(self._event_monitor)\n self.start_key_monitor()\n self.on_start()\n\n self.segue_to(activity)\n shutdown_event = self.shutdown_signal.result()\n\n self.handle_shutdown(shutdown_event)\n\n def on_start(self): pass\n\n def _stop_activity(self, activity):\n activity._stop()\n self.unsubscribe_all(activity)\n\n def _start_activity(self, activity):\n activity._start(application=self)\n\n def _segue_to(self, activity: Activity, segue_type):\n if len(self.stack) > 0:\n if segue_type == Segue.REPLACE:\n current_activity = self.stack.pop()\n else:\n current_activity = self.stack[-1]\n\n current_activity._stop()\n current_activity.on_stop()\n self.unsubscribe_all(current_activity)\n\n self.stack.append(activity)\n activity._start(application=self)\n\n def segue_to(self, activity: Activity, segue_type=Segue.PUSH):\n self.main_thread.submit_async(self._segue_to, activity, segue_type=segue_type)\n\n def _pop_activity(self):\n current_activity = self.stack.pop()\n if len(self.stack) > 0:\n returning_activity = self.stack[-1]\n\n self._stop_activity(current_activity)\n self._start_activity(returning_activity)\n else:\n # We've popped the last activity\n self.event_queue.put(StopApplication())\n\n def pop_activity(self):\n self.main_thread.submit_async(self._pop_activity)\n\n def _dispatch_event(self, callback, event):\n callback(event)\n\n def dispatch_event(self, event):\n for labeled_callback in self.event_subscribers[type(event)]:\n self.main_thread.submit_async(self._dispatch_event, labeled_callback.callback, event)\n\n def _event_monitor(self):\n event = self.event_queue.get()\n\n while not isinstance(event, StopApplication):\n self.dispatch_event(event)\n event = self.event_queue.get()\n\n # Return the last event, because it might contain an exception\n return event\n\n def _key_monitor(self, screen):\n while not self.shutdown_signal.done():\n key = screen.getch()\n\n # 3 = ctrl-c\n if key == 3:\n self.event_queue.put(StopApplication())\n return\n else:\n self.event_queue.put(KeyStroke(key))\n\n def start_key_monitor(self):\n CentralDispatch.future(self._key_monitor, self.curses_screen)\n\n def _debug_message(self, lines):\n self.curses_screen.clear()\n for index, line in enumerate(lines):\n self.curses_screen.addstr(index, 0, line)\n\n self.curses_screen.refresh()\n\n def debug_message(self, message: str):\n lines = message.split(\"\\n\")\n\n self.main_thread.submit_async(self._debug_message, lines)\n\n def on_key_stroke(self, event: KeyStroke):\n if event.key == curses.KEY_F1:\n self.segue_to(LogViewerActivity())\n\n def on_exception(self, event: ExceptionOccured):\n if self.last_exception is not None:\n logger.info(\"While handling one exception, another occurred.\\nOriginal exception: {}\")\n logger.info(f\"{self.last_exception.__class__.__name__}: {self.last_exception}\")\n logger.info(traceback.format_exc())\n self.event_queue.put(StopApplication(exception=event.exception))\n else:\n self.last_exception = event.exception\n self.segue_to(ShowExceptionActivity(event.exception))\n\n def _shutdown_app_exception_handler(self, function):\n def inner_function(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except Exception as e:\n 
self.event_queue.put(ExceptionOccured(exception=e))\n\n return inner_function\n\n","sub_path":"Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"484111628","text":"#1544. Make The String Great\n#realted topics\n#string, stack\n\ns = \"leEeetcode\"\ns2 = \"abBAcC\"\ns3 = \"s\"\n\n#first solution\ndef makeGood(s):\n stack = []\n for c in s:\n if not stack:\n stack.append(c)\n elif stack[-1].upper()==c.upper() and ord(stack[-1])!=ord(c):\n stack.pop()\n else:\n stack.append(c)\n return \"\".join(stack)\n\nprint(makeGood(s))\nprint(makeGood(s2))\nprint(makeGood(s3))\n\n#second solution\ndef makeGood2(s):\n result = []\n for c in s:\n if not result:\n result.append(c)\n elif result[-1].isupper() and result[-1].lower() == c:\n result.pop()\n elif result[-1].islower() and result[-1].upper() == c:\n result.pop()\n else:\n result.append(c)\n return ''.join(result)\n\nprint(makeGood2(s))\nprint(makeGood2(s2))\nprint(makeGood2(s3))\n\n\n#third solution\ndef makeGood3(s):\n stack = [] \n for c in s:\n if stack and stack[-1] == c.swapcase():\n stack.pop()\n else:\n stack.append(c)\n \n return \"\".join(stack)\nprint(makeGood3(s))\nprint(makeGood3(s2))\nprint(makeGood3(s3))\n","sub_path":"leet code/String/1544. Make The String Great.py","file_name":"1544. Make The String Great.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"245149372","text":"from model_utils import *\n\nasr_model = LJASR()\n\"\"\"\nload these parameters from configuartion\n\"\"\"\ntrain_csv = '/mnt/sdc5/Work/Tatras/Common_Data/LJ Speech ASR Dataset/time_sorted_audio_list_train.csv'\nbatch_size = 64\nEPOCHS = 100\n\n\n# gather batch information\ntrain_files,nb_batches,total = initialize_batch_generator(train_csv,batch_size)\n\n\n# # train asr model\nfor e in range(EPOCHS):\n e_loss = 0\n e_acc = 0\n batches = batch_generator(train_files,batch_size,total)\n for b in range(nb_batches):\n train_x,train_y,audio_lens,label_lens,start,end = next(batches)\n inputs = {'the_input': train_x, 'the_labels': train_y, 'input_length': audio_lens, 'label_length': label_lens}\n # ctc output\n outputs = {'ctc': np.zeros([len(train_x)])}\n b_loss,b_acc = asr_model.train_step(inputs,outputs,batch_size,\"\")\n e_loss += b_loss\n e_acc += b_acc\n sys.stdout.write(\"\\rBatch %d/%d [%d to %d] Loss %f Acc %f\"%(b+1,nb_batches,start,end,b_loss,b_acc))\n sys.stdout.flush()\n avg_e_loss = e_loss / nb_batches\n avg_e_acc = e_acc / nb_batches\n print(\"\\nEpoch %d/%d Loss %f Acc %f\"%(e+1,EPOCHS,avg_e_loss,avg_e_acc))","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"46203962","text":"\nimport logging\nimport codecs\nimport datetime\n\nlogger=logging.getLogger()\n\nclass monitor(object):\n def process_request(self, request, spider):\n logger.info('monitor process_request:%s', request.url)\n\n def process_response(self, request, response, spider):\n logger.info('monitor process_response:%d',response.status)\n return response\n\n def process_exception(self, request, exception, spider):\n logger.info('monitor process_exception:%s', request.url)\n return request\n\n def _faillog(self, request, errorType, reason, spider):\n with codecs.open('log/faillog.log', 'a', encoding='utf-8') as file:\n 
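# The three variants in "1544. Make The String Great" above should agree on
# any string of ASCII letters (the problem's input alphabet); a quick
# randomized cross-check, assuming the three functions are in scope:
import random, string

for _ in range(200):
    s = ''.join(random.choice(string.ascii_letters) for _ in range(12))
    assert makeGood(s) == makeGood2(s) == makeGood3(s)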
file.write(\"%(now)s [%(error)s] %(url)s reason: %(reason)s \\r\\n\" %\n {'now': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'error': errorType,\n 'url': request.url,\n 'reason': reason})","sub_path":"zhihu/zhihu/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"121401027","text":"from PIL import Image\nimport pytesseract\nimport cv2\nimport os\nimport numpy as np\n\n\ndef get_grayscale(image): return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # noise removal\ndef remove_noise(image): return cv2.medianBlur(image,5) #thresholding\ndef thresholding(image): return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1] #dilation\ndef dilate(image): kernel = np.ones((5,5),np.uint8); return cv2.dilate(image, kernel, iterations = 1) #erosion\ndef erode(image): kernel = np.ones((5,5),np.uint8); return cv2.erode(image, kernel, iterations = 1) #opening - erosion followed by dilation\ndef opening(image): kernel = np.ones((5,5),np.uint8); return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel) #canny edge detection\ndef canny(image): return cv2.Canny(image, 100, 200) #skew correction\ndef deskew(image): \n coords = np.column_stack(np.where(image > 0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45: \n angle = -(90 + angle) \n else: \n angle = -angle \n (h, w) = image.shape[:2]\n center = (w // 2, h // 2) \n M = cv2.getRotationMatrix2D(center, angle, 1.0) \n rotated = cv2.warpAffine(image, M, (w, h), \n flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE) \n return rotated #template matching\ndef match_template(image, template): return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)\n\ndef predict(filename):\n image = cv2.imread(filename)\n gray = get_grayscale(image)\n thresh = remove_noise(gray)\n\n filename = \"{}.png\".format(os.getpid())\n cv2.imwrite(filename, thresh)\n\n pytesseract.pytesseract.tesseract_cmd = r'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n custom_config = r'--oem 3 --psm 6 outputbase digits -c tessedit_char_whitelist=-0123456789'\n text = pytesseract.image_to_string(Image.open(filename), lang=\"digits_comma\", config=custom_config)\n os.remove(filename)\n return text\n\n\nimport cv2\nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nimport math\nimport imutils\n\n\ndef put_label(t_img,label,x,y):\n font = cv2.FONT_HERSHEY_SIMPLEX\n l_x = int(x) - 10\n l_y = int(y) + 10\n cv2.rectangle(t_img,(l_x,l_y+5),(l_x+35,l_y-35),(0,255,0),-1) \n cv2.putText(t_img,str(label),(l_x,l_y), font,1.5,(255,0,0),1,cv2.LINE_AA)\n return t_img\n\ndef image_refiner(gray):\n org_size = 22\n img_size = 28\n rows,cols = gray.shape\n \n if rows > cols:\n factor = org_size/rows\n rows = org_size\n cols = int(round(cols*factor)) \n else:\n factor = org_size/cols\n cols = org_size\n rows = int(round(rows*factor))\n gray = cv2.resize(gray, (cols, rows))\n\n colsPadding = (int(math.ceil((img_size-cols)/2.0)),int(math.floor((img_size-cols)/2.0)))\n rowsPadding = (int(math.ceil((img_size-rows)/2.0)),int(math.floor((img_size-rows)/2.0)))\n gray = np.lib.pad(gray,(rowsPadding,colsPadding),'constant')\n return gray\n\ndef sort_contours(cnts, method=\"left-to-right\"):\n # initialize the reverse flag and sort index\n reverse = False\n i = 0\n\n # handle if we need to sort in reverse\n if method == \"right-to-left\" or method == \"bottom-to-top\":\n reverse = True\n\n # handle if we are sorting against 
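# Note: _faillog above imports the datetime module but calls datetime.now(),
# which raises AttributeError; the classmethod lives on datetime.datetime. A
# corrected sketch of building that log line (sample values are placeholders):
import datetime

line = "%(now)s [%(error)s] %(url)s reason: %(reason)s \r\n" % {
    'now': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    'error': 'HttpError',
    'url': 'https://example.com',
    'reason': 'timeout'}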
the y-coordinate rather than\n # the x-coordinate of the bounding box\n if method == \"top-to-bottom\" or method == \"bottom-to-top\":\n i = 1\n\n # construct the list of bounding boxes and sort them from top to\n # bottom\n boundingBoxes = [cv2.boundingRect(c) for c in cnts]\n (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),\n key=lambda b:b[1][i], reverse=reverse))\n\n # return the list of sorted contours and bounding boxes\n return (cnts, boundingBoxes)\n\ndef get_output_image(path):\n img = cv2.imread(path,2)\n img_org = cv2.imread(path)\n\n ret,thresh = cv2.threshold(img,127,255,0)\n contours,hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n res = []\n cnt_dict = dict()\n for j,cnt in enumerate(contours):\n epsilon = 0.01*cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n hull = cv2.convexHull(cnt)\n k = cv2.isContourConvex(cnt)\n x,y,w,h = cv2.boundingRect(cnt)\n if(hierarchy[0][j][3]!=-1 and w>10 and h>10):\n cv2.rectangle(img_org,(x,y),(x+w,y+h),(0,255,0),2)\n roi = img[y:y+h, x:x+w]\n roi = cv2.bitwise_not(roi)\n #th,fnl = cv2.threshold(roi,127,255,cv2.THRESH_BINARY)\n cv2.imwrite('temp.png', roi)\n pred = predict('temp.png')[0]\n res.append(pred)\n (x,y),radius = cv2.minEnclosingCircle(cnt)\n\n return img_org, res\n","sub_path":"server/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"137697982","text":"import time, pygame, sys, random\r\nfrom pygame.locals import *\r\n\r\nFPS = 50\r\nwindowWidth = 780\r\nwindowHeight = 640\r\nboxSize = 20\r\nboardWidth = 10\r\nboardHeight = 25\r\nblank = \".\"\r\n\r\nmoveSideWaysFreq = 0.15\r\nmoveDownFreq = 0.1\r\n\r\nxMargin = int((windowWidth - boardWidth * boxSize)/2)\r\ntopMargin = windowHeight - (boardHeight * boxSize) - 14\r\n\r\nwhite = (255, 255, 255)\r\ngray = (185, 185, 185)\r\nblack = (0, 0, 0)\r\nred = (104, 0, 0)\r\nlightRed = (175, 20, 20)\r\ngreen = (0, 145, 0)\r\nlightGreen = (20, 175, 20)\r\nblue = (0, 0, 145)\r\nlightBlue = (20, 20, 175)\r\nyellow = (155, 155, 0)\r\nlightYellow = (175, 175, 20)\r\npurple = (76, 0, 153)\r\n\r\nborderColor = gray\r\nbgColor = black\r\ntextColor = white\r\ntextShadowColor = gray\r\nlightColors = (blue, green, red, yellow)\r\ncolors = (lightBlue, lightGreen, lightRed, lightYellow)\r\nassert len(colors) == len(lightColors)\r\n\r\ntemplateWidth = 5\r\ntemplateHeight = 5\r\n\r\nS_SHAPE_TEMPLATE = [['.....',\r\n '.....',\r\n '..OO.',\r\n '.OO..',\r\n '.....'],\r\n ['.....',\r\n '..O..',\r\n '..OO.',\r\n '...O.',\r\n '.....']]\r\n\r\nZ_SHAPE_TEMPLATE = [['.....',\r\n '.....',\r\n '.OO..',\r\n '..OO.',\r\n '.....'],\r\n ['.....',\r\n '..O..',\r\n '.OO..',\r\n '.O...',\r\n '.....']]\r\n\r\nI_SHAPE_TEMPLATE = [['..O..',\r\n '..O..',\r\n '..O..',\r\n '..O..',\r\n '.....'],\r\n ['.....',\r\n '.....',\r\n 'OOOO.',\r\n '.....',\r\n '.....']]\r\n\r\nO_SHAPE_TEMPLATE = [['.....',\r\n '.....',\r\n '.OO..',\r\n '.OO..',\r\n '.....']]\r\n\r\nJ_SHAPE_TEMPLATE = [['.....',\r\n '.O...',\r\n '.OOO.',\r\n '.....',\r\n '.....'],\r\n ['.....',\r\n '..OO.',\r\n '..O..',\r\n '..O..',\r\n '.....'],\r\n ['.....',\r\n '.....',\r\n '.OOO.',\r\n '...O.',\r\n '.....'],\r\n ['.....',\r\n '..O..',\r\n '..O..',\r\n '.OO..',\r\n '.....']]\r\n\r\nL_SHAPE_TEMPLATE = [['.....',\r\n '...O.',\r\n '.OOO.',\r\n '.....',\r\n '.....'],\r\n ['.....',\r\n '..O..',\r\n '..O..',\r\n '..OO.',\r\n '.....'],\r\n ['.....',\r\n '.....',\r\n '.OOO.',\r\n 
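# sort_contours above is defined but never used in get_output_image, so the
# recognised digits come back in cv2.findContours order rather than reading
# order. A tiny demo of what it does, with two synthetic one-point contours:
import numpy as np

c_right = np.array([[[50, 0]]], dtype=np.int32)
c_left = np.array([[[10, 0]]], dtype=np.int32)
cnts, boxes = sort_contours([c_right, c_left], method="left-to-right")
assert boxes == ((10, 0, 1, 1), (50, 0, 1, 1))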
'.O...',\r\n '.....'],\r\n ['.....',\r\n '.OO..',\r\n '..O..',\r\n '..O..',\r\n '.....']]\r\n\r\nT_SHAPE_TEMPLATE = [['.....',\r\n '..O..',\r\n '.OOO.',\r\n '.....',\r\n '.....'],\r\n ['.....',\r\n '..O..',\r\n '..OO.',\r\n '..O..',\r\n '.....'],\r\n ['.....',\r\n '.....',\r\n '.OOO.',\r\n '..O..',\r\n '.....'],\r\n ['.....',\r\n '..O..',\r\n '.OO..',\r\n '..O..',\r\n '.....']]\r\n\r\nshapes = {\"S\": S_SHAPE_TEMPLATE,\r\n \"Z\": Z_SHAPE_TEMPLATE,\r\n \"J\": J_SHAPE_TEMPLATE,\r\n \"L\": L_SHAPE_TEMPLATE,\r\n \"I\": I_SHAPE_TEMPLATE,\r\n \"O\": O_SHAPE_TEMPLATE,\r\n \"T\": T_SHAPE_TEMPLATE\r\n }\r\n\r\n\r\ndef main():\r\n global fpsClock, displaysurf, basicFont, bigFont\r\n pygame.init()\r\n fpsClock = pygame.time.Clock()\r\n displaysurf = pygame.display.set_mode((windowWidth, windowHeight))\r\n basicFont = pygame.font.Font(\"freesansbold.ttf\", 16)\r\n bigFont = pygame.font.Font(\"freesansbold.ttf\", 50)\r\n pygame.display.set_caption(\"Tetromino: A Tetris Clone\")\r\n\r\n showTextScreen(\"Tetrimono\")\r\n while True:\r\n if random.randint(0, 1) == 0:\r\n pygame.mixer.music.load(\"sound/tetrisb.mid\")\r\n else:\r\n pygame.mixer.music.load(\"sound/tetrisc.mid\")\r\n pygame.mixer.music.play(-1, 0.0)\r\n runGame()\r\n pygame.mixer.music.stop()\r\n showTextScreen(\"Game Over\")\r\n\r\n\r\ndef runGame():\r\n\r\n # variables for the start of the game\r\n board = getBlankBoard()\r\n lastMoveDownTime = time.time()\r\n lastMoveSidewaysTime = time.time()\r\n lastFallTime = time.time()\r\n\r\n movingDown = False\r\n movingLeft = False\r\n movingRight = False\r\n score = 0\r\n level, fallFreq = calculateLevelAndFallFreq(score)\r\n\r\n fallingPiece = getNewPiece()\r\n nextPiece = getNewPiece()\r\n\r\n while True:\r\n if fallingPiece == None:\r\n fallingPiece = nextPiece\r\n nextPiece = getNewPiece()\r\n lastFallTime = time.time()\r\n\r\n if not isValidPosition(board, fallingPiece):\r\n return\r\n\r\n checkForQuit()\r\n for event in pygame.event.get():\r\n if event.type == KEYUP:\r\n if event.key == K_p:\r\n # pause game\r\n displaysurf.fill(bgColor)\r\n pygame.mixer.music.stop()\r\n showTextScreen(\"Paused\")\r\n\r\n pygame.mixer.music.play(-1, 0.0)\r\n lastFallTime = time.time()\r\n lastMoveDownTime = time.time()\r\n lastMoveSidewaysTime = time.time()\r\n\r\n elif event.key == K_LEFT or event.key == K_a:\r\n movingLeft = False\r\n elif event.key == K_RIGHT or event.key == K_d:\r\n movingRight = False\r\n elif event.key == K_DOWN or event.key == K_s:\r\n movingDown = False\r\n\r\n # Moving the block sideways\r\n elif event.type == KEYDOWN:\r\n if (event.key == K_LEFT or event.key == K_a) and isValidPosition(board, fallingPiece, adjX = -1):\r\n movingLeft = True\r\n movingRight = False\r\n lastMoveSidewaysTime = time.time()\r\n\r\n elif (event.key == K_RIGHT or event.key == K_d) and isValidPosition(board, fallingPiece, adjX=1):\r\n fallingPiece[\"x\"] += 1\r\n movingRight = True\r\n movingLeft = False\r\n lastMoveSidewaysTime = time.time()\r\n\r\n # Rotating the block only if there is room\r\n elif (event.key == K_UP or event.key == K_w):\r\n fallingPiece[\"rotation\"] = (fallingPiece[\"rotation\"] + 1) % len(shapes[fallingPiece[\"shape\"]])\r\n if not isValidPosition(board, fallingPiece):\r\n fallingPiece[\"rotation\"] = (fallingPiece[\"rotation\"] - 1) % len(shapes[fallingPiece[\"shape\"]])\r\n\r\n # Rotate the other\r\n elif (event.key == K_q):\r\n fallingPiece[\"rotation\"] = (fallingPiece[\"rotation\"] - 1) % len(shapes[fallingPiece[\"shape\"]])\r\n if not isValidPosition(board, fallingPiece):\r\n 
fallingPiece[\"rotation\"] = (fallingPiece[\"rotation\"] + 1) % len(shapes[fallingPiece[\"shape\"]])\r\n\r\n # increase speed that block is falling with down_key\r\n elif (event.key == K_DOWN or event.key == K_s):\r\n movingDown = True\r\n\r\n if isValidPosition(board, fallingPiece, adjY=1):\r\n fallingPiece[\"y\"] += 1\r\n lastMoveDownTime = time.time()\r\n\r\n # Move current block all the way down\r\n elif event.key == K_SPACE:\r\n movingDown = False\r\n movingLeft = False\r\n movingRight = False\r\n for i in range(1, boardHeight):\r\n if not isValidPosition(board, fallingPiece, adjY=i):\r\n break\r\n fallingPiece[\"y\"] += i - 1\r\n\r\n # Handle moving the block because of user input\r\n if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > moveSideWaysFreq:\r\n if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):\r\n fallingPiece[\"x\"] -= 1\r\n elif movingRight and isValidPosition(board, fallingPiece, adjX=1):\r\n fallingPiece[\"x\"] += 1\r\n lastMoveSidewaysTime = time.time()\r\n\r\n if movingDown and time.time() - lastMoveDownTime > moveDownFreq and isValidPosition(board, fallingPiece, adjY=1):\r\n fallingPiece[\"y\"] += 1\r\n lastMoveDownTime = time.time()\r\n\r\n # let the piece fall if it is time to fall\r\n if time.time() - lastFallTime > fallFreq:\r\n # see if piece has landed\r\n if not isValidPosition(board, fallingPiece, adjY=1):\r\n # falling piece has landed, place on board\r\n addToBoard(board, fallingPiece)\r\n score += removeCompleteLines(board)\r\n level, fallFreq = calculateLevelAndFallFreq(score)\r\n fallingPiece = None\r\n else:\r\n # piece did not land, continue to move block down\r\n fallingPiece[\"y\"] += 1\r\n lastFallTime = time.time()\r\n\r\n # Output on the screen\r\n displaysurf.fill(bgColor)\r\n drawBoard(board)\r\n drawStatus(score, level)\r\n drawNextPiece(nextPiece)\r\n if fallingPiece != None:\r\n drawPiece(fallingPiece)\r\n\r\n pygame.display.update()\r\n fpsClock.tick(FPS)\r\n\r\n\r\ndef makeTextObjs(text, font, obj_color):\r\n surf = font.render(text, True, obj_color)\r\n return surf, surf.get_rect()\r\n\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n\r\n\r\ndef checkForKeyPress():\r\n checkForQuit()\r\n\r\n for event in pygame.event.get([KEYDOWN, KEYUP]):\r\n if event.type == KEYDOWN:\r\n continue\r\n return event.key\r\n return None\r\n\r\n\r\ndef showTextScreen(text):\r\n # draw shadow\r\n titleSurf, titleRect = makeTextObjs(text, bigFont, red)\r\n titleRect.center = (int(windowWidth/2), int(windowHeight/2))\r\n displaysurf.blit(titleSurf, titleRect)\r\n\r\n # draw text\r\n titleSurf, titleRect = makeTextObjs(text, bigFont, textColor)\r\n titleRect.center = (int(windowWidth/2) - 3, int(windowHeight/2) - 3)\r\n\r\n displaysurf.blit(titleSurf, titleRect)\r\n\r\n pressKeySurf, pressKeyRect = makeTextObjs(\"Press a key to begin!\", basicFont, textColor)\r\n pressKeyRect.center = (int(windowWidth / 2), int(windowHeight/2) + 100)\r\n displaysurf.blit(pressKeySurf, pressKeyRect)\r\n\r\n while checkForKeyPress() == None:\r\n pygame.display.update()\r\n fpsClock.tick()\r\n\r\n\r\ndef checkForQuit():\r\n for event in pygame.event.get(QUIT):\r\n terminate()\r\n for event in pygame.event.get(KEYUP):\r\n if event.key == K_ESCAPE:\r\n terminate()\r\n\r\n pygame.event.post(event)\r\n\r\n\r\n# return the level the player is on and how many seconds has passed until piece falls\r\ndef calculateLevelAndFallFreq(score):\r\n level = int(score / 10) + 1\r\n fallFreq = 0.27 - (level * 0.02)\r\n return level, 
fallFreq\r\n\r\n\r\n# return random piece\r\ndef getNewPiece():\r\n shape = random.choice(list(shapes.keys()))\r\n newPiece = {\"shape\": shape,\r\n \"rotation\": random.randint(0, len(shapes[shape]) - 1),\r\n \"x\": int(boardWidth/2) - int(templateWidth/2),\r\n \"y\": -2, # start at the top of the board\r\n \"color\": random.randint(0, len(colors) - 1) }\r\n return newPiece\r\n\r\n\r\n# fill board based on piece's location, shape, and rotation\r\ndef addToBoard(board, piece):\r\n for x in range(templateWidth):\r\n for y in range(templateHeight):\r\n if shapes[piece[\"shape\"]][piece[\"rotation\"]][y][x] != blank:\r\n board[x + piece[\"x\"]][y + piece[\"y\"]] = piece[\"color\"]\r\n\r\n\r\ndef getBlankBoard():\r\n board = []\r\n for i in range(boardWidth):\r\n board.append([blank] * boardHeight)\r\n return board\r\n\r\n\r\ndef isOnBoard(x, y):\r\n return x >= 0 and x < boardWidth and y < boardHeight\r\n\r\n\r\ndef isValidPosition(board, piece, adjX=0, adjY=0):\r\n for x in range(templateWidth):\r\n for y in range(templateHeight):\r\n isAboveBoard = y + piece[\"y\"] + adjY < 0\r\n if isAboveBoard or shapes[piece[\"shape\"]][piece[\"rotation\"]][y][x] == blank:\r\n continue\r\n if not isOnBoard(x + piece[\"x\"] + adjX, y + piece[\"y\"] + adjY):\r\n return False\r\n\r\n if board[x + piece[\"x\"] + adjX][y + piece[\"y\"] + adjY] != blank:\r\n return False\r\n\r\n return True\r\n\r\n\r\n# Line is filled with no gaps\r\ndef isCompleteLine(board, y):\r\n for x in range(boardWidth):\r\n if board[x][y] == blank:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef removeCompleteLines(board):\r\n numLinesRemoved = 0\r\n y = boardHeight - 1 # start at the bottom of the board\r\n while y >= 0:\r\n if isCompleteLine(board, y):\r\n # remove line and pull down boxes\r\n for pullDownY in range(y, 0, -1):\r\n for x in range(boardWidth):\r\n board[x][pullDownY] = board[x][pullDownY-1]\r\n\r\n # set very top line to blank\r\n for x in range(boardWidth):\r\n board[x][0] = blank\r\n numLinesRemoved += 1\r\n\r\n else:\r\n y -= 1 # Check next row above\r\n return numLinesRemoved\r\n\r\n\r\n# convert coordinates of the board to coordinates of the lation on the screen\r\ndef convertToPixelCoords(box_x, box_y):\r\n return (xMargin + (box_x * boxSize)), (topMargin + (box_y * boxSize))\r\n\r\n\r\n# draw single boxes to create the piece\r\ndef drawBox(box_x, box_y, color, pixelx=None, pixely=None):\r\n if color == blank:\r\n return\r\n if pixelx == None and pixely == None:\r\n pixelx, pixely = convertToPixelCoords(box_x, box_y)\r\n\r\n pygame.draw.rect(displaysurf, colors[color], (pixelx + 1, pixely + 1, boxSize - 4, boxSize - 4))\r\n\r\n\r\n# draw border around board\r\ndef drawBoard(board):\r\n pygame.draw.rect(displaysurf, borderColor, (xMargin - 3, topMargin - 7, (boardWidth * boxSize) + 8, (boardHeight * boxSize) + 8), 5)\r\n\r\n # fill background\r\n pygame.draw.rect(displaysurf, bgColor, (xMargin, topMargin, boxSize * boardWidth, boxSize * boardHeight))\r\n\r\n # draw the individual boxes\r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n drawBox(x, y, board[x][y])\r\n\r\n\r\n# Score and level indicator\r\ndef drawStatus(score, level):\r\n scoreSurf = basicFont.render(\"Score: %s\" % score, True, textColor)\r\n scoreRect = scoreSurf.get_rect()\r\n scoreRect.topleft = (windowWidth - 150, 20)\r\n displaysurf.blit(scoreSurf, scoreRect)\r\n\r\n # draw the level text\r\n levelSurf = basicFont.render(\"Level: %s\" % level, True, textColor)\r\n levelRect = levelSurf.get_rect()\r\n levelRect.topleft = 
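# calculateLevelAndFallFreq above turns non-positive at level 14
# (0.27 - 14 * 0.02 = -0.01), after which pieces would fall every tick. A
# clamped variant (the 0.05-second floor is an arbitrary choice):
def calculateLevelAndFallFreqClamped(score):
    level = int(score / 10) + 1
    return level, max(0.27 - (level * 0.02), 0.05)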
(windowWidth - 150, 50)\r\n displaysurf.blit(levelSurf, levelRect)\r\n\r\n\r\ndef drawPiece(piece, pixelx=None, pixely=None):\r\n shapeToDraw = shapes[piece[\"shape\"]][piece[\"rotation\"]]\r\n if pixelx == None and pixely == None:\r\n pixelx, pixely = convertToPixelCoords(piece[\"x\"], piece[\"y\"])\r\n\r\n # draw each of the blocks that create the piece\r\n for x in range(templateWidth):\r\n for y in range(templateHeight):\r\n if shapeToDraw[y][x] != blank:\r\n drawBox(None, None, piece[\"color\"], pixelx + (x * boxSize), pixely + (y * boxSize))\r\n\r\n\r\n# draw the \"next\" text\r\ndef drawNextPiece(piece):\r\n nextSurf = basicFont.render(\"Next: \", True, textColor)\r\n nextRect = nextSurf.get_rect()\r\n nextRect.topleft = (windowWidth - 120, 80)\r\n displaysurf.blit(nextSurf, nextRect)\r\n\r\n # draw the \"next\" piece\r\n drawPiece(piece, pixelx=windowWidth-120, pixely=100)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Ch7- Tetromino/Tetris.py","file_name":"Tetris.py","file_ext":"py","file_size_in_byte":16476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"466172568","text":"# Broadlink RM2 Python Plugin for Domoticz\n#\n# Dev. Platform : Win10 x64 & Py 3.5.3 x86\n#\n# Author: zak45, 2017\n#\n\n# Below is what will be displayed in Domoticz GUI under HW\n#\n\"\"\"\n<plugin key=\"BroadlinkRM2\" name=\"Broadlink RM2\" author=\"zak45\" version=\"1.0.0\" wikilink=\"http://www.domoticz.com/wiki/plugins/BroadlinkRM2.html\" externallink=\"https://github.com/mjg59/python-broadlink\">\n <params>\n <param field=\"Address\" label=\"IP Address\" width=\"200px\" required=\"true\" default=\"127.0.0.1\"/>\n <param field=\"Mode1\" label=\"Mac\" width=\"100px\" required=\"true\" default=\"000000000000\"/>\n <param field=\"Mode2\" label=\"Folder to store ini files\" width=\"300px\" required=\"true\" default=\"C:\\\\BroadlinkRM2\"/>\n <param field=\"Mode3\" label=\"Get Temperature Device\" width=\"75px\">\n <options> \n <option label= \"False\" value=\"no\"/>\n <option label= \"True\" value=\"yes\" default=\"True\"/>\n </options>\n </param>\n <param field=\"Mode6\" label=\"Debug\" width=\"75px\">\n <options>\n <option label=\"True\" value=\"Debug\"/>\n <option label=\"False\" value=\"Normal\" default=\"True\" />\n </options>\n </param>\n </params>\n</plugin>\n\"\"\"\n#\n# Main Import\nimport Domoticz\nimport configparser\nimport datetime\nimport time\nimport codecs\n#\n#\n# Required to import path is OS dependent\n# Python framework in Domoticz do not include OS dependent path\n#\nimport sys\nimport os \n\nif sys.platform.startswith('linux'):\n # linux specific code here\n sys.path.append(os.path.dirname(os.__file__) + '/dist-packages')\nelif sys.platform.startswith('darwin'):\n # mac\n sys.path.append(os.path.dirname(os.__file__) + '/site-packages')\nelif sys.platform.startswith('win32'):\n # win specific\n sys.path.append(os.path.dirname(os.__file__) + '\\site-packages')\n\n#\nimport broadlink\n\n#\nisConnected = False\nnumberDev = 2\nbypass = False\ntemp = 0\nlearnedCommand = \"None\"\nsendCommand = \"\"\nloadedCommand = \"\"\nnbUpdate = 1\n\n# Domoticz call back functions\n#\n\n# Executed once at HW creation/ update. 
Can create up to 255 devices.\ndef onStart():\n global numberDev, nbUpdate\n\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Debugging(1)\n if (len(Devices) == 0):\n if Parameters[\"Address\"] == '127.0.0.1' and Parameters[\"Mode1\"] == '000000000000':\n Domoticz.Device(Name=\"Discover\", Unit=1, Type=17, Image=2, Switchtype=17, Used=1).Create()\n \n if ( 1 not in Devices):\n Options = { \"LevelActions\" :\"||||\" , \n \"LevelNames\" :\"Off|Learn|Test|Save|Reset\" ,\n \"LevelOffHidden\":\"true\",\n \"SelectorStyle\" :\"0\"\n } \n Domoticz.Device(Name=\"Command\", Unit=1, TypeName=\"Selector Switch\", Switchtype=18, Image=12, Options=Options, Used=1).Create()\n\n if ( 2 not in Devices and Parameters[\"Mode3\"] == 'yes'):\n Domoticz.Device(Name=\"Temp\", Unit=2, TypeName=\"Temperature\", Used=1).Create()\n\n DumpConfigToLog()\n Domoticz.Heartbeat(30)\n numberDev = len(Devices)\n\n Domoticz.Log(\"Connecting to: \"+Parameters[\"Address\"]+\":\"+Parameters[\"Mode1\"])\n broadlinkConnect()\n UpdateDevice(1, 0, 'Off') \n\n return True\n\ndef onMessage(Data, Status, Extra): \n Domoticz.Log('onMessage: '+str(Data)+\" ,\"+str(Status)+\" ,\"+str(Extra)) \n return True\n\n# executed each time we click on device thru domoticz GUI\ndef onCommand(Unit, Command, Level, Hue):\n global sendCommand\n\n Domoticz.Log(\"onCommand called for Unit \" + str(Unit) + \": Parameter '\" + str(Command) + \"', Level: \" + str(Level) + \" , Connected : \" + str(isConnected))\n \n Command = Command.strip()\n\n if (Command == 'Set Level'):\n if (Unit == 1): # Command selector\n if (Level == 10): \n learn()\n if (Level == 20): \n sendCommand = learnedCommand\n if learnedCommand == \"None\":\n Domoticz.Log('Nothing to send')\n else:\n send() \n if (Level == 30):\n if learnedCommand == \"None\":\n Domoticz.Log('Nothing to save')\n else:\n save()\n if (Level == 40):\n if learnedCommand == \"None\":\n Domoticz.Log('Nothing to reset')\n else:\n reset() \n else:\n Domoticz.Error('Unit unknown')\n\n elif (Command == 'On'):\n\n if (Unit == 1 and Devices[1].Name.endswith(\"Discover\")): # Discovery\n if Discover(): \n UpdateDevice(Unit, 1, 'Found : ' + str(len(brodevices )) + ' device')\n else:\n Domoticz.Error('Not able to find Broadlink device')\n else:\n genCommand(Unit)\n\n elif (Command == 'Off'):\n\n if (Unit == 1 and Devices[1].Name.endswith(\"Discover\")): # Discovery\n UpdateDevice(Unit, 0, 'Off') \n else:\n Domoticz.Error('Unit unknown')\n else:\n Domoticz.Error('Unknown command')\n\n return True\n\ndef onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):\n\n Domoticz.Log(\"Notification: \" + str(Name))\n\n return\n\n# execution depend of Domoticz.Heartbeat(x) x in seconds\ndef onHeartbeat():\n global bypass, isConnected\n \n now = datetime.datetime.now()\n \n if bypass is True: \n bypass = False\n return\n\n if Parameters[\"Mode3\"] == \"yes\":\n\n if ((now.minute % 2) == 0):\n bypass = True\n if isConnected:\n if checkTemp():\n UpdateDevice(2, 1, temp)\n else:\n isConnected = False\n else:\n broadlinkConnect()\n else:\n if (now.minute % 4 == 0):\n broadlinkConnect()\n bypass = True\n\n return True\n\ndef onDisconnect():\n Domoticz.Log(\"onDisconnect called\")\n return\n\n# executed once when HW updated/removed\ndef onStop():\n Domoticz.Log(\"onStop called\")\n return True\n\n# Generic helper functions\ndef DumpConfigToLog():\n for x in Parameters:\n if Parameters[x] != \"\":\n Domoticz.Debug( \"'\" + x + \"':'\" + str(Parameters[x]) + \"'\")\n Domoticz.Debug(\"Device count: \" + 
str(len(Devices)))\n for x in Devices:\n Domoticz.Debug(\"Device: \" + str(x) + \" - \" + str(Devices[x]))\n Domoticz.Debug(\"Device ID: '\" + str(Devices[x].ID) + \"'\")\n Domoticz.Debug(\"Device Name: '\" + Devices[x].Name + \"'\")\n Domoticz.Debug(\"Device nValue: \" + str(Devices[x].nValue))\n Domoticz.Debug(\"Device sValue: '\" + Devices[x].sValue + \"'\")\n Domoticz.Debug(\"Device LastLevel: \" + str(Devices[x].LastLevel))\n return\n\n# Update Device into DB\ndef UpdateDevice(Unit, nValue, sValue):\n # Make sure that the Domoticz device still exists (they can be deleted) before updating it \n if (Unit in Devices):\n if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue):\n Devices[Unit].Update(nValue=nValue, sValue=str(sValue))\n Domoticz.Log(\"Update \"+str(nValue)+\":'\"+str(sValue)+\"' (\"+Devices[Unit].Name+\")\")\n return\n\ndef genCommand(Unit):\n global loadedCommand, sendCommand, nbUpdate\n \n Domoticz.Log('Generate on Command for learned code stored on unit :' + str(Unit))\n\n path=str(Parameters[\"Mode2\"]) + \"\\\\\" + str(Parameters[\"Key\"]) + \"-\" + str(Parameters[\"HardwareID\"]) + \"-\" + str(Unit) + \".ini\"\n\n if not os.path.exists(path):\n Domoticz.Error(' ini file not found: ' + str(path))\n return\n \n\n config = configparser.ConfigParser()\n config.read(path)\n loadedCommand = config.get(\"LearnedCode\", str(Unit))\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(\" Code loaded : \" + loadedCommand) \n sendCommand = loadedCommand\n if broadlinkConnect():\n send()\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(' <b> Command line : ' + '\"' + Parameters['HomeFolder'] + 'plugin_send.py' + '\" ' + path + ' </b>')\n\n UpdateDevice(Unit,1,'On-'+str(nbUpdate))\n nbUpdate +=1\n\n return\n\n# save learned code and create Domoticz device\ndef save():\n global path, learnedCommand, Unit, numberDev\n\n numberDev +=1\n path=str(Parameters[\"Mode2\"]) + \"\\\\\" + str(Parameters[\"Key\"]) + \"-\" + str(Parameters[\"HardwareID\"]) + \"-\" + str(numberDev) + \".ini\"\n\n if os.path.exists(path):\n Domoticz.Error('File exist : ' + path)\n return False\n else:\n try:\n create_config(path,str(numberDev),learnedCommand)\n except:\n Domoticz.Error('Not able to create : ' + path)\n return False\n try:\n Domoticz.Device(Name=str(Parameters[\"HardwareID\"])+\"-\" + str(numberDev), Unit=numberDev, TypeName=\"Selector Switch\", Type=244, Switchtype=9, Subtype=73).Create()\n except:\n Domoticz.Error('Not able to create device')\n return False\n \n UpdateDevice(1, 0, 'Off') \n learnedCommand = \"None\" \n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(\" <b> Command line : \" + Parameters[\"HomeFolder\"] + \"plugin_send.py \" + path + \" </b>\")\n \n return True\n\ndef reset():\n global learnedCommand\n \n UpdateDevice(1, 0, 'Off') \n learnedCommand = \"None\" \n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(\"Reset learned command\")\n \n return True\n\n# discover Broadlink device on the Network\ndef Discover():\n global brodevices, broip\n\n Domoticz.Log(\"All plugin system is on pause for 5s...\")\n brodevices = broadlink.discover(timeout=5)\n Domoticz.Log(\"Found \" + str(len(brodevices )) + \" broadlink devices\")\n\n if str(len(brodevices )) == 0:\n return False\n \n for index, item in enumerate(brodevices):\n\n brodevices[index].auth()\n\n broip = brodevices[index].host\n broip = str(broip)\n Domoticz.Log( \"<b>Device \" + str(index + 1) +\" Host address = \" + broip[1:19] + \"</b>\")\n macadd = ''.join(format(x, '02x') for x in 
brodevices[index].mac[::-1])\n macadd = str(macadd) \n Domoticz.Log( \"<b>Device \" + str(index + 1) +\" MAC address = \" + macadd + \"</b>\")\n\n return True\n\n# Put Broadlink on Learn , packet received converted in Hex\ndef learn():\n global learnedCommand,learnedCommand1,learnedCommand2\n \n broadlinkConnect()\n\n Domoticz.Log(\"All plugin system is on pause for 5s...\")\n Domoticz.Log(\"When Broadlink led is lit press the button on your remote within 5 seconds\")\n \n device.enter_learning()\n\n time.sleep(5) \n\n ir_packet = device.check_data()\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(str(ir_packet))\n \n if str(ir_packet) == \"None\":\n Domoticz.Log('Command not received')\n learnedCommand= \"None\"\n UpdateDevice(1, 0, ' ')\n return False\n\n #learnedCommand=str(ir_packet.hex())\n learnedCommand=str.replace(str.replace(str(codecs.encode(ir_packet, 'hex_codec')),\"b'\",\"\"),\"'\",\"\")\n #learnedCommand2=str(binascii.hexlify(ir_packet))\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(learnedCommand)\n \n Domoticz.Log( \"Code written in memory\" )\n UpdateDevice(1, 1, '10')\n\n return True\n\n# send Hex command\ndef send():\n global sendCommand\n\n if not sendCommand:\n Domoticz.Error('Nothing to send')\n return False\n \n sendCommand = bytes.fromhex(sendCommand)\n #sendCommand=binascii.unhexlify(sendCommand)\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log(str(sendCommand))\n\n try:\n device.send_data(sendCommand)\n Domoticz.Log( \"Code Sent....\")\n except:\n Domoticz.Error( \"Code Sent WARNING....Probably timeout\")\n return False\n\n return True\n\n#Create a config file\ndef create_config(path,Unit,learnedCommand):\n \n config = configparser.ConfigParser()\n config['DEFAULT'] = { 'PluginKey' : Parameters[\"Key\"],\n 'PluginName' : Parameters[\"Name\"],\n 'PluginFolder' : Parameters[\"HomeFolder\"],\n 'HardwareID' : Parameters[\"HardwareID\"],\n 'Unit' : Unit\n }\n\n config['Device'] = { 'Host' : Parameters[\"Address\"],\n 'Mac' : Parameters[\"Mode1\"]\n }\n config['LearnedCode'] = {} \n UniteCode = config['LearnedCode']\n UniteCode[str(Unit)] = learnedCommand\n try:\n with open(path, 'w') as configfile:\n config.write(configfile)\n except IOError:\n Domoticz.Error('Error create config file')\n \n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Log( \"ini file creation....\" + path) \n\n return\n\n# connect to Broadlink\ndef broadlinkConnect():\n global device, isConnected\n\n try:\n device = broadlink.rm(host=(Parameters[\"Address\"],80), mac=bytearray.fromhex(Parameters[\"Mode1\"]))\n device.auth()\n device.host\n isConnected = True\n Domoticz.Log( \"Connected to Broadlink device.\") \n except:\n Domoticz.Error( \"Error Connecting to Broadlink device....\")\n isConnected = False\n return False\n\n return True\n\n# get temperature\ndef checkTemp():\n global temp, device\n\n try:\n temp=device.check_temperature()\n except:\n Domoticz.Error( \"Error getting temperature data from Broadlink device....Timeout\")\n return False\n\n if temp > 60:\n return False\n\n return True","sub_path":"BroadlinkRM2/plugin-v1-0-0.py","file_name":"plugin-v1-0-0.py","file_ext":"py","file_size_in_byte":13648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"622368957","text":"##############################################################################\n# Programming Excercise 10\n#\n# Find the square root of X using Newton-Raphson 
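# learn() above stores the captured IR packet as hex text (via codecs) and
# send() rebuilds the bytes with bytes.fromhex before transmitting. The
# round-trip in isolation, with a made-up packet:
import codecs

packet = b'\x26\x00\x0a\x0d'
as_text = codecs.encode(packet, 'hex_codec').decode()  # '26000a0d'
assert bytes.fromhex(as_text) == packet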
method\n#############################################################################\n\n# epsilon represents our error tolerance. We'll\n# accept any answer Y where Y squared is within epsilon\n# of X\nepsilon = 0.001\n\n# we'll also keep track of how many turns around the loop\n# this takes, so we can compare against other techniques.\nnum_iterations = 0\n\nx = float(input(\"Please enter a real number: \"))\nans = x/2.0\n\nwhile abs(ans**2 - x) > epsilon:\n ans = ans - (((ans**2) - x)/(2*ans))\n num_iterations += 1\n\nprint (\"Completed in\", num_iterations, \" iterations\")\nif abs(ans**2 - x ) > epsilon:\n print(\"Newton-Raphson could not determine the square root of\", x)\nelse:\n print(\"The square root of\", x, \"is\", ans, \"!\")","sub_path":"source/courses/cmps367/exercises/pe10/pe10-newton.py","file_name":"pe10-newton.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"455817313","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nLICENCES = [\n\t'AGPL',\n\t'Apache2',\n\t'BSD-0C',\n\t'BSD-1C',\n\t'BSD-2C',\n\t'BSD-3C',\n\t'BSD-4C',\n\t'BSD-OG',\n\t'CC-BY-30',\n\t'CC-BY-40',\n\t'CC-BY-NC-30',\n\t'CC-BY-NC-40',\n\t'CC-BY-NC-ND-30',\n\t'CC-BY-NC-ND-40',\n\t'CC-BY-NC-SA-30',\n\t'CC-BY-NC-SA-40',\n\t'CC-BY-ND-30',\n\t'CC-BY-ND-40',\n\t'CC-BY-SA-30',\n\t'CC-BY-SA-40',\n\t'FDL',\n\t'GPL2',\n\t'LGPL21',\n\t'MPL2'\n]\n\nLICENCE_NAMES = [\n\t'GNU Affero GPLv3',\n\t'Apache 2.0',\n\t'BSD-0-Clause',\n\t'BSD-1-Clause',\n\t'BSD-2-Clause',\n\t'BSD-3-Clause',\n\t'BSD-4-Clause',\n\t'Old BSD License',\n\t'Creative Commons BY 3.0',\n\t'Creative Commons BY 4.0',\n\t'Creative Commons BY-NC 3.0',\n\t'Creative Commons BY-NC 4.0',\n\t'Creative Commons BY-NC-ND 3.0',\n\t'Creative Commons BY-NC-ND 4.0',\n\t'Creative Commons BY-NC-SA 3.0',\n\t'Creative Commons BY-NC-SA 4.0',\n\t'Creative Commons BY-ND 3.0',\n\t'Creative Commons BY-ND 4.0',\n\t'Creative Commons BY-SA 3.0',\n\t'Creative Commons BY-SA 4.0',\n\t'GNU Free Documentation License.',\n\t'GNU General Public License v2',\n\t'GNU Lesser GPL v2.1',\n\t'Mozilla Public License 2.0'\n]\n\nfrom sys import stdin, stdout\nfrom os import getcwd, path\nfrom subprocess import call\n\ndef mkbplate(title, copy_years, org, licnum, sh):\n\tlhs = ' *'\n\trhs = '*'\n\tif sh:\n\t\tlhs = '##'\n\t\trhs = '##'\n\tret = ''\n\tif sh:\n\t\tret += ('#' * 78) + '\\n'\n\telse:\n\t\tret += '/' + ('*' * 76) + '\\\\\\n'\n\ttitle_len = len(title)\n\tif title_len % 2:\n\t\ttitle += '\\u2122'\n\t\ttitle_len += 1\n\tspaces = ' ' * ((74 - title_len) // 2)\n\tret += lhs + spaces + title + spaces + rhs + '\\n'\n\tret += lhs + (' ' * 74) + rhs + '\\n'\n\tcopy = 'Copyright © ' + copy_years + ' ' + org\n\tif len(copy_years) % 2:\n\t\tcopy += '.'\n\tspaces = ' ' * ((74 - len(copy)) // 2)\n\tret += lhs + spaces + copy + spaces + rhs + '\\n'\n\tlic = 'Released under ' + LICENCE_NAMES[licnum - 1]\n\tif len(lic) % 2:\n\t\tlic += '.'\n\tspaces = ' ' * ((74 - len(lic)) // 2)\n\tret += lhs + spaces + lic + spaces + rhs + '\\n'\n\tif sh:\n\t\tret += '#' * 78\n\telse:\n\t\tret += '\\\\' + ('*' * 76) + '/'\n\treturn ret\n\ndef printbplate(title, copy_years, org, licnum):\n\treturn 'This file contains the project’s copypastable boilerplate comment headers.\\n\\nBoilerplate for C-like languages:\\n\\n' + \\\n\t\tmkbplate(title, copy_years, org, licnum, False) + \\\n\t\t'\\n\\nHash-based boilerplate (Python, POSIX shell, Makefile):\\n\\n' + \\\n\t\tmkbplate(title, copy_years, org, licnum, True) + 
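# Worked through for x = 16 with the starting guess x/2 = 8:
#   8 -> 8 - (64-16)/16 = 5 -> 5 - (25-16)/10 = 4.1
#   -> 4.1 - (16.81-16)/8.2 ~= 4.0012 -> ~= 4.000002   (4 iterations)
# Because the loop only exits once |ans**2 - x| <= epsilon, the trailing
# failure branch above can never trigger; the real failure mode is x < 0,
# for which the iteration cannot converge. A guarded sketch of the method:
def newton_sqrt(x, epsilon=0.001):
    if x < 0:
        raise ValueError("no real square root for a negative number")
    ans = x / 2.0
    while abs(ans**2 - x) > epsilon:
        ans = ans - (ans**2 - x) / (2 * ans)
    return ans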
'\\n'\n\ndef strlicq(lics):\n\tret = 'Choose a licence:\\n'\n\ti = 0\n\tlics_len = len(lics)\n\twhile i < lics_len:\n\t\tret += str(i + 1) if i >= 9 else ' ' + str(i + 1)\n\t\tret += '. ' + lics[i] + '\\n'\n\t\ti += 1\n\treturn ret\n\ndef mkdir(p):\n\tfrom os import mkdir as mkdir_\n\t#from os import FileExistsError\n\ttry:\n\t\tmkdir_(p)\n\texcept FileExistsError as e:\n\t\tpass\n\nslickdir = path.join(path.dirname(path.realpath(__file__)), '..')\n\ndef pause():\n\tinput('\\n')\n\ndef yesno(msg):\n\tresp = 0\n\tfirs = True\n\twhile resp != 'y' and resp != 'n':\n\t\tif not firs:\n\t\t\tstdout.write('\\nInvalid value \\u2018' + resp + '\\u2019\\n')\n\t\tstdout.write(msg + ' ')\n\t\tstdout.flush()\n\t\tresp = stdin.read(1)\n\t\tfirs = False\n\tstdout.write('\\n')\n\treturn True if resp == 'y' else False\n\ndef multich(msg, opt_ct):\n\tnum = 0\n\tfirs = True\n\twhile num > opt_ct or num == 0:\n\t\tif not firs:\n\t\t\tstdout.write('\\nInvalid value \\u2018' + str(num) + '\\u2019\\n')\n\t\tstdout.write(msg + ' ')\n\t\tstdout.flush()\n\t\tnum = int(input())\n\t\tfirs = False\n\treturn num\n\ndef prompt(msg):\n\tstdout.write(msg + ' ')\n\tstdout.flush()\n\treturn stdin.readline()\n\ndef main(args):\n\t# get information from user\n\ttitle = prompt('What is the name of the project?')[:-1]\n\torg = prompt('Who is the author or organisation?')[:-1]\n\tn = multich('Is this for a library (1) or program (2)?', 2)\n\tmakefile = 'Makefile.library' if n == 1 else 'Makefile.program'\n\tgitinit = False\n\tlicnum = None\n\tcwd = getcwd()\n\tif not path.exists(path.join(cwd, '.git')):\n\t\tif yesno('Initialise a git repository?'):\n\t\t\tgitinit = True\n\tif yesno('Add a licence?'):\n\t\tlicnum = multich(strlicq(LICENCES), len(LICENCES))\n\t\tlic = path.join('COPYING.' + LICENCES[licnum - 1])\n\tstdout.write('Ready to commit. Press any key to continue. ')\n\tpause()\n\tstdout.write('\\n')\n\tfrom shutil import copyfile\n\tcopyfile(path.join(slickdir, 'src', makefile), path.join(cwd, 'Makefile'))\n\tf = open(path.join(cwd, 'Makefile'), 'r')\n\ttmp = f.read()\n\tf.close()\n\ttmp = tmp.replace('@BOILERPLATE@',\n\t\tmkbplate(title, str(2020), org, licnum, True))\n\ttmp = tmp.replace('@TITLE@', title)\n\tprint('=====')\n\tprint(tmp)\n\tprint('=====')\n\tf = open(path.join(cwd, 'Makefile'), 'w')\n\tf.write(tmp)\n\tf.flush()\n\tf.close()\n\tif lic:\n\t\tcopyfile(path.join(slickdir, 'src', 'COPYING.' + LICENCES[licnum - 1]),\n\t\tpath.join(cwd, 'COPYING'))\n\tcopyfile(path.join(slickdir, 'src', 'gitattributes'),\n\t\tpath.join(cwd, '.gitattributes'))\n\tcopyfile(path.join(slickdir, 'src', 'gitignore'),\n\t\tpath.join(cwd, '.gitignore'))\n\tmkdir(path.join(cwd, 'doc'))\n\tmkdir(path.join(cwd, 'etc'))\n\tmkdir(path.join(cwd, 'data'))\n\tmkdir(path.join(cwd, 'util'))\n\tmkdir(path.join(cwd, 'src'))\n\tmkdir(path.join(cwd, 'include'))\n\tf = open(path.join(cwd, 'etc', 'BOILERPLATE'), 'w')\n\tf.write(printbplate(title, str(2020), org, licnum))\n\tf.flush()\n\tf.close()\n\tif gitinit:\n\t\tcall(['git', 'init'])\n\tprint('All done. 
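# mkbplate above centres each banner line by computing the side padding and
# widening odd-length strings (the trademark-sign trick). str.center does the
# width arithmetic directly; a sketch of one banner line built that way:
def banner_line(text, lhs=' *', rhs='*', width=74):
    return lhs + text.center(width) + rhs

print(banner_line('My Project'))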
Exiting...')\n\treturn 0\n\nif __name__ == '__main__':\n\tfrom sys import argv, exit\n\texit(main(argv))\n","sub_path":"util/initrepo.py","file_name":"initrepo.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"201888893","text":"# Standardize a crashes CSV into compatible JSON document.\n# Author terryf82 https://github.com/terryf82\n\nimport argparse\nimport os\nimport pandas as pd\nimport yaml\nfrom collections import OrderedDict\nimport csv\nimport calendar\nimport random\nfrom .standardization_util import parse_date, validate_and_write_schema\n\nCURR_FP = os.path.dirname(\n os.path.abspath(__file__))\nBASE_FP = os.path.dirname(os.path.dirname(CURR_FP))\n\n\ndef read_standardized_fields(raw_crashes, fields, opt_fields):\n\n crashes = {}\n\n for i, crash in enumerate(raw_crashes):\n if i % 10000 == 0:\n print(i)\n \n # skip any crashes that don't have coordinates\n if crash[fields[\"latitude\"]] == \"\" or crash[fields[\"longitude\"]] == \"\":\n continue\n \n # construct crash date based on config settings, skipping any crashes without date\n if fields[\"date_complete\"]:\n if not crash[fields[\"date_complete\"]]:\n continue\n \n else:\n crash_date = crash[fields[\"date_complete\"]]\n \n elif fields[\"date_year\"] and fields[\"date_month\"]:\n if fields[\"date_day\"]:\n crash_date = str(crash[fields[\"date_year\"]]) + \"-\" + str(crash[fields[\"date_month\"]]) + \"-\" + crash[fields[\"date_day\"]]\n # some cities do not supply a day of month for crashes, randomize if so\n else:\n available_dates = calendar.Calendar().itermonthdates(\n crash[fields[\"date_year\"]], crash[fields[\"date_month\"]])\n crash_date = str(random.choice([date for date in available_dates if date.month == crash[fields[\"date_month\"]]]))\n \n # skip any crashes that don't have a date\n else:\n continue\n\n crash_time = None\n if fields[\"time\"]:\n crash_time = crash[fields[\"time\"]]\n \n if fields[\"time_format\"]:\n crash_date_time = parse_date(\n crash_date,\n crash_time,\n fields[\"time_format\"]\n )\n \n else:\n crash_date_time = parse_date(\n crash_date,\n crash_time\n )\n \n # Skip crashes where date can't be parsed\n if not crash_date_time:\n continue\n\n formatted_crash = OrderedDict([\n (\"id\", crash[fields[\"id\"]]),\n (\"dateOccurred\", crash_date_time),\n (\"location\", OrderedDict([\n (\"latitude\", float(crash[fields[\"latitude\"]])),\n (\"longitude\", float(crash[fields[\"longitude\"]]))\n ]))\n ])\n formatted_crash = add_city_specific_fields(crash, formatted_crash,\n opt_fields)\n crashes[formatted_crash[\"id\"]] = formatted_crash\n return crashes\n\n\ndef add_city_specific_fields(crash, formatted_crash, fields):\n\n # Add summary and address\n if \"summary\" in list(fields.keys()) and fields[\"summary\"]:\n formatted_crash[\"summary\"] = crash[fields[\"summary\"]]\n if \"address\" in list(fields.keys()) and fields[\"address\"]:\n formatted_crash[\"address\"] = crash[fields[\"address\"]]\n\n # setup a vehicles list for each crash\n formatted_crash[\"vehicles\"] = []\n\n # check for car involvement\n if \"vehicles\" in list(fields.keys()) and fields[\"vehicles\"] == \"mode_type\":\n # this needs work, but for now any of these mode types\n # translates to a car being involved, quantity unknown\n if crash[fields[\"vehicles\"]] == \"mv\" or crash[fields[\"vehicles\"]] == \"ped\" or crash[fields[\"vehicles\"]] == \"\":\n formatted_crash[\"vehicles\"].append({\"category\": \"car\"})\n\n elif \"vehicles\" 
in list(fields.keys()) and fields[\"vehicles\"] == \"TOTAL_VEHICLES\":\n if crash[fields[\"vehicles\"]] != 0 and crash[fields[\"vehicles\"]] != \"\":\n formatted_crash[\"vehicles\"].append({\n \"category\": \"car\",\n \"quantity\": int(crash[fields[\"vehicles\"]])\n })\n\n # check for bike involvement\n if \"bikes\" in list(fields.keys()) and fields[\"bikes\"] == \"mode_type\":\n # assume bike and car involved, quantities unknown\n if crash[fields[\"bikes\"]] == \"bike\":\n formatted_crash[\"vehicles\"].append({\"category\": \"car\"})\n formatted_crash[\"vehicles\"].append({\"category\": \"bike\"})\n\n elif \"bikes\" in list(fields.keys()) and fields[\"bikes\"] == \"TOTAL_BICYCLES\":\n if crash[fields[\"bikes\"]] != 0 and crash[fields[\"bikes\"]] != \"\":\n formatted_crash['vehicles'].append({\n \"category\": \"bike\",\n \"quantity\": int(crash[fields[\"bikes\"]])\n })\n return formatted_crash\n\n\ndef add_id(csv_file, id_field):\n \"\"\"\n If the csv_file does not contain an id, create one\n \"\"\"\n\n rows = []\n with open(csv_file) as f:\n csv_reader = csv.DictReader(f)\n count = 1\n for row in csv_reader:\n if id_field in row:\n break\n row.update({id_field: count})\n rows.append(row)\n count += 1\n if rows:\n with open(csv_file, 'w') as f:\n writer = csv.DictWriter(f, list(rows[0].keys()))\n writer.writeheader()\n for row in rows:\n writer.writerow(row)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--destination\", type=str,\n help=\"destination name, e.g. boston\")\n parser.add_argument(\"-f\", \"--folder\", type=str,\n help=\"path to destination's data folder\")\n\n args = parser.parse_args()\n\n raw_path = os.path.join(args.folder, \"raw/crashes\")\n if not os.path.exists(raw_path):\n print(raw_path+\" not found, exiting\")\n exit(1)\n\n # load config for this city\n config_file = os.path.join(BASE_FP, 'src/config',\n \"config_\"+args.destination+\".yml\")\n with open(config_file) as f:\n config = yaml.safe_load(f)\n\n dict_city_crashes = {}\n print(\"searching \"+raw_path+\" for raw files:\\n\")\n\n for csv_file in list(config['crashes_files'].keys()):\n\n if not os.path.exists(os.path.join(raw_path, csv_file)):\n raise SystemExit(csv_file + \" not found, exiting\")\n # find the config for this crash file\n crash_config = config['crashes_files'][csv_file]\n if crash_config is None:\n print(\"- could not find config for crash file \"+csv_file+\", skipping\")\n continue\n\n add_id(\n os.path.join(raw_path, csv_file), crash_config['required']['id'])\n\n print(\"processing \"+csv_file)\n\n df_crashes = pd.read_csv(os.path.join(raw_path, csv_file), na_filter=False)\n raw_crashes = df_crashes.to_dict(\"records\")\n\n std_crashes = read_standardized_fields(raw_crashes,\n crash_config['required'], crash_config['optional'])\n print(\"- {} crashes loaded with standardized fields, checking for specific fields\\n\".format(len(std_crashes)))\n dict_city_crashes.update(std_crashes)\n\n print(\"all crash files processed\")\n print(\"- {} {} crashes loaded, validating against schema\".format(len(dict_city_crashes), args.destination))\n\n schema_path = os.path.join(BASE_FP, \"standards\", \"crashes-schema.json\")\n list_city_crashes = list(dict_city_crashes.values())\n crashes_output = os.path.join(args.folder, \"standardized/crashes.json\")\n validate_and_write_schema(schema_path, list_city_crashes, 
crashes_output)\n","sub_path":"src/data_standardization/standardize_crashes.py","file_name":"standardize_crashes.py","file_ext":"py","file_size_in_byte":7584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"486577911","text":"# -- coding: utf-8 --'\r\n\r\nimport pandas as pd\r\nimport os\r\nimport textwrap\r\nimport string\r\nimport unicodedata\r\nimport sys\r\nimport sqlite3\r\nimport easygui\r\nimport re\r\nimport copy\r\nimport json\r\nimport xlsxwriter\r\n\r\n# import pyanx\r\n\r\nMAX_TAM_LABEL = 100 # nro máximo de caracteres nos labels\r\n\r\n\r\nclass estrutura: # especificações das planilhas\r\n def __init__(self, nome=\"\", estr=[], pasta=\"./\"):\r\n self.nome = nome\r\n self.estr = estr\r\n self.pasta = pasta\r\n\r\n def mudar_pasta(self, pasta):\r\n self.pasta = pasta\r\n\r\n def xlsx(self):\r\n return self.nome + \".xlsx\"\r\n\r\n def estr_upper(self):\r\n result = []\r\n for elem in self.estr:\r\n result.append(elem.upper())\r\n return result\r\n\r\n def nomearq(self):\r\n return os.path.join(self.pasta, self.xlsx())\r\n\r\n def arquivo_existe(self):\r\n if (\r\n self.nome.upper() == \"grupos\".upper()\r\n or self.nome.upper() == \"vinculos\".upper()\r\n ): # um novo é criado vazio, uma vez que não vem do COAF\r\n return True\r\n else:\r\n return os.path.isfile(self.nomearq())\r\n\r\n def estr_compativel(self, outra_estr=[]):\r\n return all(elem.upper() in self.estr_upper() for elem in outra_estr)\r\n\r\n def exibir(self):\r\n strestr = \",\".join(self.estr)\r\n return self.nome + \": \" + strestr\r\n\r\n\r\ndef help_estruturas(estruturas):\r\n print(\"Estruturas esperadas das planilhas:\")\r\n for e in estruturas:\r\n print(\" \" + e.exibir())\r\n\r\n\r\nclass log:\r\n def __init__(self):\r\n self.logs = u\"\"\r\n\r\n def gravalog(self, linha):\r\n print(linha)\r\n self.logs += linha + \"\\n\"\r\n\r\n def lelog(self):\r\n return self.logs\r\n\r\n\r\nclass nodo:\r\n def __init__(self, id, label, tipo=\"ENT\", tooltip=\"\", fonte=\"RIF\"):\r\n self.id = id\r\n self.tipo = tipo\r\n self.label = label\r\n self.cor = \"Silver\"\r\n self.sexo = 0\r\n self.m1 = 0\r\n self.m2 = 0\r\n self.situacao = \"\"\r\n self.dataOperacao = \"\"\r\n self.texto_tooltip = tooltip\r\n self.fonte = fonte\r\n self.camada = 0 if self.fonte == \"RIF\" else 1\r\n\r\n def todict(self):\r\n return {\r\n \"id\": self.id,\r\n \"tipo\": self.tipo,\r\n \"sexo\": self.sexo,\r\n \"label\": self.label,\r\n \"camada\": self.camada,\r\n \"situacao\": self.situacao,\r\n \"cor\": self.cor,\r\n \"texto_tooltip\": self.texto_tooltip,\r\n \"m1\": self.m1,\r\n \"m2\": self.m2,\r\n \"m3\": 0,\r\n \"m4\": 0,\r\n \"m5\": 0,\r\n \"m6\": 0,\r\n \"m7\": 0,\r\n \"m8\": 0,\r\n \"m9\": 0,\r\n \"m10\": 0,\r\n \"m11\": 0,\r\n \"dataoperacao\": self.dataOperacao,\r\n }\r\n\r\n\r\nclass noPF(nodo):\r\n def __init__(self, id, label=\"\", cor=\"Silver\", sexo=0, fonte=\"RIF\"):\r\n nodo.__init__(self, id, label, \"PF\")\r\n self.sexo = sexo\r\n\r\n def todict(self):\r\n return nodo.todict(self)\r\n\r\n\r\nclass noPJ(nodo):\r\n def __init__(self, id, label=\"\", cor=\"Silver\", fonte=\"RIF\"):\r\n nodo.__init__(self, id, label, \"PJ\")\r\n self.cor = cor\r\n self.sexo = 1\r\n\r\n\r\nclass noConta(nodo):\r\n def __init__(self, id, label=\"CONTA\", cor=\"Green\"):\r\n nodo.__init__(self, id, label, \"CCR\")\r\n self.cor = cor\r\n\r\n\r\nclass noGrupo(nodo):\r\n def __init__(self, id, label=\"GRUPO\", cor=\"Blue\"):\r\n nodo.__init__(self, id, label, \"GR\")\r\n self.cor = cor\r\n 
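# nodo.__init__ já definiu camada=0 a partir do fonte padrão 'RIF';\r\n        # sobrescrever self.fonte abaixo não recalcula a camada (presume-se intencional)\r\n        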
self.fonte = \"grupos\"\r\n\r\n\r\nclass noComunicacao(nodo):\r\n def __init__(self, id, label=\"COMUNICACAO\", cor=\"Red\", dataOperacao=None):\r\n nodo.__init__(self, id, label, \"COM\")\r\n self.cor = cor\r\n\r\n # self.dataOperacao=dataOperacao\r\n\r\n\r\nclass aresta:\r\n def __init__(self, origem, destino, descricao=\"\", cor=\"Silver\", fonte=\"RIF\"):\r\n self.origem = origem\r\n self.destino = destino\r\n self.descricao = descricao\r\n self.cor = cor\r\n self.fonte = fonte\r\n self.camada = 0 if self.fonte == \"RIF\" else 1\r\n\r\n def todict(self):\r\n return {\r\n \"origem\": self.origem,\r\n \"destino\": self.destino,\r\n \"cor\": self.cor,\r\n \"camada\": self.camada,\r\n \"tipoDescricao\": {\"0\": self.descricao},\r\n }\r\n\r\n\r\nlg = log()\r\n\r\ncom = estrutura(\r\n \"Comunicacoes\",\r\n [\r\n \"Indexador\",\r\n \"Data_do_Recebimento\",\r\n \"Data_da_operacao\",\r\n \"DataFimFato\",\r\n \"cpfCnpjComunicante\",\r\n \"nomeComunicante\",\r\n \"CidadeAgencia\",\r\n \"UFAgencia\",\r\n \"NomeAgencia\",\r\n \"NumeroAgencia\",\r\n \"informacoesAdicionais\",\r\n \"CampoA\",\r\n \"CampoB\",\r\n \"CampoC\",\r\n \"CampoD\",\r\n \"CampoE\",\r\n ],\r\n)\r\nenv = estrutura(\r\n \"Envolvidos\",\r\n [\r\n \"Indexador\",\r\n \"cpfCnpjEnvolvido\",\r\n \"nomeEnvolvido\",\r\n \"tipoEnvolvido\",\r\n \"agenciaEnvolvido\",\r\n \"contaEnvolvido\",\r\n \"DataAberturaConta\",\r\n \"DataAtualizacaoConta\",\r\n \"bitPepCitado\",\r\n \"bitPessoaObrigadaCitado\",\r\n \"intServidorCitado\",\r\n ],\r\n)\r\noco = estrutura(\"Ocorrencias\", [\"Indexador\", \"Ocorrencia\"])\r\n# opcionais\r\ngru = estrutura(\"Grupos\", [\"cpfCnpjEnvolvido\", \"nome_Envolvido\", \"Grupo\", \"Detalhe\"])\r\nvin = estrutura(\r\n \"Vinculos\",\r\n [\r\n \"cpfCnpjEnvolvido\",\r\n \"nome_Envolvido\",\r\n \"cpfCnpjVinculado\",\r\n \"nome_Vinculado\",\r\n \"Descricao\",\r\n ],\r\n)\r\n\r\n\r\nestruturas = [com, env, oco, gru, vin]\r\n# help_estruturas(estruturas)\r\n\r\n\r\ndef removeAcentos(data):\r\n if data is None:\r\n return u\"\"\r\n # if isinstance(data,str):\r\n # data = unicode(data,'latin-1','ignore')\r\n return \"\".join(\r\n x for x in unicodedata.normalize(\"NFKD\", data) if x in string.printable\r\n )\r\n\r\n\r\ndef gerar_planilha(arquivo, df, nome, indice=False):\r\n def formatar_cabecalho(cor):\r\n return arquivo.book.add_format(\r\n {\r\n \"bold\": True,\r\n \"text_wrap\": True,\r\n \"valign\": \"top\",\r\n \"fg_color\": cor,\r\n \"border\": 1,\r\n }\r\n )\r\n\r\n # Palette URL: http://paletton.com/#uid=43K0I0kw0w0jyC+oRxVy4oIDfjr\r\n PALETA = [\r\n \"#5778C0\",\r\n \"#a4b3b6\",\r\n \"#FF8D63\",\r\n \"#FFE700\",\r\n \"#FFA900\",\r\n \"#000000\",\r\n ] # azul, cinza, verm, amarelo, lara, preto\r\n COR_PRINCIPAL = PALETA[0]\r\n COR_NEUTRA_CLARA = PALETA[1]\r\n COR_SECUNDARIA = PALETA[2]\r\n COR_TERCIARIA = PALETA[4]\r\n COR_NEUTRA_ESCURA = PALETA[5]\r\n\r\n df.style.bar(color=COR_PRINCIPAL)\r\n print(\"antes \" + nome)\r\n df.to_excel(arquivo, sheet_name=nome, index=indice)\r\n print(\"depois \" + nome)\r\n # Write the column headers with the defined format.\r\n # print(df.index.names)\r\n if len(arquivo.sheets) > 6:\r\n cor_basica = COR_SECUNDARIA\r\n elif len(arquivo.sheets) < 3:\r\n cor_basica = COR_PRINCIPAL\r\n else:\r\n cor_basica = COR_NEUTRA_CLARA\r\n\r\n if not indice:\r\n for col_num, value in enumerate(df.columns.values):\r\n arquivo.sheets[nome].write(\r\n 0, col_num, value, formatar_cabecalho(cor_basica)\r\n )\r\n arquivo.sheets[nome].set_tab_color(cor_basica)\r\n else:\r\n for col_num, value in 
enumerate(df.index.names):\r\n arquivo.sheets[nome].write(\r\n 0, col_num, value, formatar_cabecalho(cor_basica)\r\n )\r\n for col_num, value in enumerate(df.columns.values):\r\n arquivo.sheets[nome].write(\r\n 0,\r\n col_num + len(df.index.names),\r\n value,\r\n formatar_cabecalho(COR_NEUTRA_CLARA),\r\n )\r\n arquivo.sheets[nome].set_tab_color(cor_basica)\r\n\r\n\r\ndef gerar_planilhaXLS(arquivo, df, nome, indice=False):\r\n df.style.bar(color=\"#99ccff\")\r\n df.to_excel(arquivo, sheet_name=nome, index=indice)\r\n\r\n\r\ndef tipoi2F(umou2=1, linha=None, carJuncao=\"\\r \"):\r\n print(\"linha= \", linha)\r\n descricao = linha[1 if umou2 == 1 else 3]\r\n # if descricao == '': #telefone ou endereco\r\n # descricao = carJuncao.join(node[4:].split('__'))\r\n # else:\r\n # if self.GNX.node[node]['tipo'] !='TEL':\r\n # descricao = Obj.parseCPFouCNPJ(node) + carJuncao + carJuncao.join(textwrap.wrap(descricao,30))\r\n\r\n # dicTipo = {'TEL':u'Telefone', 'END':u'Local', 'PF':u'PF', 'PJ':u'PJ', 'PE':u'Edifício', 'ES':u'Edifício', 'CC':u'Conta','INF':u'Armário' }\r\n\r\n tipo = linha[7 if umou2 == 1 else 8]\r\n # tipoi2 = dicTipo[tipo]\r\n tipoi2 = u\"Escritório\"\r\n if tipo in (\"TEL\", \"END\", \"CC\"):\r\n descricao = \"\"\r\n else:\r\n descricao = carJuncao.join(textwrap.wrap(descricao, 30))\r\n sexo = 1\r\n\r\n if tipo == \"PF\":\r\n # if self.GNX.node[node]['sexo']==1:\r\n if not sexo or sexo == 1:\r\n tipoi2 = u\"Profissional (masculino)\"\r\n elif sexo == 2:\r\n tipoi2 = u\"Profissional (feminino)\"\r\n elif tipo == \"PJ\":\r\n # if node[8:12]!='0001':\r\n # if sexo != 1: #1=matriz\r\n if sexo % 2 == 0: # 1=matriz\r\n tipoi2 = u\"Apartamento\" # filial de empresa\r\n else:\r\n tipoi2 = u\"Escritório\"\r\n elif tipo == \"PE\":\r\n tipoi2 = u\"Oficina\"\r\n\r\n corSituacao = linha[9 if umou2 == 1 else 10]\r\n if linha[4 if umou2 == 1 else 5] == 0:\r\n corSituacao = \"Vermelho\"\r\n return (tipoi2, descricao, corSituacao)\r\n\r\n\r\ndef to_i2(df, arquivo=None):\r\n dicTiposIngles = {\r\n u\"Profissional (masculino)\": u\"Person\",\r\n u\"Profissional (feminino)\": u\"Woman\",\r\n u\"Escritório\": u\"Office\",\r\n u\"Apartamento\": u\"Workshop\",\r\n u\"Governo\": u\"House\",\r\n u\"Casa\": u\"House\",\r\n u\"Loja\": u\"Office\",\r\n u\"Oficina\": u\"Office\",\r\n u\"Telefone\": u\"Phone\",\r\n u\"Local\": u\"Place\",\r\n u\"Conta\": u\"Account\",\r\n u\"Armário\": u\"Cabinet\",\r\n u\"Edifício\": u\"Office\",\r\n }\r\n # chart = Pyanx_macros()\r\n noi2origem = {}\r\n noi2destino = {}\r\n\r\n for idc, campos in df.iterrows():\r\n # print('campos= ',campos)\r\n\r\n tipo, descricao, corSituacao = tipoi2F(linha=campos, umou2=1, carJuncao=\" \")\r\n noi2origem[idc] = chart.add_node(\r\n entity_type=dicTiposIngles.get(tipo, \"\"),\r\n label=(campos[\"cpfcnpj1\"]) + u\"-\" + (descricao),\r\n )\r\n tipo, descricao, corSituacao = tipoi2F(linha=campos, umou2=2, carJuncao=\" \")\r\n noi2destino[idc] = chart.add_node(\r\n entity_type=dicTiposIngles.get(tipo, \"\"),\r\n label=(campos[\"cpfcnpj1\"]) + u\"-\" + (descricao),\r\n )\r\n\r\n nomeLigacao = campos[\"descrição\"]\r\n chart.add_edge(noi2origem[idc], noi2destino[idc], removeAcentos(nomeLigacao))\r\n # idc += 1\r\n\r\n fstream = chart.createStream(\r\n layout=\"spring_layout\", iterations=0\r\n ) # não calcula posição\r\n\r\n retorno = fstream.getvalue()\r\n fstream.close()\r\n if arquivo is not None:\r\n f = open(arquivo, \"w\")\r\n f.write(retorno)\r\n f.close()\r\n return retorno\r\n\r\n\r\ndef soDigitos(texto):\r\n return re.sub(\"[^0-9]\", \"\", 
texto)\r\n\r\n\r\ndef estimarFluxoDoDinheiro(tInformacoesAdicionais):\r\n # normalmente aparece algo como R$ 20,8 Mil enviada para Jardim Indústria e Comércio - CNPJ 606769xxx\r\n # inicialmente quebramos o texto por R$ e verifica quais são seguidos por CPF ou CNPJ\r\n # pega o texto da coluna InformacoesAdicionais do arquivo Comunicacoes.csv e tenta estimar o valor para cada cpf/cnpj\r\n # normalmente aparece algo como R$ 20,8 Mil enviada para Indústria e Comércio - CNPJ 6067xxxxxx\r\n # inicialmente quebramos o texto por R$ e verifica quais são seguidos por CPF ou CNPJ\r\n # retorna dicionário\r\n # como {'26106949xx': 'R$420 MIL RECEBIDOS, R$131 MIL POR', '68360088xxx': 'R$22 MIL, RECEBIDAS'}\r\n # lista = re.sub(' +', ' ',tInformacoesAdicionais).upper().split('R$')\r\n t = re.sub(\" +\", \" \", tInformacoesAdicionais).upper()\r\n lista = t.split(\"R$\")\r\n listaComTermoCPFCNPJ = []\r\n for item in lista:\r\n if \"CPF\" in item or \"CNPJ\" in item:\r\n listaComTermoCPFCNPJ.append(item.strip())\r\n\r\n listaValores = []\r\n valoresDict = {}\r\n for item in listaComTermoCPFCNPJ:\r\n valorPara = \"\"\r\n cpn = \"\"\r\n le = item.split(\" \")\r\n valor = \"R$\" + le[0] # + ' ' + le[1] # + ' ' + le[2]\r\n if le[1].upper().rstrip(\",\").rstrip(\"S\").rstrip(\",\") in (\r\n \"MIL\",\r\n \"MI\",\r\n \"RECEBIDO\",\r\n \"RECEBIDA\",\r\n \"ENVIADA\",\r\n \"RETIRADO\",\r\n \"DEPOSITADO\",\r\n \"CHEQUE\",\r\n ):\r\n valor += \" \" + le[1]\r\n if le[2].upper().rstrip(\",\").rstrip(\"S\") in (\r\n \"MIL\",\r\n \"MI\",\r\n \"RECEBIDO\",\r\n \"RECEBIDA\",\r\n \"ENVIADA\",\r\n \"RETIRADO\",\r\n \"DEPOSITADO\",\r\n \"CHEQUE\",\r\n ):\r\n valor += \" \" + le[2]\r\n if \"CPF\" in item:\r\n aux1 = item.split(\"CPF \")\r\n try:\r\n aux2 = aux1[1].split(\" \")\r\n cpn = soDigitos(aux2[0])\r\n except:\r\n pass\r\n elif \"CNPJ\" in item:\r\n aux1 = item.split(\"CNPJ \")\r\n try:\r\n aux2 = aux1[1].split(\" \")\r\n cpn = soDigitos(aux2[0])\r\n except:\r\n pass\r\n if cpn:\r\n listaValores.append(valorPara)\r\n if cpn in valoresDict:\r\n v = valoresDict[cpn]\r\n v.add(valor)\r\n valoresDict[cpn] = v\r\n else:\r\n valoresDict[cpn] = set([valor])\r\n d = {}\r\n for k, v in valoresDict.items():\r\n d[k] = \", \".join(v)\r\n return d\r\n\r\n\r\n# .def estimaFluxoDoDinheiro(t):\r\n\r\n\r\ndef consolidar_pd(pasta):\r\n \"\"\"Processa as planilhas comunicacoes, envolvidos, ocorrencias e grupo em planilhas com agrupamento \"\"\"\r\n arq = com.nomearq() # Comunicacoes\r\n try:\r\n df_com = pd.read_excel(\r\n arq, options={\"strings_to_numbers\": False}, converters={\"Indexador\": str}\r\n )\r\n df_com[\"Indexador\"] = pd.to_numeric(df_com[\"Indexador\"], errors=\"coerce\")\r\n df_com[\"Data_da_operacao\"] = pd.to_datetime(df_com[\"Data_da_operacao\"])\r\n if not com.estr_compativel(df_com.columns):\r\n print(com.estr_upper())\r\n mostra_erro(\"O arquivo \" + arq + \" contém colunas incompatíveis: \")\r\n raise (\"Estrutura incompatível\")\r\n lg.gravalog(\"Arquivo \" + arq + \" lido.\")\r\n except Exception as exc:\r\n print(\"Erro ao ler o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n\r\n arq = env.nomearq() # Envolvidos\r\n try:\r\n df_env = pd.read_excel(\r\n arq, options={\"strings_to_numbers\": False}, converters={\"Indexador\": str}\r\n )\r\n df_env[\"Indexador\"] = pd.to_numeric(df_env[\"Indexador\"], errors=\"coerce\")\r\n df_env = df_env[pd.notnull(df_env[\"Indexador\"])]\r\n if not env.estr_compativel(df_env.columns):\r\n print(env.estr_upper())\r\n mostra_erro(\"O arquivo \" + arq + \" contém colunas 
incompatíveis: \")\r\n raise (\"Estrutura incompatível\")\r\n lg.gravalog(\"Arquivo \" + arq + \" lido.\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro ao ler o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n\r\n arq = oco.nomearq() # Ocorrencias\r\n try:\r\n df_oco = pd.read_excel(arq, options={\"strings_to_numbers\": False})\r\n df_oco[\"Indexador\"] = pd.to_numeric(df_oco[\"Indexador\"], errors=\"coerce\")\r\n df_oco = df_oco[pd.notnull(df_oco[\"Indexador\"])]\r\n dictOco = {}\r\n dictOco2 = {}\r\n for r in df_oco.itertuples(index=False):\r\n if r.Indexador in dictOco:\r\n s = dictOco[r.Indexador]\r\n s += \"; \" + r.Ocorrencia\r\n dictOco[r.Indexador] = s\r\n else:\r\n dictOco[r.Indexador] = r.Ocorrencia\r\n dictOco2[\"Indexador\"] = []\r\n dictOco2[\"Ocorrencia\"] = []\r\n for k, v in dictOco.items():\r\n dictOco2[\"Indexador\"].append(k)\r\n dictOco2[\"Ocorrencia\"].append(v)\r\n\r\n df_oco2 = pd.DataFrame.from_dict(dictOco2)\r\n\r\n if not oco.estr_compativel(df_oco.columns):\r\n print(oco.estr_upper())\r\n mostra_erro(\"O arquivo \" + arq + \" contém colunas incompatíveis: \")\r\n raise (\"Estrutura incompatível\")\r\n lg.gravalog(\"Arquivo \" + arq + \" lido.\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro ao ler o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n\r\n arq = gru.nomearq() # Grupos/detalhes\r\n if not os.path.isfile(arq): # criar arquivo vazio\r\n consolidado = pd.ExcelWriter(\r\n arq,\r\n engine=\"xlsxwriter\",\r\n options={\"strings_to_numbers\": False},\r\n datetime_format=\"dd/mm/yyyy\",\r\n date_format=\"dd/mm/yyyy\",\r\n )\r\n gerar_planilha(\r\n consolidado, pd.DataFrame(columns=gru.estr), gru.nome, indice=False\r\n )\r\n consolidado.save()\r\n lg.gravalog(\r\n \"O arquivo \"\r\n + arq\r\n + \" não foi encontrado. Um novo foi criado com as colunas \"\r\n + gru.exibir()\r\n )\r\n try:\r\n df_gru = pd.read_excel(arq, options={\"strings_to_numbers\": False})\r\n df_gru = df_gru.fillna(\"-\")\r\n if not gru.estr_compativel(df_gru.columns):\r\n print(gru.estr_upper())\r\n mostra_erro(\"O arquivo \" + arq + \" contém colunas incompatíveis: \")\r\n raise (\"Estrutura incompatível\")\r\n lg.gravalog(\"Arquivo \" + arq + \" lido.\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro ao ler o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n\r\n arq = vin.nomearq() # Vinculos\r\n if not os.path.isfile(arq): # criar arquivo vazio\r\n consolidado = pd.ExcelWriter(\r\n arq,\r\n engine=\"xlsxwriter\",\r\n options={\"strings_to_numbers\": False},\r\n datetime_format=\"dd/mm/yyyy\",\r\n date_format=\"dd/mm/yyyy\",\r\n )\r\n gerar_planilha(\r\n consolidado, pd.DataFrame(columns=vin.estr), vin.nome, indice=False\r\n )\r\n consolidado.save()\r\n lg.gravalog(\r\n \"O arquivo \"\r\n + arq\r\n + \" não foi encontrado. 
Um novo foi criado com as colunas \"\r\n + vin.exibir()\r\n )\r\n try:\r\n df_vin = pd.read_excel(arq, options={\"strings_to_numbers\": False})\r\n if not vin.estr_compativel(df_vin.columns):\r\n print(vin.estr_upper())\r\n mostra_erro(\"O arquivo \" + arq + \" contém colunas incompatíveis: \")\r\n raise (\"Estrutura incompatível\")\r\n lg.gravalog(\"Arquivo \" + arq + \" lido.\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro ao ler o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n\r\n nenhumgrupo = len(df_gru[\"Grupo\"].unique())==0\r\n if nenhumgrupo:\r\n grupos_selecionados = None\r\n else:\r\n grupos_selecionados = gui_grupos(df_gru[\"Grupo\"].unique()) # selecao\r\n if grupos_selecionados == None : \r\n grupos_selecionados = df_gru[\"Grupo\"].unique() # nenhum = todos\r\n\r\n print(\"Consolidando\")\r\n arq = os.path.join(pasta, \"RIF_consolidados.xlsx\")\r\n porGrupo = len(df_gru[\"Grupo\"].unique()) > 1\r\n try:\r\n print(\"antes merge\")\r\n df_consolida = pd.merge(df_com, df_env, how=\"left\", on=\"Indexador\")\r\n df_consolida = pd.merge(df_consolida, df_oco2, how=\"left\", on=\"Indexador\")\r\n\r\n df_consolida = pd.merge(df_consolida, df_gru, how=\"left\", on=\"cpfCnpjEnvolvido\")\r\n print(\"depois merge\")\r\n df_consolida.Detalhe.fillna(\r\n \"-?-\", inplace=True\r\n ) # CPFCNPJ que não constam do grupo\r\n\r\n indexadores_selecionados = df_consolida[\"Indexador\"].values\r\n\r\n if porGrupo:\r\n indexadores_grupo = df_consolida[\"Indexador\"].loc[\r\n df_consolida[\"Grupo\"].isin(grupos_selecionados)\r\n ]\r\n indexadores_selecionados = indexadores_grupo.values\r\n df_congrupo = df_consolida.loc[\r\n df_consolida[\"Indexador\"].isin(indexadores_grupo.values)\r\n ]\r\n df_consolida = df_congrupo\r\n\r\n consolidado = pd.ExcelWriter(\r\n arq,\r\n engine=\"xlsxwriter\",\r\n options={\"strings_to_numbers\": False},\r\n datetime_format=\"dd/mm/yyyy\",\r\n date_format=\"dd/mm/yyyy\",\r\n )\r\n if porGrupo and not nenhumgrupo: # tem agrupamentos\r\n table = pd.pivot_table(\r\n df_consolida,\r\n index=[\r\n \"Grupo\",\r\n \"Indexador\",\r\n \"Data_da_operacao\",\r\n \"cpfCnpjEnvolvido\",\r\n \"nomeEnvolvido\",\r\n \"informacoesAdicionais\",\r\n \"Detalhe\",\r\n \"Ocorrencia\",\r\n ],\r\n columns=[\"tipoEnvolvido\"],\r\n margins=False,\r\n )\r\n else:\r\n table = pd.pivot_table(\r\n df_consolida,\r\n index=[\r\n \"Indexador\",\r\n \"Data_da_operacao\",\r\n \"cpfCnpjEnvolvido\",\r\n \"nomeEnvolvido\",\r\n \"informacoesAdicionais\",\r\n \"Detalhe\",\r\n \"Ocorrencia\",\r\n ],\r\n columns=[\"tipoEnvolvido\"],\r\n margins=False,\r\n )\r\n df_pivot = table.stack()\r\n except Exception as exc:\r\n lg.gravalog(\r\n \"Erro ao consolidar planilhas no arquivo \" + arq + \"\\n\" + str(type(exc))\r\n )\r\n print(\"depois grupo\")\r\n\r\n dicAdic = {\"Indexador\": [], \"cpfCnpjEnvolvido\": [], \"valor\": []}\r\n valoresPorIndexador = {}\r\n for row in df_com.itertuples(index=False):\r\n if row.informacoesAdicionais:\r\n valoresPorIndexador[row.Indexador] = estimarFluxoDoDinheiro(\r\n str(row.informacoesAdicionais)\r\n )\r\n for k, v in valoresPorIndexador.items():\r\n if v != {}:\r\n for kk, vv in v.items():\r\n dicAdic[\"Indexador\"].append(k)\r\n dicAdic[\"cpfCnpjEnvolvido\"].append(kk)\r\n dicAdic[\"valor\"].append(vv)\r\n df_Adic = pd.DataFrame.from_dict(dicAdic)\r\n\r\n try:\r\n gerar_planilha(consolidado, df_pivot, \"INDEXADOR\", indice=True)\r\n\r\n if porGrupo: # tem agrupamentos\r\n table = pd.pivot_table(\r\n df_consolida,\r\n index=[\r\n \"Grupo\",\r\n \"cpfCnpjEnvolvido\",\r\n 
\"nomeEnvolvido\",\r\n \"Data_da_operacao\",\r\n \"Detalhe\",\r\n \"informacoesAdicionais\",\r\n \"Indexador\",\r\n \"Ocorrencia\",\r\n ],\r\n columns=[\"tipoEnvolvido\"],\r\n margins=False,\r\n )\r\n else:\r\n table = pd.pivot_table(\r\n df_consolida,\r\n index=[\r\n \"cpfCnpjEnvolvido\",\r\n \"nomeEnvolvido\",\r\n \"Data_da_operacao\",\r\n \"Detalhe\",\r\n \"informacoesAdicionais\",\r\n \"Indexador\",\r\n \"Ocorrencia\",\r\n ],\r\n columns=[\"tipoEnvolvido\"],\r\n margins=False,\r\n )\r\n df_pivot = table.stack()\r\n\r\n gerar_planilha(consolidado, df_pivot, \"CPFCNPJ\", indice=True)\r\n print(\"df_pivot\")\r\n\r\n gerar_planilha(consolidado, df_consolida, \"ComunicXEnvolvidos\")\r\n print(\"df_consolida\")\r\n gerar_planilha(consolidado, df_com, \"Comunicacoes\")\r\n print(\"df_com\")\r\n gerar_planilha(consolidado, df_env, \"Envolvidos\")\r\n print(\"df_env\")\r\n gerar_planilha(consolidado, df_oco2, \"Ocorrencias\")\r\n print(\"df_oco2\")\r\n gerar_planilha(consolidado, df_gru, \"Grupos\")\r\n print(\"df_gru\")\r\n gerar_planilha(consolidado, df_vin, \"Vinculos\")\r\n print(\"df_vin\")\r\n gerar_planilha(consolidado, df_Adic, \"InfoAdicionais\")\r\n print(\"df_Adic\")\r\n\r\n df_consolida.to_csv(os.path.join(pasta, 'consolidado.csv'))\r\n\r\n except Exception as exc:\r\n lg.gravalog(\r\n \"Erro ao gerar planilhas para o arquivo \" + arq + \"\\n\" + str(type(exc))\r\n )\r\n\r\n try:\r\n consolidado.save()\r\n except Exception as exc:\r\n lg.gravalog(\"Erro ao gravar o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n lg.gravalog(\"Planilhas consolidadas: \" + arq)\r\n return df_gru, df_env, df_com, df_oco2, df_vin, indexadores_selecionados\r\n\r\n\r\ndef exportar_rede_rel(pasta, dfgru, dfenv):\r\n # criando as tabelas no SQLITE\r\n try:\r\n conx = sqlite3.connect(\r\n \":memory:\"\r\n ) # ou use :memory: para botá-lo na memória RAM\r\n curs = conx.cursor()\r\n except Exception as exc:\r\n lg.gravalog(\"Erro criar conexão com SQLITE memory\\n\" + str(type(exc)))\r\n\r\n try:\r\n dfgru.to_sql(\"Pedido\", conx, index=False, if_exists=\"replace\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro carregar grupos no SQLITE memory\\n\" + str(type(exc)))\r\n try:\r\n dfenv.to_sql(\"Envolvidos\", conx, index=False, if_exists=\"replace\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro carregar envolvidos no SQLITE memory\\n\" + str(type(exc)))\r\n\r\n sql = \"select \"\r\n sql += (\r\n \" REPLACE(REPLACE(REPLACE(cpfCnpjEnvolvido,'.',''),'-',''),'/','') as cpfcnpj,\"\r\n )\r\n sql += ' case length(REPLACE(REPLACE(REPLACE(cpfCnpjEnvolvido,\".\",\"\"),\"-\",\"\"),\"/\",\"\")) when 14 then \"PJ\" when 11 then \"PF\" else \"-\" end as tipo,'\r\n sql += ' upper(Grupo) as nome, 0 as camada, \"\" as [componente/grupo],'\r\n sql += \" 0 as situacao, 0 as [sexo(PF)/Matriz-Filial(PJ)], 0 as [servidor(PF)/nat.jur(PJ)],\"\r\n sql += \" 0 as [salario<2min], 0 as OB, 0 as pad, 0 as [PF candidato], 0 as [CEIS/CEPIM], 0 as doadorTSE, 0 as CadUnico, 0 as Falecido\"\r\n sql += \" from Pedido \"\r\n sql += \" union select distinct\"\r\n sql += (\r\n \" REPLACE(REPLACE(REPLACE(cpfCnpjEnvolvido,'.',''),'-',''),'/','') as cpfcnpj,\"\r\n )\r\n sql += ' case length(REPLACE(REPLACE(REPLACE(cpfCnpjEnvolvido,\".\",\"\"),\"-\",\"\"),\"/\",\"\")) when 14 then \"PJ\" when 11 then \"PF\" else \"-\" end as tipo,'\r\n sql += ' upper(nomeEnvolvido) as nome, 1 as camada, \"\" as [componente/grupo],'\r\n sql += \" 0 as situacao, 0 as [sexo(PF)/Matriz-Filial(PJ)], 0 as [servidor(PF)/nat.jur(PJ)],\"\r\n sql += \" 0 as 
[salario<2min], 0 as OB, 0 as pad, 0 as [PF candidato], 0 as [CEIS/CEPIM], 0 as doadorTSE, 0 as CadUnico, 0 as Falecido\"\r\n sql += \" from Envolvidos where cpfCnpjEnvolvido not in (select cpfCnpjEnvolvido from Pedido) \"\r\n sql += ' union select distinct \"\" as cpfcnpj, \"CC\" as tipo,'\r\n sql += ' agenciaEnvolvido || \"-\"|| contaEnvolvido as nome, 1 as camada, \"\" as [componente/grupo],'\r\n sql += \" 0 as situacao, 0 as [sexo(PF)/Matriz-Filial(PJ)], 0 as [servidor(PF)/nat.jur(PJ)],\"\r\n sql += \" 0 as [salario<2min], 0 as OB, 0 as pad, 0 as [PF candidato], 0 as [CEIS/CEPIM], 0 as doadorTSE, 0 as CadUnico, 0 as Falecido\"\r\n sql += ' from Envolvidos where agenciaEnvolvido not in (\"-\",\"0\")'\r\n # dicRede = {'cpfcnpj'=[],'tipo'=[],'nome'=[],'camada'=[],}\r\n try:\r\n arq = os.path.join(pasta, \"RIF_rede_de_rel.xls\")\r\n rede_rel = pd.ExcelWriter(\r\n arq,\r\n engine=\"openpyxl\",\r\n options={\"strings_to_numbers\": False},\r\n datetime_format=\"dd/mm/yyyy\",\r\n date_format=\"dd/mm/yyyy\",\r\n )\r\n except Exception as exc:\r\n lg.gravalog(\"Erro abrir planilha de rede de relacionamento\\n\" + str(type(exc)))\r\n\r\n try:\r\n df_rr = pd.read_sql(sql, conx)\r\n gerar_planilhaXLS(rede_rel, df_rr, \"cpfcnpj\")\r\n except Exception as exc:\r\n lg.gravalog(\r\n \"Erro gerar planilha de rede de relacionamento CPFCNPJ\\n\" + str(type(exc))\r\n )\r\n\r\n try:\r\n sql = \"select distinct indexador, \"\r\n sql += ' (agenciaEnvolvido || \"-\"|| contaEnvolvido) as CC'\r\n sql += ' from Envolvidos where agenciaEnvolvido not in (\"-\",\"0\") '\r\n df_cc = pd.read_sql(sql, conx)\r\n df_cc.to_sql(\"Contas\", conx, index=False, if_exists=\"replace\")\r\n except Exception as exc:\r\n lg.gravalog(\"Erro gerar tabela auxiliar de contas\\n\" + str(type(exc)))\r\n\r\n try:\r\n sql = \"select distinct\"\r\n sql += \" REPLACE(REPLACE(REPLACE(cpfCnpjEnvolvido,'.',''),'-',''),'/','') as cpfcnpj1,\"\r\n sql += 'upper(nomeEnvolvido) as nome1, contas.CC as cpfcnpj2, \"\" as nome2, 1 as camada, \"CC\" as [descrição]'\r\n sql += \" from Envolvidos inner join contas\"\r\n sql += \" on Envolvidos.indexador = contas.indexador\"\r\n df_rr = pd.read_sql(sql, conx)\r\n gerar_planilhaXLS(rede_rel, df_rr, \"ligacoes\")\r\n except Exception as exc:\r\n lg.gravalog(\r\n \"Erro gerar planilha de rede de relacionamento LIGACOES\\n\" + str(type(exc))\r\n )\r\n\r\n try:\r\n sql = \"select distinct \"\r\n sql += \" cpfCnpjEnvolvido as cpfcnpj1,\"\r\n sql += ' upper(nomeEnvolvido) as nome1, contas.CC as cpfcnpj2, \"\" as nome2, 1 as camada1, 1 as camada2,\"CC\" as [descrição], '\r\n sql += ' case length(REPLACE(REPLACE(REPLACE(cpfCnpjEnvolvido,\".\",\"\"),\"-\",\"\"),\"/\",\"\")) when 14 then \"Escritório\" when 11 then \"Profissional (masculino)\" else \"-\" end as tipo1,'\r\n sql += ' \"Conta\" as tipo2, \"Nenhum\" as cor_situacao1, \"Nenhum\" as cor_situacao2'\r\n sql += \" from Envolvidos inner join contas\"\r\n sql += \" on Envolvidos.indexador = contas.indexador\"\r\n df_rr = pd.read_sql(sql, conx)\r\n gerar_planilhaXLS(rede_rel, df_rr, \"ligacoes\")\r\n except Exception as exc:\r\n lg.gravalog(\r\n \"Erro gerar planilha de rede de relacionamento LIGACOES - complementares\\n\"\r\n + str(type(exc))\r\n )\r\n\r\n try:\r\n arqcsv = os.path.join(pasta, \"I2.csv\")\r\n df_rr.to_csv(\r\n arqcsv, sep=\";\", header=True, encoding=\"utf-8\", decimal=\",\", index=False\r\n )\r\n except Exception as exc:\r\n lg.gravalog(\"Erro gerar csv de rede de relacionamento i2\\n\" + str(type(exc)))\r\n\r\n # 
to_i2(df=df_rr,arquivo='rede_rel.anx')\r\n    try:\r\n        gerar_planilhaXLS(rede_rel, df_rr, \"I2\")\r\n    except Exception as exc:\r\n        lg.gravalog(\r\n            \"Erro gerar planilha de rede de relacionamento i2\\n\" + str(type(exc))\r\n        )\r\n\r\n    # df_rr = pd.DataFrame(r, columns=[i[0] for i in curs.description])\r\n\r\n    try:\r\n        rede_rel.save()\r\n    except Exception as exc:\r\n        lg.gravalog(\"Erro ao gravar o arquivo \" + arq + \"\\n\" + str(type(exc)))\r\n    lg.gravalog(\"Rede de relacionamento gerada: \" + arq)\r\n\r\n\r\ndef validar_pasta(pasta, planilhas):\r\n    for p in planilhas:\r\n        p.mudar_pasta(pasta)\r\n        if not p.arquivo_existe():\r\n            return p.nomearq()\r\n    return \"\"\r\n\r\n\r\ndef mostra_erro(msg):\r\n    print(msg)\r\n    easygui.msgbox(msg)\r\n    return\r\n\r\n\r\ndef parse_args():\r\n    \"\"\" Obtém do usuário a definição da pasta onde estão as planilhas a serem processadas\r\n    e persiste num arquivo de configuração json\r\n    \"\"\"\r\n    import json\r\n    import argparse\r\n\r\n    # futuro\r\n    procGrupos = True\r\n    procVinculos = True\r\n    procContas = True\r\n    procAusentes = True\r\n\r\n    proc_grupos_msg = \"Processar grupos/detalhes de CPF/CNPJ\"\r\n    proc_vinculos_msg = \"Incluir vínculos complementares ao RIF\"\r\n    proc_contas_msg = \"Agrupar contas bancárias no grafo\"\r\n    proc_ausentes_msg = \"Incluir CPF/CNPF ausentes do RIF no grafo\"\r\n\r\n    stored_args = {}\r\n    # usar o nome do script sem a extensão para formar o nome do arquivo json\r\n    script_name = os.path.splitext(os.path.basename(__file__))[0]\r\n\r\n    args_file = \"{}-args.json\".format(script_name)\r\n    pasta = \"\"\r\n    # ler os parâmetros persistidos, gravados no arquivo json\r\n    if os.path.isfile(args_file):\r\n        with open(args_file) as data_file:\r\n            stored_args = json.load(data_file)\r\n            procContas = stored_args.get(\"procContas\")\r\n            procVinculos = stored_args.get(\"procVinculos\")\r\n            procGrupos = stored_args.get(\"procGrupos\")\r\n            procAusentes = stored_args.get(\"procAusentes\")\r\n\r\n    # se o programa for chamado sem especificar a pasta de origem, abrir GUI para obtê-la do usuário\r\n    if len(sys.argv) <= 1:\r\n        pasta = stored_args.get(\"pasta\")\r\n        pasta = gui_pasta(pasta)\r\n\r\n    # a opção \"-e\" apenas exibe as estruturas esperadas e encerra; é tratada antes\r\n    # do parser, pois help_estruturas() imprime diretamente e não é uma Action do argparse\r\n    if \"-e\" in sys.argv:\r\n        help_estruturas(estruturas)\r\n        sys.exit(0)\r\n\r\n    # definir o processamento de parâmetros passados pela linha de comando\r\n    desc = \"Processa planilhas de RIF (comunicações, envolvidos, ocorrencias e grupo), \\ngerando planilhas com agrupamento e com dados para i2\"\r\n    file_help_msg = \"Nome da pasta onde estão as planilhas\"\r\n\r\n    linha_comando = argparse.ArgumentParser(description=desc)\r\n    linha_comando.add_argument(\r\n        \"--pasta\",\r\n        action=\"store\",\r\n        dest=\"pasta\",\r\n        help=file_help_msg,\r\n        default=pasta,\r\n        required=False,\r\n    )\r\n    linha_comando.add_argument(\r\n        \"-p\",\r\n        action=\"store\",\r\n        dest=\"pasta\",\r\n        help=file_help_msg,\r\n        default=pasta,\r\n        required=False,\r\n    )\r\n    linha_comando.add_argument(\r\n        \"-g\",\r\n        action=\"store\",\r\n        dest=\"procGrupos\",\r\n        help=proc_grupos_msg,\r\n        default=procGrupos,\r\n        required=False,\r\n    )\r\n    linha_comando.add_argument(\r\n        \"-c\",\r\n        action=\"store\",\r\n        dest=\"procContas\",\r\n        help=proc_contas_msg,\r\n        default=procContas,\r\n        required=False,\r\n    )\r\n    linha_comando.add_argument(\r\n        \"-a\",\r\n        action=\"store\",\r\n        dest=\"procAusentes\",\r\n        help=proc_ausentes_msg,\r\n        default=procAusentes,\r\n        required=False,\r\n    )\r\n\r\n    args = 
linha_comando.parse_args()\r\n if not args.pasta:\r\n args.pasta = os.getcwd() # pasta atual do script como default\r\n if not os.path.isdir(args.pasta): # ver sé pasta\r\n print(args.pasta + \" nao é pasta\")\r\n exit(1)\r\n\r\n args.procContas = gui_sn(procContas, proc_contas_msg + \"?\")\r\n args.procGrupos = gui_sn(procGrupos, proc_grupos_msg + \"?\")\r\n args.procVinculos = gui_sn(procVinculos, proc_vinculos_msg + \"?\")\r\n args.procAusentes = gui_sn(procAusentes, proc_ausentes_msg + \"?\")\r\n\r\n # persistir os parâmetros\r\n with open(args_file, \"w\") as data_file:\r\n # Using vars(args) returns the data as a dictionary\r\n json.dump(vars(args), data_file)\r\n\r\n return args\r\n\r\n\r\n# display_message()\r\n\r\n\r\ndef executar(pasta):\r\n # Selecionar a pasta de origem/destino, validá-la e executar a consolidação e a geração das planilhas\r\n if not pasta:\r\n args = parse_args()\r\n pasta = args.pasta\r\n procContas = args.procContas\r\n procGrupos = args.procGrupos\r\n procVinculos = args.procVinculos\r\n procAusentes = args.procAusentes\r\n\r\n proc_grupos_msg = \"Processar grupos/detalhes de CPF/CNPJ\"\r\n proc_vinculos_msg = \"Incluir vínculos complementares ao RIF\"\r\n proc_contas_msg = \"Agrupar contas bancárias no grafo\"\r\n proc_ausentes_msg = \"Incluir CPF/CNPF ausentes do RIF no grafo\"\r\n\r\n print(\"Pasta selecionada: \" + pasta)\r\n print(proc_contas_msg + \": \" + \"sim\" if procContas else \"não\")\r\n print(proc_grupos_msg + \": \" + \"sim\" if procGrupos else \"não\")\r\n print(proc_vinculos_msg + \": \" + \"sim\" if procVinculos else \"não\")\r\n print(proc_ausentes_msg + \": \" + \"sim\" if procAusentes else \"não\")\r\n\r\n pl = validar_pasta(pasta, estruturas)\r\n if pl == \"\":\r\n dfGrupos, dfEnvolvidos, dfComunicacoes, dfOcorrencias, dfVinculos, indexadores_selecionados = consolidar_pd(\r\n pasta\r\n ) # gera planilhas, retorna dataframes\r\n # exportar_rede_rel(pasta, dfGrupos, dfEnvolvidos) # gera planilha para i2\r\n nos, ligacoes = criarArquivoMacrosGrafo(\r\n pasta,\r\n procContas,\r\n procGrupos,\r\n procVinculos,\r\n procAusentes,\r\n dfGrupos,\r\n dfEnvolvidos,\r\n dfComunicacoes,\r\n dfOcorrencias,\r\n dfVinculos,\r\n indexadores_selecionados,\r\n ) # gera json para o Macros\r\n # toAnx(pasta,nos,ligacoes)\r\n logs = lg.lelog()\r\n easygui.msgbox(logs)\r\n else:\r\n mostra_erro(\"Arquivo \" + pl + \" não encontrado nesta pasta:\\n\" + pasta)\r\n sys.exit(1)\r\n\r\n return logs # mensagens sobre a execução\r\n\r\n\r\ndef gui_pasta(pasta):\r\n # GUI para obter a pastas escolhida pelo usuário\r\n nomepasta = easygui.diropenbox(\r\n default=pasta,\r\n msg=\"Selecione a pasta onde estão as planilhas Comunicações, Envolvidos e Ocorrências\",\r\n title=\"Gera planilhas a partir de dados de RIF\",\r\n )\r\n if not nomepasta:\r\n exit(1)\r\n return nomepasta\r\n\r\n\r\ndef gui_sn(sn, texto: str):\r\n resp = easygui.choicebox(\r\n msg=texto, choices=[\"Sim\", \"Não\"], preselect=0 if sn else 1\r\n )\r\n return resp == \"Sim\"\r\n\r\n\r\ndef gui_grupos(grupos):\r\n if len(grupos) == 0:\r\n return None\r\n resp = easygui.multchoicebox(\r\n msg=\"Selecione os grupos a serem tratados:\", title=\"Grupos\", choices=grupos\r\n )\r\n return resp\r\n\r\n\r\ndef pasta_valida(pasta):\r\n # verifica se a pasta escolida é válida\r\n pl = validar_pasta(pasta, estruturas)\r\n if pl == \"\":\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef criarArquivoMacrosGrafo(\r\n pasta,\r\n processaConta,\r\n processaGrupos,\r\n processaVinculos,\r\n 
processaAusentes,\r\n dfGrupos,\r\n dfEnvolvidos,\r\n dfComunicacoes,\r\n dfOcorrencias,\r\n dfVinculos,\r\n indexadores_selecionados,\r\n):\r\n \"\"\"gera o arquivo do Macros a partir dos dataframes\"\"\"\r\n\r\n # cria tabela de nós\r\n nos = []\r\n\r\n # procura tipoEnvolvido=Titular, para por no tooltip do nó\r\n if len(indexadores_selecionados):\r\n dfEnvolvidos = dfEnvolvidos.loc[\r\n dfEnvolvidos[\"Indexador\"].isin(indexadores_selecionados)\r\n ]\r\n dfComunicacoes = dfComunicacoes.loc[\r\n dfComunicacoes[\"Indexador\"].isin(indexadores_selecionados)\r\n ]\r\n dfOcorrencias = dfOcorrencias.loc[\r\n dfOcorrencias[\"Indexador\"].isin(indexadores_selecionados)\r\n ]\r\n # dfGrupos = dfGrupos.loc[dfOcorrencias['Indexador'].isin(indexadores_selecionados)]\r\n\r\n titularOcorrencia = {}\r\n conta = {}\r\n for row in dfEnvolvidos.itertuples(index=False):\r\n if row.tipoEnvolvido.upper() == \"TITULAR\":\r\n titularOcorrencia[row.Indexador] = (\r\n \"Titular: \"\r\n + row.nomeEnvolvido.strip()\r\n + \"(\"\r\n + row.cpfCnpjEnvolvido\r\n + \")\"\r\n )\r\n\r\n if processaConta:\r\n if row.contaEnvolvido != \"-\" and str(row.contaEnvolvido) != \"0\":\r\n nroConta = (\r\n \"CC: \" + str(row.agenciaEnvolvido) + \"/\" + str(row.contaEnvolvido)\r\n )\r\n campoA = (\r\n dfComunicacoes[\"CampoA\"]\r\n .where(dfComunicacoes[\"Indexador\"] == row.Indexador)\r\n .sum()\r\n )\r\n if nroConta not in conta:\r\n conta[nroConta] = campoA\r\n else:\r\n conta[nroConta] += campoA\r\n if processaConta:\r\n for key, value in conta.items():\r\n label = (\r\n \"R$ {:,}\".format((value))\r\n .replace(\",\", \"X\")\r\n .replace(\".\", \",\")\r\n .replace(\"X\", \".\")\r\n )\r\n nos.append(copy.deepcopy(noConta(key, label).todict()))\r\n\r\n for row in dfComunicacoes.itertuples(index=False):\r\n if str(row.Indexador) == \"nan\" or str(row.Indexador) == \"0\":\r\n continue\r\n campos = str(row)[len(\"Pandas\") :]\r\n label = (\r\n \"COAF R$ {:,}\".format((row.CampoA))\r\n .replace(\",\", \"X\")\r\n .replace(\".\", \",\")\r\n .replace(\"X\", \".\")\r\n )\r\n noCOM = noComunicacao(id=\"COM_\" + str(int(row.Indexador)), label=label)\r\n\r\n d = dfOcorrencias[dfOcorrencias.Indexador == row.Indexador][\"Ocorrencia\"]\r\n campos = (\r\n titularOcorrencia.get(row.Indexador, \"\")\r\n + \" \"\r\n + campos\r\n + \" . 
Ocorrência(s): \"\r\n            + \"; \".join(d)\r\n        )\r\n\r\n        campos = campos.replace(r\"\\t\", \" \")\r\n        campos = campos.replace(r\"\\x96\", \" \").replace(r\"\\x92\", \" \")\r\n        noCOM.texto_tooltip = str(re.sub(\" +\", \" \", campos))\r\n        nos.append(copy.deepcopy(noCOM.todict()))\r\n\r\n    valoresPorIndexador = {}\r\n    for row in dfComunicacoes.itertuples(index=False):\r\n        if row.informacoesAdicionais:\r\n            valoresPorIndexador[row.Indexador] = estimarFluxoDoDinheiro(\r\n                str(row.informacoesAdicionais)\r\n            )\r\n\r\n    cpfcnpjset = set()\r\n    for row in dfEnvolvidos.itertuples(index=False):\r\n        cpfcnpj = (\r\n            row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n        )\r\n        cpfcnpjset.add(cpfcnpj)\r\n\r\n    cs = set()\r\n    for row in dfEnvolvidos.itertuples(index=False):\r\n        cpfcnpj = (\r\n            row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n        )\r\n        nome = str(\r\n            row.nomeEnvolvido\r\n        )  # para exportar para i2 (nao busca o nome no Macros)\r\n        if cpfcnpj not in cs:\r\n            cs.add(cpfcnpj)\r\n            if len(cpfcnpj) == 11:\r\n                nos.append(copy.deepcopy(noPF(id=cpfcnpj, label=nome).todict()))\r\n            else:\r\n                nos.append(copy.deepcopy(noPJ(id=cpfcnpj, label=nome).todict()))\r\n\r\n    # cria tabela de ligacoes\r\n    ligacoes = []\r\n    ligadict = {}\r\n    ligaContadict = {}\r\n\r\n    det = {} # detalhes\r\n    grp = {} # grupos\r\n    idDet = 0\r\n    idGru = 0\r\n\r\n    if processaGrupos:\r\n        if processaAusentes: # inclui todos os grupos\r\n            df = dfGrupos\r\n        else: # inclui os grupos com incidencia no RIF\r\n            df = pd.merge(dfEnvolvidos, dfGrupos, how=\"inner\", on=\"cpfCnpjEnvolvido\")\r\n        csgrupos = df[\"Grupo\"].unique()\r\n        for gr in csgrupos:\r\n            idGru += 1\r\n            grp[gr] = idGru\r\n            texto = gr[:MAX_TAM_LABEL]\r\n            nos.append(\r\n                copy.deepcopy(noGrupo(id=\"#\" + str(idGru), label=texto).todict())\r\n            )\r\n        for row in df.itertuples(index=False):\r\n            cpfcnpj = (\r\n                row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n            )\r\n            nome = str(row.nome_Envolvido)\r\n            if cpfcnpj not in cs:\r\n                cs.add(cpfcnpj)\r\n                if len(cpfcnpj) == 11:\r\n                    nos.append(copy.deepcopy(noPF(id=cpfcnpj, label=nome).todict()))\r\n                else:\r\n                    nos.append(copy.deepcopy(noPJ(id=cpfcnpj, label=nome).todict()))\r\n\r\n        for row in dfGrupos.itertuples(index=False):\r\n            gr = row.Grupo\r\n            if not (gr in grp):\r\n                continue\r\n            obDetalhe = str(row.Detalhe)[:MAX_TAM_LABEL]\r\n            cpfcnpj = (\r\n                row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n            )\r\n            lig = aresta(\r\n                origem=cpfcnpj, destino=\"#\" + str(grp[gr]), descricao=obDetalhe\r\n            ).todict()\r\n            ligacoes.append(copy.deepcopy(lig))\r\n    # dfEnvolvidos.sort_values(by=['Indexador', 'cpfCnpjEnvolvido'])\r\n    for row in dfEnvolvidos.itertuples(index=False):\r\n        if str(row.Indexador) == \"nan\":\r\n            continue\r\n        cpfcnpj = (\r\n            row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n        )\r\n        # origemDestino = \"COM_\"+row.Indexador+\"-\"+cpfcnpj\r\n        origemDestino = str(int(row.Indexador)) + \"-\" + cpfcnpj\r\n        if origemDestino not in ligadict:\r\n            ligadict[origemDestino] = set([row.tipoEnvolvido])\r\n        else:\r\n            ligadict[origemDestino].add(row.tipoEnvolvido)\r\n        # conta: agrupa os tipos de envolvimento por conta bancária\r\n        if (\r\n            row.contaEnvolvido != \"-\"\r\n            and str(row.contaEnvolvido) != \"0\"\r\n            and str(row.contaEnvolvido) != \"nan\"\r\n        ):\r\n            nroConta = (\r\n                \"CC: \" + str(row.agenciaEnvolvido) + \"/\" + str(row.contaEnvolvido)\r\n            )\r\n            origemDestino = nroConta + \"-\" + cpfcnpj\r\n            if origemDestino not in ligaContadict:\r\n                
ligaContadict[origemDestino] = set([row.tipoEnvolvido])\r\n else:\r\n ligaContadict[origemDestino].add(row.tipoEnvolvido)\r\n\r\n for k, v in ligadict.items():\r\n origem = k.split(\"-\")[0]\r\n destino = k.split(\"-\")[1]\r\n valor = valoresPorIndexador.get(origem, {}).get(destino, \"\")\r\n if valor:\r\n valor = \"-\" + valor\r\n\r\n descricao = \",\".join(sorted(v)) + valor\r\n ligacoes.append(\r\n copy.deepcopy(\r\n aresta(\r\n origem=\"COM_\" + origem, destino=destino, descricao=descricao\r\n ).todict()\r\n )\r\n )\r\n\r\n for k, v in ligaContadict.items():\r\n origem = k.split(\"-\")[0]\r\n destino = k.split(\"-\")[1]\r\n valor = valoresPorIndexador.get(origem, {}).get(destino, \"\")\r\n if valor:\r\n valor = \"-\" + valor\r\n # descricao = ','.join(sorted([k for k in v if k!='Outros'])) + valor\r\n descricao = \",\".join(sorted(v)) + valor\r\n ligacoes.append(\r\n copy.deepcopy(\r\n aresta(origem=origem, destino=destino, descricao=descricao).todict()\r\n )\r\n )\r\n\r\n if processaVinculos:\r\n for row in dfVinculos.itertuples(index=False):\r\n cpfcnpj = (\r\n row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n )\r\n nome = str(row.nome_Envolvido)\r\n if cpfcnpj not in cs:\r\n cs.add(cpfcnpj)\r\n if len(cpfcnpj) == 11:\r\n nos.append(\r\n copy.deepcopy(\r\n noPF(id=cpfcnpj, label=nome, fonte=\"VIN\").todict()\r\n )\r\n )\r\n else:\r\n nos.append(\r\n copy.deepcopy(\r\n noPJ(id=cpfcnpj, label=nome, fonte=\"VIN\").todict()\r\n )\r\n )\r\n\r\n for row in dfVinculos.itertuples(index=False):\r\n cpfcnpj = (\r\n row.cpfCnpjEnvolvido.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n )\r\n cpfcnpjvinc = (\r\n row.cpfCnpjVinculado.replace(\".\", \"\").replace(\"-\", \"\").replace(\"/\", \"\")\r\n )\r\n descricao = str(row.Descricao)[:MAX_TAM_LABEL]\r\n ligacoes.append(\r\n copy.deepcopy(\r\n aresta(\r\n origem=cpfcnpj,\r\n destino=cpfcnpjvinc,\r\n descricao=descricao,\r\n fonte=\"VIN\",\r\n ).todict()\r\n )\r\n )\r\n nome = str(row.nome_Envolvido)\r\n if processaAusentes and cpfcnpj not in cs: # incluir CPFCNPJ\r\n cs.add(cpfcnpj)\r\n if len(cpfcnpj) == 11:\r\n nos.append(\r\n copy.deepcopy(\r\n noPF(id=cpfcnpj, label=nome, fonte=\"VIN\").todict()\r\n )\r\n )\r\n else:\r\n nos.append(\r\n copy.deepcopy(\r\n noPJ(id=cpfcnpj, label=nome, fonte=\"VIN\").todict()\r\n )\r\n )\r\n\r\n cpfcnpj = cpfcnpjvinc\r\n nome = str(row.nome_Vinculado)\r\n if processaAusentes and cpfcnpj not in cs: # incluir CPFCNPJ\r\n cs.add(cpfcnpj)\r\n if len(cpfcnpj) == 11:\r\n nos.append(\r\n copy.deepcopy(\r\n noPF(id=cpfcnpj, label=nome, fonte=\"VIN\").todict()\r\n )\r\n )\r\n else:\r\n nos.append(\r\n copy.deepcopy(\r\n noPJ(id=cpfcnpj, label=nome, fonte=\"VIN\").todict()\r\n )\r\n )\r\n\r\n textoJson = json.dumps({\"no\": nos, \"ligacao\": ligacoes}) # , ensure_ascii=False)\r\n # print(textoJson)\r\n # textoJson = textoJson.replace(r'\\t',' ').replace(r'\\x96',' ').replace(r'\\x92', ' ') #isso dá erro\r\n arq = os.path.join(pasta, \"macros_grafo.json\")\r\n with open(arq, \"wt\", encoding=\"latin1\") as out:\r\n out.write(textoJson)\r\n lg.gravalog(\"Grafo gerado para o sistema Macros: \" + arq)\r\n return (nos, ligacoes)\r\n\r\n\r\ndef exportar_rede_rel_xlsx(pasta, nos, ligacoes):\r\n workbook = xlsxwriter.Workbook(os.path.join(pasta, \"RIF_rede.xlsx\"))\r\n worksheet = workbook.add_worksheet()\r\n headings = [\r\n \"cpfcnpj1\",\r\n \"nome1\",\r\n \"cpfcnpj2\",\r\n \"nome2\",\r\n \"camada1\",\r\n \"camada2\",\r\n \"descrição\",\r\n \"tipo1\",\r\n \"tipo2\",\r\n 
\"cor_situacao1\",\r\n        \"cor_situacao2\",\r\n    ]\r\n\r\n    row = 0\r\n    col = 0\r\n    for h in headings:\r\n        worksheet.write(row, col, h)\r\n        col += 1\r\n\r\n    # as ligações chegam como dicionários gerados por aresta.todict();\r\n    # cada ligação ocupa uma linha (nome1/nome2 ficam vazios, pois a aresta não carrega esses campos)\r\n    for l in ligacoes:\r\n        row += 1\r\n        worksheet.write(row, 0, l[\"origem\"])\r\n        worksheet.write(row, 2, l[\"destino\"])\r\n        worksheet.write(row, 6, l[\"tipoDescricao\"][\"0\"])\r\n    \"\"\"\r\n    self.origem=origem\r\n    self.destino=destino\r\n    self.descricao=descricao\r\n    self.cor=cor\r\n    self.fonte=fonte\r\n    self.camada=0 if self.fonte == 'RIF' else 1\r\n    \"\"\"\r\n    workbook.close()\r\n    return\r\n\r\n\r\ndef toAnx(pasta, nos, ligacoes):\r\n    # requer pyanx (import comentado no topo do arquivo)\r\n    def tipoNo(no):\r\n        if no[\"tipo\"] == \"PF\":\r\n            tipo = \"Person\"\r\n        elif no[\"tipo\"] == \"PJ\":\r\n            tipo = \"Office\"\r\n        elif no[\"tipo\"] == \"CC\":\r\n            tipo = \"Account\"\r\n        elif no[\"tipo\"] == \"ENT\":\r\n            tipo = \"Cabinet\"\r\n        else:\r\n            tipo = \"Cabinet\"\r\n        return tipo\r\n\r\n    chart = pyanx.Pyanx()\r\n\r\n    for no in nos:\r\n        noAnx = chart.add_node(entity_type=tipoNo(no), label=removeAcentos(no[\"label\"]))\r\n        \"\"\" 'label': label,\r\n            'color': color,\r\n            'style': style,\r\n            'description': description,\r\n            'datetime': _datetime,\r\n            'datetime_description': datestr_description,\r\n            'timezone': timezone\r\n        \"\"\"\r\n    for lig in ligacoes:\r\n        ligAnx = chart.add_edge(\r\n            removeAcentos(lig[\"origem\"]),\r\n            removeAcentos(lig[\"destino\"]),\r\n            removeAcentos(lig[\"tipoDescricao\"][\"0\"]),\r\n        )\r\n    arqAnx = os.path.join(pasta, \"RIF_Grafo.anx\")\r\n\r\n    chart.create(arqAnx)\r\n    return\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    # caso o script seja utilizado como um package por outro, basta chamar a função \"executar\" com um nome de pasta\r\n    executar(\"\")\r\n","sub_path":"rif.py","file_name":"rif.py","file_ext":"py","file_size_in_byte":51428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"291610640","text":"# -*- encoding: utf-8 -*-\n# code for python 3.6\nfrom io import open\nimport os\nfrom regex import Regex\nimport unicodedata\n\n\nr = Regex()\n\n\ndef mkdir(dir):\n    if (os.path.exists(dir) == False):\n        os.mkdir(dir)\n\n\ndef push_data_to_stack(stack, file_path, file_name):\n    sub_folder = os.listdir(file_path)\n    for element in sub_folder:\n        element = file_name + '/' + element\n        stack.append(element)\n\n\ndef normalize_data(dataset):\n    nor_dir = 'get_person_entity'\n    mkdir(nor_dir)\n    stack = os.listdir(dataset)\n    print('loading data in ' + dataset)\n    while (len(stack) > 0):\n        file_name = stack.pop()\n        file_path = dataset + '/' + file_name\n        if (os.path.isdir(file_path)): # neu la thu muc thi day vao strong stack\n            push_data_to_stack(stack, file_path, file_name)\n        else:\n            with open(file_path, 'r', encoding='utf-8') as fr, \\\n                open(nor_dir + '/' + file_name, 'w', encoding='utf-8') as fw:\n                print('processing %s' % (file_path))\n                sen = []; ner = []; pos = []\n                for info in fr:\n                    info = unicodedata.normalize('NFKC', info)\n                    info = info.strip().split(u'\\t')\n                    if len(info) == 1:\n                        s, n, p = normalize_per_tag(sen, ner, pos)\n                        word_info = list(map(lambda x: r.run_ex(x), s))\n                        fw.write(get_string(s, p, word_info, n))\n                        sen = sen[:0]\n                        ner = ner[:0]\n                        pos = pos[:0]\n                    else:\n                        sen.append(info[0].replace(u' ', u'_'))\n                        pos.append(info[1])\n                        ner.append(info[3])\n\n\ndef normalize_per_tag(sen, ner, pos):\n    s = []; per = []; n = []; p = []; per_pos = []\n    for i in range(len(sen)):\n        if u'PER' in ner[i]:\n            per.append(sen[i])\n            per_pos.append(pos[i])\n        else:\n            if len(per) != 0:\n                s.append(u'_'.join(per))\n                n.append(u'B-PER')\n                p.append(per_pos[0])\n                per = per[:0]\n                per_pos = per_pos[:0]\n
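            # once the buffered PER span has been flushed, the current non-PER token is copied through below\n            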
s.append(sen[i])\n n.append(ner[i])\n p.append(pos[i])\n if len(per) != 0:\n s.append(u'_'.join(per))\n n.append(u'B-PER')\n p.append(per_pos[0])\n\n return s, n, p\n\n\ndef get_string(word_list, pos_list, word_info, ner_list):\n s = []\n for i in range(len(word_list)):\n try:\n ss = u'\\t'.join([word_list[i], pos_list[i], word_info[i], ner_list[i]])\n s.append(ss)\n except:\n print(u'ERROR - DAMN IT !!!')\n s = u'\\n'.join(s) + u'\\n\\n'\n return s\n\n\nif __name__ == '__main__':\n normalize_data('normalize_data')","sub_path":"rebuild_data_ex.py","file_name":"rebuild_data_ex.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"461231495","text":"import copy\nimport datetime\nimport json\n\nfrom django.db import transaction\nfrom django.utils.timezone import utc\n\nfrom component_browser.models import Parameter\nfrom core.parse_open_api import extract_pipe_parameters_for_request, extract_pipe_parameters\nfrom core.vis_utils import short_json_html\n\n##################\n# Saving objects #\n##################\n\n\ndef save_parameters(pipe_obj, parameters, validation_errors):\n with transaction.atomic():\n\n for param in parameters:\n param_obj = Parameter.objects.get(id=param.id)\n param_obj.value = param.value\n param_obj.save()\n\n # this should use the auto detected user timezone\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n pipe_obj.input_time = now\n\n if not validation_errors:\n pipe_obj.output = \"\"\n pipe_obj.short_output = \"\"\n pipe_obj.output_time = None\n pipe_obj.run_time = now\n pipe_obj.save(force_update=True)\n\n\ndef save_output(pipe_object, output, output_is_html):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n pipe_object.output = output\n if not output_is_html:\n pipe_object.short_output = short_json_html(truncate_json(output), 4000, 'manni', 'output_css')\n pipe_object.output_time = now\n pipe_object.output_is_html = output_is_html\n pipe_object.save(force_update=True)\n\n\n###################################\n# DB extraction for visualisation #\n###################################\n\ndef get_request_params(pipe):\n\n params = extract_pipe_parameters_for_request(pipe.parameter_set.filter(parent=None).all())\n\n return params\n\n\ndef get_pipe_params(pipe):\n\n # get all non-child parameters\n parameters = pipe.parameter_set.filter(parent=None)\n\n param_dict = extract_pipe_parameters(parameters)\n request_dict = get_parameter_fields(pipe.request.__dict__)\n request_dict['component'] = pipe.request.component.__dict__\n pipe_dict = get_parameter_fields(pipe.__dict__)\n\n pipe_dict['parameters'] = param_dict\n pipe_dict['request'] = request_dict\n\n # full output will be too large\n del pipe_dict[\"output\"]\n\n return pipe_dict\n\n\ndef get_pipes_params(sorted_pipe_objs):\n\n pipes = []\n\n for pipe in sorted_pipe_objs:\n pipes.append(get_pipe_params(pipe))\n return pipes\n\n\ndef get_parameter_fields(dictionary):\n\n parameters = {}\n\n for k, v in dictionary.items():\n k = k if k != 'enum' else 'options'\n\n # need to strip values that are strings\n if isinstance(v, str):\n v = v.strip(\"\\\"\")\n\n if k not in ['_state'] and v is not None:\n\n # we are only converting enums to python data types\n if k not in ['options']:\n parameters[k] = v\n continue\n\n try:\n json_str = '{{\"key\": {}}}'.format(v)\n parse = json.loads(json_str)\n parameters[k] = parse['key']\n except Exception as e:\n parameters[k] = v\n return parameters\n\n\ndef 
get_request_path(request_obj):\n component_obj = request_obj.component\n request_path = request_obj.path\n\n # TODO this is a dirty hack. The request path starts with a / and concatenating it with the base path results\n # in a double /. So we delete it\n if request_path[0] == '/':\n request_path = request_path[1:]\n\n return component_obj.host + component_obj.base_path + request_path\n\n\ndef get_pipe_responses(pipe):\n\n defined_responses = {}\n\n responses_db = pipe.request.responses.all()\n\n for response in responses_db:\n if response.description:\n defined_responses[response.status_code] = response.description\n\n return defined_responses\n\n\ndef truncate_value(val):\n\n if type(val) in [float]:\n return round(val, 5)\n elif type(val) in [str]:\n if len(val) > 100:\n return val[:100] + ' ... '\n else:\n return val\n elif type(val) in [dict]:\n return truncate_iterable_json(val)\n else:\n return val\n\n\ndef truncate_iterable_json(json_val):\n\n if isinstance(json_val, list):\n return [truncate_value(element) for element in json_val[:10]]\n\n elif isinstance(json_val, dict):\n for k, v in json_val.items():\n if isinstance(v, dict):\n truncate_iterable_json(v)\n elif isinstance(v, list):\n v = v[:10]\n json_val[k] = [truncate_value(element) for element in v[:10]]\n else:\n json_val[k] = truncate_value(v)\n\n return json_val\n\n\ndef truncate_json(json_val):\n\n json_val = copy.deepcopy(json_val)\n if isinstance(json_val, dict):\n return truncate_iterable_json(json_val)\n elif isinstance(json_val, list):\n return truncate_iterable_json(json_val)\n\n return json_val","sub_path":"core/db_interaction.py","file_name":"db_interaction.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"638540141","text":"import os\nimport requests\nimport datetime\nimport pandas as pd\nimport boto3\nimport csv_s3_upload\nfrom boto3.dynamodb.conditions import Key\nfrom dotenv import load_dotenv\n\n# 코인에 대한 가격정보 가져오기\ndef get_current_data(markets) :\n load_dotenv(verbose=True)\n server_url = os.getenv('URL')\n \n #markets\n market_list1 = \"?\"\n market_list2 = \"?\"\n \n for market in markets[:100]:\n market_list1 = market_list1 + \"markets=\" + market['market'] + \"&\"\n for market in markets[100:]:\n market_list2 = market_list2 + \"markets=\" + market['market'] + \"&\"\n \n res1 = requests.request(\"GET\", server_url + \"/v1/ticker\" + market_list1)\n res2 = requests.request(\"GET\", server_url + \"/v1/ticker\" + market_list2)\n \n # prices\n prices1 = res1.json()\n prices2 = res2.json()\n payload = []\n now = (datetime.datetime.now() + datetime.timedelta(hours=9)).strftime('%Y-%m-%d-%H:%M:%S')\n # market = 코인명\n # opening_price = 시장가\n # change = rice:상향/fall:하향\n # trade_volume = 거래량\n for price in prices1:\n payload.append({\n \"market\" : price['market'], \n \"trade_price\" : price['trade_price'], \n \"change\" : price['change'], \n \"trade_volume\" : price['trade_volume'], \n })\n for price in prices2:\n payload.append({\n \"market\" : price['market'], \n \"trade_price\" : price['trade_price'], \n \"change\" : price['change'], \n \"trade_volume\" : price['trade_volume'], \n })\n \n create_to_CSV(now, payload)\n \n# 코인 데이터 csv로 저장\ndef create_to_CSV(now, payload):\n date = now[0:13]\n file_name = date + '.csv'\n csv_path = '/root/ats/monitoring/csvs/' + file_name\n \n column = [] # csv의 컬럼 값 \n data = {} # csv의 행 값\n \n # pandas DataFrame에 넣기 위한 row, colunm 작성\n for item in payload :\n 
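# NOTE: one CSV column per market symbol; each cell packs [trade_price, change, trade_volume]\n 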
column.append(item['market'])\n data[item['market']] = [[item['trade_price'], item['change'], item['trade_volume']]]\n \n if os.path.isfile(csv_path) :\n # 기존에 파일이 있으면 행만 추가(header=False)\n df = pd.DataFrame(data, index=[now])\n df.to_csv(csv_path, mode='a', header=False)\n else :\n # 다음날이 되어서 파일이 없다면 생성\n df = pd.DataFrame(data, index=[now])\n df.to_csv(csv_path)\n\n# dynamodb에서 KRW코인 목록가져오기\ndef get_market_data(dynamodb=None) :\n if not dynamodb:\n dynamodb = boto3.resource('dynamodb', region_name='ap-northeast-2')\n table = dynamodb.Table('Markets')\n response = table.scan()\n return response['Items']\n \nif __name__ == '__main__':\n markets = get_market_data() \n get_current_data(markets) \n csv_s3_upload.csv_s3_upload() # upload csv file to s3\n ","sub_path":"monitoring/get_market_price.py","file_name":"get_market_price.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"215954459","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport platform\nimport importlib\nfrom subprocess import call, DEVNULL\n\n\nPIP_DEPENDENCIES = [\n 'setuptools',\n 'pymongo',\n 'mongoengine',\n 'progressbar2',\n 'tabulate',\n 'numpy',\n 'scipy',\n 'plotly',\n 'psutil'\n]\n\n\ndef pip_install(package):\n call([\"pip3\", \"install\", \"--user\", package], stdout=DEVNULL)\n\n\ndef main():\n print(\"Creating directory structure\")\n os.makedirs(\"results/log\", exist_ok=True)\n os.makedirs(\"plots\", exist_ok=True)\n if platform.system() != \"Windows\":\n uid = os.getuid()\n os.chown(\"results\", uid, -1)\n os.chown(\"results/log\", uid, -1)\n os.chown(\"plots\", uid, -1)\n\n for root, dirs, files in os.walk(\"results\"):\n for file in dirs:\n os.chmod(os.path.join(root, file), 0o0777)\n for file in files:\n os.chmod(os.path.join(root, file), 0o0777)\n\n print(\"Installing pip dependencies\")\n call([\"pip3\", \"install\", \"--upgrade\", \"pip\", \"--user\"], stdout=DEVNULL)\n for dependency in PIP_DEPENDENCIES:\n pip_install(dependency)\n\n print(\"Calling setuptools.setup function\")\n import site\n importlib.reload(site)\n import setuptools\n with open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n setuptools.setup(\n name=\"aiutare\",\n version=\"1.0\",\n author=\"Lukas Finnbarr O'Callahan, Federico Mora\",\n author_email=\"lukasocallahan@gmail.com, fmora@cs.toronto.edu\",\n description=\"A benchmarking framework for SAT, SMT, and equivalence checking programs.\",\n long_description=long_description,\n url=\"https://github.com/FedericoAureliano/aiutare\",\n scripts=[\n 'bin/aiutare',\n 'bin/plot',\n 'bin/verify'\n ],\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n ],\n requires=PIP_DEPENDENCIES,\n )\n\n from bin.mongod_manager import start_server\n\n try:\n start_server()\n except Exception as e:\n print(e)\n print(\"Please ensure you have the latest version of MongoDB installed.\")\n\n print(\"\\nSetup successful!\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"190337622","text":"import htmlPy\n\nimport core.Social as soc\n\n\nclass SocialHandler(htmlPy.Object):\n\n def __init__(self, app):\n super(SocialHandler, 
self).__init__()\n self.app = app\n\n @staticmethod\n def get_social_skills():\n skills = []\n for skill in soc.get_social_skills():\n skills.append(\n dict(\n verbose=skill.get_verbose(),\n serial=skill.get_serial(),\n _id=skill.get_id()))\n return skills\n\n @htmlPy.Slot(str, result=str)\n def add_social_skill(self, skill_name):\n created = soc.create_social(skill_name)\n details = soc.Social(created.id)\n html = (\"<tr><td>{}</td><td>{}</td>\"\n \"<td><button class='button small-button danger remove-skill' \"\n \"data-skillid='{}'>Remove</button></td>\").format(\n details.get_serial(),\n details.get_verbose(), details.get_id())\n return html\n\n @htmlPy.Slot(str)\n def remove_social_skill(self, _id):\n soc.remove_social(_id)\n\n @htmlPy.Slot()\n def print_socials(self):\n print('About printing socials..')\n","sub_path":"silos/ui/SocialHandler.py","file_name":"SocialHandler.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"405214085","text":"tweet_blob = [TextBlob(tweet) for tweet in df['text']]\ndf['polarity'] = [b.sentiment.polarity for b in tweet_blob]\n\ntweet_blob = [TextBlob(tweet) for tweet in df['text']]\ndf['subjectivity'] = [b.sentiment.subjectivity for b in tweet_blob]\n\ndf['followers_count'] = [x.get('followers_count') for x in df['user']]\ndf['friends_count'] = [x.get('friends_count') for x in df['user']]\ndf['favourites_count'] = [x.get('favourites_count') for x in df['user']]\n\nclass Clean_Tweets:\n \"\"\"\n The PEP8 Standard AMAZING!!!\n \"\"\"\n def __init__(self, df:pd.DataFrame):\n self.df = df\n print('Automation in Action...!!!')\n \n def drop_unwanted_column(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n remove rows that has column names. This error originated from\n the data collection stage. 
\n \"\"\"\n unwanted_rows = df[df['retweet_count'] == 'retweet_count' ].index\n df.drop(unwanted_rows , inplace=True)\n df = df[df['polarity'] != 'polarity']\n \n return df\n def drop_duplicate(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n drop duplicate rows\n \"\"\"\n \n # drop_duplicates(..., inplace=True) returns None, so assign the non-inplace result instead\n df = df.drop_duplicates(subset=None, keep=False)\n \n return df\n def convert_to_datetime(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n convert column to datetime\n \"\"\"\n df['created_at'] = pd.to_datetime(df['created_at'])\n \n df = df[df['created_at'] >= '2020-12-31' ]\n \n return df\n \n def convert_to_numbers(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n convert columns like polarity, subjectivity, retweet_count\n favourites_count etc to numbers\n \"\"\"\n df['polarity'] = pd.to_numeric(df['polarity'])\n df['subjectivity'] = pd.to_numeric(df['subjectivity'])\n df['retweet_count'] = pd.to_numeric(df['retweet_count'])\n df['favourites_count'] = pd.to_numeric(df['favourites_count'])\n \n return df\n \n def remove_non_english_tweets(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n remove non english tweets from lang\n \"\"\"\n # keep only rows whose lang value is pure ASCII; the original returned the boolean mask itself\n df = df[df['lang'].map(lambda x: x.isascii())]\n \n return df\n","sub_path":"clean_tweets_dataframe.py","file_name":"clean_tweets_dataframe.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}{"seq_id":"161830540","text":"import cv2\nimport numpy as np\nfrom timeit import default_timer as timer\nimport datetime, time\nfrom threading import Thread\n\nimport collections\nimport skimage.transform\n\n\nclass FPS:\n def __init__(self,avarageof=50):\n self.frametimestamps = collections.deque(maxlen=avarageof)\n def __call__(self):\n self.frametimestamps.append(time.time())\n if(len(self.frametimestamps) > 1):\n return len(self.frametimestamps)/(self.frametimestamps[-1]-self.frametimestamps[0])\n else:\n return 0.0\n\nclass Renderer(object):\n \"\"\"\n Draw image to screen.\n \"\"\"\n\n def __init__(self, show_fps = True, initial_resolution = 1024):\n self.show_fps = show_fps\n self.counter = 0\n self.initial_resolution = initial_resolution\n\n #cv2.namedWindow(\"frame\", cv2.WINDOW_NORMAL) # Create window with freedom of dimensions\n #cv2.resizeWindow('frame', initial_resolution, initial_resolution)\n\n return None\n\n def show_frames(self, get_image_function):\n fps = FPS()\n self.counter = 0\n\n while (True):\n self.counter += 1\n\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n if key == ord('v'):\n self.show_fps = not self.show_fps\n\n \"\"\"\n if key == ord('8'):\n self.sample_every /= 2\n if key == ord('2'):\n self.sample_every *= 2\n \"\"\"\n\n # get image\n frame = None\n\n time_start = timer()\n frame = get_image_function(self.counter)\n time_end = timer()\n print(\"timer:\", (time_end-time_start))\n #fps_val = 1.0 / (time_end-time_start)\n fps_val = fps()\n # includes time for the open cv to render and the text\n # so without showing it (imshow), it would be 24-26fps, with it it's cca 20-21fps\n #print(fps_val)\n\n if self.show_fps:\n frame = cv2.putText(frame, \"FPS \"+'{:.2f}'.format(fps_val), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n\n cv2.imshow('Interactive Machine Learning - GAN', frame)\n\n\n def show_frames_game(self, get_image_function):\n # This function is made with WASD and SPACE control scheme in mind\n fps = FPS()\n self.counter = 0\n message = \"\"\n\n while (True):\n self.counter += 1\n\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n if 
key == ord('v'):\n self.show_fps = not self.show_fps\n\n if key is not -1:\n print(key)\n\n key_code = \"\"\n nums = [str(i) for i in list(range(0,10))]\n allowed_keys = [\"w\",\"s\",\"a\",\"d\", # movement\n \"n\", \" \",\n \"f\",\"g\",\"t\",\"h\",\"u\",\"j\", # nn hacks and restore (j)\n \"l\", \"k\", # save load latents\n \"m\", # reorder latents\n \"p\", # plots\n \"r\", # random jump\n \"e\",\n \"+\", \"-\", \"*\",\n \"=\", # interpolate\n \"]\", 'x', \"z\"] + nums\n allowed_keys_ord = [ord(k) for k in allowed_keys]\n if key in allowed_keys_ord:\n key_code = chr(key)\n else:\n # DEBUG PART for key press detections:\n if key != -1:\n print(\"not allowed key detected:\",key)\n try:\n print(\"this might be\",chr(key))\n except Exception as e:\n print(\"err trying to call chr(key)\", e)\n\n\n # get image\n frame = None\n\n time_start = timer()\n frame, new_message = get_image_function(self.counter, key_code, key)\n if len(new_message) > 0:\n message = new_message\n\n time_end = timer()\n #print(\"timer:\", (time_end-time_start))\n #fps_val = 1.0 / (time_end-time_start)\n fps_val = fps()\n # includes time for the open cv to render and the text\n # so without showing it (imshow), it would be 24-26fps, with it it's cca 20-21fps\n #print(fps_val)\n\n if self.show_fps:\n if len(message) > 0:\n text = message\n textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]\n textX = int((frame.shape[1] - textsize[0]) / 2)\n textY = int((frame.shape[0] + textsize[1]) / 2)\n\n frame = cv2.putText(frame, text, (self.initial_resolution - textsize[0] - 10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n #frame = cv2.putText(frame, text, (textX, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n\n frame = cv2.putText(frame, \"FPS \" + '{:.2f}'.format(fps_val), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n\n cv2.imshow('Interactive Machine Learning - GAN', frame)\n\n def make_a_grid(self, get_image_function, x_times = 4, y_times = 4, resolution_of_one = 256):\n\n z = 3\n frame = np.ones((x_times*resolution_of_one, y_times*resolution_of_one, z), np.float)\n #print(\"frame ->\", frame.shape)\n\n counter = 0\n\n for i in range(x_times):\n for j in range(y_times):\n\n image = get_image_function(0)\n #return image\n image_resized = skimage.transform.resize(image, (resolution_of_one, resolution_of_one))\n #print(\"image_resized ->\", image_resized.shape)\n\n # numpy indexing: rows, columns\n jump_x = i*resolution_of_one\n jump_y = j*resolution_of_one\n frame[0+jump_x:resolution_of_one+jump_x, 0+jump_y:resolution_of_one+jump_y] = image_resized[:,:]\n # awful frame[0+jump_x:resolution_of_one+jump_x, 0+jump_y:resolution_of_one+jump_y, counter%3] = image_resized[:,:, 0]\n\n counter += 1\n\n #frame = frame * 0.8\n return frame\n\n def show_intro(self, get_image_function, get_grid_image_function):\n resolution = 1024\n end = False\n\n # Generate background as a grid of samples\n times = 6\n frame = self.make_a_grid(get_grid_image_function, times, times, int(resolution/times))\n\n # Layer over the text\n texts = [\"<< GAN interaction Game >>\", \"\",\n \"Controls:\",\n \" - ws: move forwards/backwards\",\n \" - ad: change direction\",\n \" - space: small perturbation of the space\",\n \" - r: randomly place elsewhere\",\n \" - SHIFT: toggles save or load\",\n \" - 0-9: save to/load from a slot number 0-9\",\n \" - v: FPS on/off\",\n \" - fg (t) h: NN hacks\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"[[ Press space to continue ... 
]]\"]\n\n\n while (True):\n key = cv2.waitKey(1)\n if key == ord('q'):\n end = True\n break\n if key == ord(' '):\n end = False\n break\n if key == ord('v'):\n self.show_fps = not self.show_fps\n\n frame_dynamic = frame.copy()\n\n #times = 3\n #frame_dynamic = self.make_a_grid(get_grid_image_function, times, times, int(resolution / times))\n\n if self.show_fps:\n left = 100\n top = 140\n title = True\n for text in texts:\n thickness = 2\n if title:\n thickness = 3\n frame_dynamic = cv2.putText(frame_dynamic, text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), thickness)\n title = False\n top += 40\n\n cv2.imshow('Interactive Machine Learning - GAN', frame_dynamic)\n\n if not end:\n # Continue!\n self.show_frames_game(get_image_function)","sub_path":"renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":8219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"354220738","text":"# Turn on/off Temperature Alarm LED connected to GrovePi+ port D6\n# I'll be using my own LEDs instead of the Grove LEDs. However, I'm using the code, grove_led_blink.py,\n# to drive the led from here:\n# http://wiki.seeedstudio.com/Grove-Red_LED/\n\n# Connect Temperature Alarm LED to digital port D7\ntemp_alarm_led = 6\n\npinMode(temp_alarm_led,\"OUTPUT\")\ntime.sleep(1)\n\nprint (\"This example will blink an LED connected to the GrovePi+ on the port labeled D6. If you're having trouble seeing the LED blink, be sure to check the LED connection and the port number. You may also try reversing the direction of the LED on the sensor.\")\nprint (\" \")\nprint (\"Connect the LED to the port labele D6!\" )\n\nwhile True:\n try:\n #Blink the LED\n digitalWrite(temp_alarm_led,1) # Send HIGH to switch on LED\n print (\"LED ON!\")\n time.sleep(1)\n\n digitalWrite(temp_alarm_led,0) # Send LOW to switch off LED\n print (\"LED OFF!\")\n time.sleep(1)\n\n except KeyboardInterrupt: # Turn LED off before stopping\n digitalWrite(temp_alarm_led,0)\n break\n except IOError: # Print \"Error\" if communication error encountered\n print (\"Error\")\n","sub_path":"Grove_sensors/Grove_LED/grovepi_plus_temp_alarm.py","file_name":"grovepi_plus_temp_alarm.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"625545764","text":"#!/usr/bin/python3\n#importing necessay modules\nimport numpy as np\nimport pandas as pd\nimport time\nimport sys\n#ML Libraries\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n\n#Visualization Libraries\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\"\"\"\nThis function reads the input file with samples and their binary feature vector generated using \"FEATURE_GENERATION\" script\nand genrates test and tain set for trainign and predicting machine learning model performance\n\nInput: It takes as input three file names:\n 1) fea_file: Output file genrated from \"FEATURE_GENERATION\"\n 2) select_fea_file: Output file generated from Feature_SelectionI\n 3) mapping_file: Provided by the user\n \n\nOutput: Training and Testing Features and Labels are generated as matrix and list respectively\n\"\"\"\n\ndef reading_input_data(fea_file,select_fea_file,mapping_file):\n #Reading the Excel File as Features\n dataset1= 
pd.read_excel(fea_file, index_col=0)\n\n #Reading the SRA TO PFGE mapping file\n srr_to_pfge={}\n #Create the PFGE TO SRA mapping\n pfge_to_srr={}\n with open(mapping_file,\"r\") as fh1:\n for line in fh1:\n line=line.strip(\"\\n\").strip(\" \")\n tmp=line.split(\"\\t\")\n #for each srr store its corresponding barcode\n srr_to_pfge[tmp[0]]=tmp[1]\n if tmp[1] not in pfge_to_srr:\n pfge_to_srr[tmp[1]]=[tmp[0]]\n else:\n pfge_to_srr[tmp[1]].append(tmp[0])\n \n ############### Creating the test train set #####################\n train=[]\n test=[]\n for key in pfge_to_srr:\n #class label with 5 representations are divided as 1 sample in test and 4 samples in train\n if len(pfge_to_srr[key])==10:\n for i in pfge_to_srr[key][0:8]:\n train.append(i)\n test.append(pfge_to_srr[key][8])\n test.append(pfge_to_srr[key][9])\n #class label with 3 or less representations are only included in training data\n elif len(pfge_to_srr[key])<=10:\n for j in pfge_to_srr[key]:\n train.append(j)\n #class label with 5 representations are divided as 1 sample in test and 3 samples in train\n elif len(pfge_to_srr[key])==9:\n for m in pfge_to_srr[key][0:7]:\n train.append(m)\n test.append(pfge_to_srr[key][8])\n #loading the selected features\n fea_sel=reading_features(select_fea_file)\n #subsetting the dataframe to only take selected features\n dataset2=dataset1.loc[:,fea_sel]\n #Assigning the class label\n pfge=[]\n for item in list(dataset2.index):\n srr_id=item.split(\"_\")[0]\n pfge.append(srr_to_pfge[srr_id])\n \n #adding the class label column\n dataset2[\"PFGE\"]=pfge\n \n X=dataset2.iloc[:,:-1]\n Y=dataset2.iloc[:,-1]\n #Encoding the categorical class variable\n labelencoder_y=LabelEncoder()\n Y_la=labelencoder_y.fit_transform(Y)\n Y_mapping=labelencoder_y.inverse_transform(Y_la)\n #Storing the mapping of the categorical class variable to it's encoded form\n mapping=dict(zip(Y_mapping,Y_la))\n #Creating the test and train set\n X_train=X.drop(test)\n X_test=X.drop(train)\n Y_train=[]\n Y_test=[]\n for i in X_train.index:\n Y_train.append(mapping[dataset2.loc[i,\"PFGE\"]])\n for i in X_test.index:\n Y_test.append(mapping[dataset2.loc[i,\"PFGE\"]])\n \n return X_train,Y_train,X_test,Y_test\n \n\"\"\"\nThis function reads the file in which selected features are stored (generated from Feature_SelectionI)\n\nInput: It takes as input name of the text file with selected maximally informative k-mers\n \nOutput: It outputs a list of selected k-mers\n\"\"\"\n\ndef reading_features(features_selected):\n lst_uni=[]\n with open(features_selected,\"r\") as fh11:\n for line in fh11:\n line=line.strip(\"\\n\").strip(\" \")\n if line.strip(\"\\n\"):\n lst_uni.append(int(line))\n return lst_uni\n\n\"\"\"\nThis function is training the Random Forest Model and also performing 5-fold Cross Validation on the trainign data \n\nInput: It takes as input Training feature matrix and class labels list\n \n\nOutput: Trained Model and prints the 5-fold CV accuracy of Training Set\n\"\"\" \n\ndef training_model(X_train, Y_train):\n clfs= RandomForestClassifier(n_estimators=500)\n clf=RandomForestClassifier(n_estimators=500)\n scores = cross_val_score(clfs,X_train,Y_train, cv=4)\n print(f\"RF 5-fold CV Accuracy %.4f\"%scores.mean())\n clf.fit(X_train,Y_train)\n return clf\n\n\"\"\"\nThis function makes prediction on the test set \n\nInput: It takes as input input the trained model, testing feature matrix and class labels list\n \n \nOutput: It returns the predicted class label and the approximated predicted class label\n\"\"\"\n\ndef 
testing_model(trained_model, X_test,Y_test):\n y_pred=trained_model.predict(X_test)\n print(\"Prediction Accuracy:\",accuracy_score(Y_test,y_pred))\n predict_probs = trained_model.predict_proba(X_test)\n n=5\n top5 = np.argsort(predict_probs, axis=1)[:,-n:]\n Y_pred_new=[]\n # Approximating the prediction: if the true label is present in the top 5, it is counted as a true prediction\n for i,d in enumerate(Y_test):\n if d in top5[i][::-1]:\n Y_pred_new.append(d)\n else:\n Y_pred_new.append(top5[i][::-1][0])\n return Y_pred_new, y_pred\n\n\"\"\"\nThis function computes the confusion matrix and calls the visualization function to plot it in the form of a heatmap\n\nInput: It takes as input the test class labels, predicted class labels, approximated predicted class labels\n\nOutput: It outputs the heatmap\n\"\"\"\n\ndef Visualization(Y_test,y_pred,y_pred_new):\n conf_mat = confusion_matrix(Y_test,y_pred)\n class_name= list(set(list(Y_test)))\n print(drawing_confusion_matrix(conf_mat, class_name))\n conf_mat2 = confusion_matrix(Y_test,y_pred_new)\n print(drawing_confusion_matrix(conf_mat2, class_name))\n\n\"\"\"\nThis function creates a heatmap of the input confusion matrix\n\nInput: It takes as input four things:\n 1) confusion_matrix: As generated by using the ML model\n 2) class_names: The actual class label names we are trying to predict\n 3) figsize: Figure size (Hz size, Vt size) {default is (20,20)}\n 4) fontsize: Font size for axes labels {default is 14}\n\nOutput: Heatmap of the confusion matrix\n\"\"\"\n\ndef drawing_confusion_matrix(confusion_matrix, class_names, figsize = (20,20), fontsize=14):\n colors=[\"#fff7fb\",\"#ece2f0\",\"#d0d1e6\",\"#a6bddb\",\"#67a9cf\",\"#3690c0\",\"#02818a\"]\n df_cm = pd.DataFrame(confusion_matrix, index=class_names, columns=class_names)\n fig = plt.figure(figsize=figsize)\n sns.set(font_scale=1.4)#for label size\n heatmap = sns.heatmap(df_cm,cmap=colors)\n heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)\n heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return fig\n\ndef main(input_fea,selected_fea,srr_map):\n X_train,Y_train,X_test,Y_test=reading_input_data(input_fea,selected_fea,srr_map)\n train_model=training_model(X_train, Y_train)\n Predicted_appx, Predicted_actual=testing_model(train_model, X_test,Y_test)\n Visualization(Y_test,Predicted_actual,Predicted_appx)\n \nif __name__ == \"__main__\":\n main(sys.argv[1],sys.argv[2],sys.argv[3])\n \n","sub_path":"Masters_Project/ML_basic_script.py","file_name":"ML_basic_script.py","file_ext":"py","file_size_in_byte":7942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}{"seq_id":"388323058","text":"from pathlib import Path\n\nfrom cg.cli.demultiplex.add import add_flow_cell_cmd\nfrom cg.models.cg_config import CGConfig\nfrom cg.models.demultiplex.flow_cell import FlowCellDirectoryData\nfrom click.testing import CliRunner\n\n\ndef test_add_flowcell_cmd(\n cli_runner: CliRunner,\n bcl2fastq_flow_cell: FlowCellDirectoryData,\n demultiplex_context: CGConfig,\n tmp_demultiplexed_runs_directory: Path,\n tmp_flow_cell_directory_bcl2fastq: Path,\n):\n # GIVEN a cgstats api and a demultiplex api\n # GIVEN that there is a flow cell in the directory\n assert tmp_flow_cell_directory_bcl2fastq.exists()\n 
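# NOTE: the exists() checks guard against fixture wiring errors before the CLI command runs (assumption: the tmp_* arguments are pytest tmp-path fixtures)\n 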
# GIVEN that there is a demultiplexed flow cell\n assert tmp_demultiplexed_runs_directory.exists()\n\n # GIVEN that the flowcell does not exist in the cgstats database\n assert not demultiplex_context.cg_stats_api.find_handler.get_flow_cell_by_name(\n flow_cell_name=bcl2fastq_flow_cell.id\n )\n\n # WHEN running the add flowcell command\n result = cli_runner.invoke(\n add_flow_cell_cmd,\n [bcl2fastq_flow_cell.full_name],\n obj=demultiplex_context,\n )\n\n # THEN assert that the run was success\n assert result.exit_code == 0\n # THEN assert that the flowcell was added to cgstats\n assert demultiplex_context.cg_stats_api.find_handler.get_flow_cell_by_name(\n flow_cell_name=bcl2fastq_flow_cell.id\n )\n","sub_path":"tests/cli/demultiplex/test_add_flowcell.py","file_name":"test_add_flowcell.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"355659942","text":"#这是一个赌博小游戏,规则如下\r\n#开局你会注册一个账号,账号会有你的名字和密码(密码只支持数字),该游戏支持保存姓名,不保存密码\r\n#游戏开始你会有5000个金币,你可以设置游戏��难度,有低级和普通和高级难度\r\n#低级难度一次消耗100金币,数字范围在0-100之间,猜数次数只有8次,猜中会奖励300金币\r\n#普通难度一次消耗500金币,数字范围在0-1000之间,猜数次数只有15次,猜中会奖励3000金币、\r\n#高级难度一次消耗2000金币,数字范围在0-10之间,猜数次数只有3次,猜中奖励10000金币\r\n#如果在低级难度连续8次没猜中,将从低级难度退出,并且无法再进入低级难度游戏\r\n#如果在普通难度连续15次没猜中,将从普通难度退出,并且无法再进入普通难度游戏\r\n#如果在困难难度连续3次没猜中,将从困难难度退出,并且无法再进入困难难度游戏\r\n#如果三个游戏都无法进入,或者金币小于0,将会从游戏中退出,并显示你最后的金币数\r\n#如果你在非以上情况下,退出了该游戏,会显示你最后的金币数,15000金币以上走的人会被称为赌圣\r\n\r\nimport random\r\nprint(\"\\t\\t\\t\\t\\t欢迎进入澳门首家线上赌场\\t\\t\\t\\t\\t\")\r\nname=input(\"请输入您的姓名:\")\r\npassword=int(input(\"请输入您的新密码(只能输入纯数字):\"))\r\n\r\npassword1 = int(input(\"请再输入您的新密码(只能输入纯数字);\"))\r\n\r\nif password == password1:\r\n log_in = 1\r\nelse:\r\n log_in = 0\r\n print(\"你密码都能输错,故意的吧?别来玩了\")\r\n\r\ngold = 5000\r\neasy = 0\r\nnormal = 0\r\ndifficult = 0\r\nlog_ine = 1\r\nlog_inn = 1\r\nlog_ind = 1\r\n#次数\r\nlog_ins = [easy,normal,difficult]\r\n#猜数范围\r\na1 = random.randint(0,100)\r\na2 = random.randint(0,1000)\r\na3 = random.randint(0,10)\r\nanswer = [a1,a2,a3]\r\n#消耗金币数\r\nexpend = [100,500,2000]\r\n#最多连续次数\r\ntime = [8,15,3]\r\n#奖励\r\naward = [300,3000,10000]\r\n#游玩次数\r\nplay = 0\r\n\r\nprint(\"简单难度猜0-100的数字,一次消耗100金币,猜中奖励300金币\")\r\nprint(\"普通难度猜0-1000的数字。一次消耗500金币,猜中奖励3000金币\")\r\nprint(\"困难难度猜0-10的数字,一次消耗2000金币,猜中奖励10000金币\")\r\n\r\n\r\nwhile log_in>0:\r\n\r\n if log_ins[0] >= 8:\r\n log_ine = 0\r\n print(\"已不能选择简单难度\")\r\n\r\n if log_ins[1] >= 13:\r\n log_inn = 0\r\n print(\"已不能选择普通难度\")\r\n if log_ins[2] >= 3:\r\n log_ind = 0\r\n print(\"已不能选择困哪难度\")\r\n\r\n if easy>=8 and normal>=13 and difficult>=3:\r\n print(\"所有难度皆不能选择,请退出游戏\")\r\n log_in = 0\r\n elif gold <=0:\r\n log_in = 0\r\n print(\"金币已不足,将退出游戏\")\r\n break\r\n else:\r\n log_in = 1\r\n\r\n\r\n\r\n\r\n\r\n while log_in > 0:\r\n print(('你还有', gold, \"个金币,普通还有\", time[0] - log_ins[0], \"次机会,普通还有\", time[1] - log_ins[1],\r\n \"次机会,困难还有\", time[0] - log_ins[0], \"次机会\"))\r\n a1 = random.randint(0, 100)\r\n a2 = random.randint(0, 1000)\r\n a3 = random.randint(0, 10)\r\n answer = [a1, a2, a3]\r\n print(\"请输入您想要选择的游戏难度\")\r\n print(\"输入1是简单,输入2是普通,输入3是困难,输入4是退出该游戏\")\r\n option=int(input(\"请输入:\"))\r\n if option==1 and log_ins[0]==time[0]:\r\n print(\"你已无法参加简单难度的游戏\")\r\n\r\n elif option == 2 and log_ins[1] == time[1]:\r\n print(\"你已无法参加普通难度的游戏\")\r\n\r\n elif option == 3 and log_ins[2] ==time[2]:\r\n print(\"你已经无法参加苦难难度的游戏\")\r\n elif option == 4:\r\n print(\"你将退出游戏\")\r\n log_in = 0\r\n break\r\n elif gold<=0:\r\n print(\"金币已不足,你甚至还欠我钱,速速出去,莫妨碍我做生意\")\r\n break\r\n while gold>0 and 
log_ins[option-1]<=time[option-1]:\r\n e = int(input(\"请输入要猜的数字\"))\r\n if answer[option-1]== e:\r\n print(\"恭喜您猜中了,获得了\",award[option-1],\"个金币\")\r\n print(\"您将回到难度选择区域\")\r\n log_ins[option-1]=0\r\n gold = gold +award[option-1]\r\n print(\"你现在有\",gold,\"个金币\")\r\n play = play +1\r\n break\r\n elif answer[option-1]>e:\r\n print(\"猜错了,实际的数字更大\")\r\n elif answer[option-1]<e:\r\n print(\"猜错了,实际的数字更小\")\r\n gold = gold - expend[option - 1]\r\n log_ins[option - 1] = log_ins[option - 1] + 1\r\n play=play+1\r\n print(\"你现在有\",gold,\"个金币\")\r\n print(\"还有\",time[option-1]-log_ins[option-1],\"次机会\")\r\n print(\"是否要继续,不继续请输入1,继续输入其他数字\")\r\n z = int(input())\r\n if z == 1:\r\n print(\"将回到选择难度界面\")\r\n break\r\nif gold<=0:\r\n print(\"你总共进行了\",play,\"次游戏\",name,\"小赌怡情,大赌伤身啊\")\r\nelif gold<15000:\r\n print(\"你总共进行了\",play,\"次游戏\",name,\"苦海无涯,回头是岸啊\")\r\nelse:\r\n print(\"你总共进行了\",play,\"次游戏\",name,\"赌圣大佬慢走,欢迎下次再来\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"579286882","text":"import datetime\nimport os\nimport docker\nimport dockerpty\nimport shutil\nfrom compose.const import LABEL_SERVICE\nfrom compose.container import Container\nfrom configobj import Section\nfrom docker import DockerClient\nfrom machine import machine\nfrom python_hosts import Hosts, HostsEntry\nfrom cli.app import commons\nfrom cement.ext.ext_argparse import expose\nfrom compose.cli.command import project_from_options\nfrom compose.cli.main import TopLevelCommand\nfrom cli.app.commons import DTBaseController, CONFIG_COMMANDS\n\n\nclass DockerCommand(object):\n def __init__(self, config, compose_files, docker_root=None, docker_options=None, name=None, machine_name=None,\n network_name=None, host_name=None, volumes=None, logger=None):\n super(DockerCommand, self).__init__()\n self.config = config\n self.name = name\n self.compose_files = compose_files\n self.docker_root = docker_root\n self.machine_name = machine_name\n self.network_name = network_name\n self.host_name = host_name\n self.volumes = volumes\n self.logger = logger\n self.docker_client = None # type: DockerClient\n self.machine = None\n\n # start preparing\n self.prepare_docker_client()\n self.compose_cmd, self.docker_options = self.prepare(compose_files=compose_files, docker_options=docker_options)\n\n def prepare_docker_client(self):\n # prepare machine when requested\n if self.machine_name:\n self.prepare_machine(name=self.machine_name)\n # set hosts\n self.prepare_hosts()\n # create docker client based on the current env\n self.docker_client = docker.from_env()\n # prepare network when requested\n if self.network_name:\n self.prepare_network(name=self.network_name)\n if self.volumes:\n self.prepare_volumes()\n\n def prepare(self, compose_files, docker_options=None) -> (TopLevelCommand, dict):\n # based on:\n # https://github.com/docker/compose/issues/3573\n # https://github.com/docker/compose/pull/4992\n os.environ['COMPOSE_IGNORE_ORPHANS'] = 'true'\n project_dir = commons.get_dt_path(self.docker_root)\n\n # based on: https://github.com/fruch/doorman/blob/master/tests/integration/conftest.py\n # set the options\n options = {**{\n 'SERVICE': '',\n '--project-name': self.name,\n # important to set this to the current user dir\n '--project-directory': project_dir,\n '--file': compose_files,\n '--no-deps': False,\n 
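# NOTE: these keys mirror docker-compose CLI flags consumed by compose.cli.main.TopLevelCommand; the accepted set may vary with the installed docker-compose version\n 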
'--abort-on-container-exit': False,\n '--remove-orphans': False,\n '--no-recreate': False,\n '--force-recreate': False,\n '--build': False,\n '--no-build': False,\n '--rmi': 'none',\n '--volumes': '',\n '--follow': False,\n '--timestamps': False,\n '--tail': 'all',\n '--scale': '',\n '--no-color': False,\n '-d': True,\n '--always-recreate-deps': True\n }, **(docker_options if docker_options else {})}\n # compose the project\n project = project_from_options(project_dir, options)\n # prepare the commands\n cmd = TopLevelCommand(project)\n # return the options\n return cmd, options\n\n def prepare_machine(self, name):\n self.logger.debug('Prepare machine: {name}'.format(name=name))\n m = machine.Machine()\n self.machine = m\n try:\n machine_status = m.status(machine=name)\n if not machine_status:\n m.start(machine=name)\n except RuntimeError as ex:\n if 'is already running' in str(ex):\n # machine is running, all good\n pass\n if 'Host does not exist' in str(ex):\n # machine is not exist, possibly not created\n self.logger.debug('Create machine: {name}'.format(name=name))\n try:\n # machine not exist, try to create\n m.create(name=name)\n # restart again\n m.start(machine=name)\n # set the env\n self.logger.debug('Created machine: {name}'.format(name=name))\n except RuntimeError:\n pass\n # check whether certs is ok\n try:\n m.status(machine=name)\n except RuntimeError as ex:\n if 'Error checking TLS' in str(ex):\n # possibly need to regenerate certs\n self.logger.debug('Regenerate cert for machine: {name}'.format(name=name))\n m.regenerate_certs(machine=name)\n # set env\n self.logger.debug('Set environment machine: {name}'.format(name=name))\n # get first 8 items start from index 1 [1:8], and get item every %2 [::2]\n envs = m.env(machine=name)[1:8][::2]\n envs = envs\n for env in envs:\n env = env.replace('\"', '')\n key, val = env.split('=')\n os.environ[key] = val\n self.logger.debug('Set environment: {env}'.format(env=env))\n\n def find_network(self, name):\n networks = []\n for item in self.docker_client.networks.list():\n item_dict = dict(item.attrs.items())\n item_name = item_dict.get('Name')\n if item_name == name:\n networks.append(item)\n return networks\n\n def prepare_network(self, name):\n self.logger.debug('Prepare network: {name}'.format(name=name))\n networks = self.find_network(name=name)\n # when there is no such network name, we create new one\n if len(networks) == 0:\n self.logger.debug('Create network: {name}'.format(name=name))\n self.docker_client.networks.create(name)\n\n def prepare_hosts(self):\n host = self.host_name\n if host:\n if self.machine_name:\n ip = self.machine.ip(machine=self.machine_name)\n else:\n ip = '127.0.0.1'\n self.logger.debug('Prepare hosts: {name} with {ip}'.format(name=host, ip=ip))\n hosts = Hosts()\n for entry in hosts.entries:\n if entry.address == ip:\n if host not in entry.names:\n entry.names.append(host)\n entry.names = list(set(entry.names))\n if not hosts.exists(names=[host]):\n entry = HostsEntry(entry_type='ipv4', address=ip, names=[host])\n hosts.add(entries=[entry])\n\n try:\n # make backup\n hosts_path = Hosts.determine_hosts_path()\n hosts_backup_path = hosts_path + '.' 
+ datetime.datetime.today().strftime('%Y%m%d')\n shutil.copy(hosts_path, hosts_backup_path)\n except BaseException:\n pass\n\n try:\n hosts.write()\n except BaseException:\n self.logger.debug('Unable to write host file, ignored.')\n\n def prepare_volumes(self):\n volumes = self.volumes\n if volumes:\n self.logger.debug('Prepare volumes')\n for name, path in volumes.items():\n self.docker_client.volumes.create(name=name, **{\n 'driver_opts': {\n 'type': 'none',\n 'o': 'bind',\n 'device': commons.resolve_path(path)\n }\n })\n\n def find_container(self, name) -> Container:\n containers = self.compose_cmd.project.containers(service_names=[name])\n container = next(iter(containers), None)\n return container\n\n def build(self, docker_options=None):\n docker_options = {**self.docker_options, **(docker_options if docker_options else {})}\n # run it\n self.logger.debug('Build services')\n self.compose_cmd.build(docker_options)\n\n def start(self, docker_options=None):\n docker_options = {**self.docker_options, **(docker_options if docker_options else {})}\n # run it\n self.logger.debug('Build services')\n self.compose_cmd.build(docker_options)\n self.logger.debug('Start services')\n self.compose_cmd.up(docker_options)\n\n def run(self, container_name, command, args, run_options=None):\n try:\n container = self.find_container(name=container_name)\n run_options = {**{\n '--entrypoint': None,\n '--name': None,\n '--no-deps': False,\n '--publish': [],\n '--rm': True,\n # bind the port if not running\n '--service-ports': (container and not container.is_running) or (not container),\n '--user': None,\n '--volume': [],\n '--workdir': None,\n # this is reversed, if True means disable TTY\n '-T': False,\n '-d': False,\n '-e': [],\n 'ARGS': args,\n 'COMMAND': command,\n 'SERVICE': container_name\n }, **(run_options if run_options else {})}\n self.compose_cmd.run(run_options)\n except:\n pass\n\n def execute(self, container_name, command, args, exec_options=None):\n exec_options = {**{\n '--index': 1,\n '-d': False,\n '-T': False,\n '--privileged': False,\n '--user': None,\n 'ARGS': args,\n 'COMMAND': command,\n 'SERVICE': container_name\n }, **(exec_options if exec_options else {})}\n return self.compose_cmd.exec_command(exec_options)\n\n def ssh(self, container_name):\n # get command and options\n container = self.find_container(name=container_name)\n if container:\n # there is such active container\n self.logger.debug('Run /bin/bash into container.')\n # create new exec\n exec_id = container.create_exec('/bin/bash', stdin=True, stdout=True, tty=True)\n # check PTY for more information, this is actually passing TTY around, and dockerpty helps much\n dockerpty.start_exec(container.client, exec_id)\n else:\n self.logger.debug('There is no active container.')\n\n def stop(self, compose_files, docker_options=None):\n # get command and options\n cmd, options = self.prepare(compose_files=compose_files, docker_options=docker_options)\n self.logger.debug('Stop services')\n for container in cmd.project.containers():\n if container.is_paused:\n self.docker_client.api.unpause(container.id)\n self.docker_client.api.stop(container.id, docker_options.get('--timeout'))\n\n def remove(self, compose_files, docker_options=None):\n # get command and options\n cmd, options = self.prepare(compose_files=compose_files, docker_options=docker_options)\n self.logger.debug('Remove services')\n cmd.down(options)\n self.logger.debug('Remove networks')\n self.docker_client.networks.prune()\n self.logger.debug('Remove volumes')\n 
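# NOTE: volumes.prune() removes every dangling volume on the host, not only this project's\n 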
self.docker_client.volumes.prune()\n\n\nclass DockerController(DTBaseController):\n class Meta:\n label = 'docker'\n stacked_on = 'base'\n stacked_type = 'nested'\n description = 'All Docker related commands'\n\n def __init__(self):\n super(DockerController, self).__init__()\n self.docker_cmds = {}\n self.__docker_client = None\n\n def get_apps(self) -> dict:\n # check DockerCommand is not initialised for the app name\n apps = self.config_get(commons.CONFIG_APPS) # type: Section\n return apps.dict()\n\n def create_docker_cmd(self, name, docker_options=None) -> DockerCommand:\n # check DockerCommand is not initialised for the app name\n apps = self.get_apps()\n app = apps[name]\n app['name'] = name\n # get docker-compose files\n compose_files = app.get(commons.CONFIG_COMPOSE_FILES)\n compose_files = list(\n map(lambda x: commons.resolve_path(x, env=self.config_get(commons.CONFIG_ENV)),\n compose_files))\n name = self.config_get(commons.CONFIG_NAME)\n docker_root = self.config_get(commons.CONFIG_DOCKER_ROOT)\n machine_name = self.config_get(commons.CONFIG_MACHINE_NAME)\n network_name = self.config_get(commons.CONFIG_NETWORK_NAME)\n host_name = self.config_get(commons.CONFIG_HOST_NAME)\n volumes = self.config_get(commons.CONFIG_VOLUMES)\n # create new DockerCommand\n cmd = DockerCommand(config=app, name=name, docker_root=docker_root, compose_files=compose_files,\n machine_name=machine_name, network_name=network_name, host_name=host_name, volumes=volumes,\n docker_options=docker_options, logger=self.app.log)\n return cmd\n\n def get_docker_cmd(self, name, docker_options=None) -> DockerCommand:\n if not self.docker_cmds.get(name, None):\n self.docker_cmds[name] = self.create_docker_cmd(name=name, docker_options=docker_options)\n docker_cmd = self.docker_cmds.get(name, None)\n return docker_cmd\n\n def get_command(self, command_name, app_name=None, force=False):\n configs = self.config_get(CONFIG_COMMANDS)\n config = configs.get(command_name)\n app_name = app_name if app_name else config.get('app_name')\n service_name = config.get('service_name')\n # get docker command\n if not force:\n docker_cmd = self.get_docker_cmd(name=app_name)\n else:\n docker_cmd = self.create_docker_cmd(name=app_name)\n return service_name, docker_cmd\n\n @expose(hide=True)\n def default(self):\n self.app.args.print_help()\n\n @expose(\n help='Build application using Docker',\n arguments=[\n (['name'], dict(action='store', nargs='*', help='Docker name to be build'))\n ],\n )\n def build(self):\n docker_options = {}\n if not self.app.pargs.name:\n name = self.get_apps().keys()\n else:\n name = self.app.pargs.name\n for n in name:\n docker_cmd = self.get_docker_cmd(name=n, docker_options={\n '--timeout': self.config_get(commons.CONFIG_STOP_TIMEOUT)\n })\n docker_cmd.build(docker_options=docker_options)\n\n @expose(\n help='Start application using Docker',\n arguments=[\n (['name'], dict(action='store', nargs='*', help='Docker name to be build')),\n (['-fg', '--foreground'], dict(action='store_true', help='Run Docker build in foreground process')),\n ],\n )\n def start(self):\n docker_options = {\n '-d': not self.app.pargs.foreground\n }\n if not self.app.pargs.name:\n name = self.get_apps().keys()\n else:\n name = self.app.pargs.name\n for n in name:\n docker_cmd = self.get_docker_cmd(name=n, docker_options={\n '--timeout': self.config_get(commons.CONFIG_STOP_TIMEOUT)\n })\n docker_cmd.start(docker_options=docker_options)\n\n @expose(\n help='Update host file',\n arguments=[\n (['name'], dict(action='store', help='Service 
name')),\n ]\n )\n def update_host(self):\n self.get_docker_cmd(name=self.app.pargs.name)\n\n @expose(\n help='Run container',\n arguments=[\n (['name'], dict(action='store', help='Service name')),\n (['container'], dict(action='store', help='Container name')),\n (['command'], dict(action='store', help='Command')),\n (['args'], dict(action='store', nargs='*')),\n ],\n )\n def run(self):\n docker_cmd = self.get_docker_cmd(name=self.app.pargs.name)\n docker_cmd.run(\n container_name=self.app.pargs.container,\n command=self.app.pargs.command,\n args=self.app.pargs.args + self.app.args.unknown_args)\n\n @expose(\n help='Execute container',\n arguments=[\n (['name'], dict(action='store', help='Service name')),\n (['container'], dict(action='store', help='Container name')),\n (['command'], dict(action='store', help='Command')),\n (['args'], dict(action='store', nargs='*')),\n ],\n )\n def execute(self):\n docker_cmd = self.get_docker_cmd(name=self.app.pargs.name)\n docker_cmd.execute(\n container_name=self.app.pargs.container,\n command=self.app.pargs.command,\n args=self.app.pargs.args + self.app.args.unknown_args\n )\n\n @expose(\n help='SSH container',\n arguments=[\n (['name'], dict(action='store', help='Service name')),\n (['container'], dict(action='store', help='Container name')),\n ],\n )\n def ssh(self):\n docker_cmd = self.get_docker_cmd(name=self.app.pargs.name)\n docker_cmd.ssh(container_name=self.app.pargs.container)\n\n @expose(\n help='Logs container',\n arguments=[\n (['name'], dict(action='store', help='Service name')),\n (['container'], dict(action='store', help='Container name')),\n ],\n )\n def logs(self):\n docker_cmd = self.get_docker_cmd(name=self.app.pargs.name)\n docker_cmd.compose_cmd.logs({\n 'SERVICE': [self.app.pargs.container],\n '--tail': 'all',\n '--follow': True,\n '--timestamps': True,\n '--no-color': False\n })\n\n @expose(\n help='Stop application using Docker',\n arguments=[\n (['name'], dict(action='store', nargs='*', help='Docker name to be build')),\n (['-rm', '--remove'], dict(action='store_true', help='Remove the application')),\n ],\n )\n def stop(self):\n if not self.app.pargs.name:\n name = self.get_apps().keys()\n else:\n name = self.app.pargs.name\n apps = self.get_apps()\n for n in name:\n app = apps[n]\n docker_cmd = self.get_docker_cmd(name=n, docker_options={\n '--timeout': self.config_get(commons.CONFIG_STOP_TIMEOUT)\n })\n self.app.log.info('Stop [{name}] services'.format(name=n))\n for container in docker_cmd.compose_cmd.project.containers():\n if container.is_paused:\n docker_cmd.docker_client.api.unpause(container.id)\n # find stop script\n scripts = app.get(commons.CONFIG_SCRIPTS) # type: dict\n if scripts:\n label = container.labels.get(LABEL_SERVICE)\n stop = scripts.get(label, {}).get('stop')\n if stop:\n self.app.log.info('Execute stop script [{name}] [{stop}]'.format(name=label, stop=stop))\n docker_cmd.execute(label, 'bash', ['-c', stop], exec_options={\n '-T': True,\n '-d': True\n })\n docker_cmd.docker_client.api.stop(container.id)\n\n @expose(\n help='Remove service',\n arguments=[\n (['name'], dict(action='store', nargs='*', help='Docker name to be build')),\n ],\n )\n def remove(self):\n if not self.app.pargs.name:\n name = self.get_apps()\n else:\n name = self.app.pargs.name\n for n in name:\n docker_cmd = self.get_docker_cmd(name=n)\n docker_options = {**docker_cmd.docker_options, **{}}\n self.app.log.info('Remove services')\n docker_cmd.compose_cmd.down(docker_options)\n self.app.log.info('Remove networks')\n 
docker_cmd.docker_client.networks.prune()\n self.app.log.info('Remove volumes')\n docker_cmd.docker_client.volumes.prune()\n\n\ncommons.add_command('docker', command=DockerCommand, controller=DockerController)\n","sub_path":"cli/app/commands/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":20168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"443010733","text":"\"\"\"\n Himanshu Gwalani\n 2017ucp1356\n\"\"\"\nfrom core import utils,Settings,Cli,db,browser\nfrom core.color import *\nfrom core.module_utils import *\nimport importlib,traceback,os\nglobal global_options, module_keywords, cli_keywords\nmodule_help = end+G+end\n\nglobal_options = {}\nmodules = db.get_modules()\nmodule_keywords = [\"options\",\"set\",\"run\",\"back\",\"close\"]\ndef Exec(all_keywords):\n\tglobal global_options, module_keywords, cli_keywords\n\tmodule_keywords += all_keywords\n\tcli_keywords = all_keywords\n\tmod = importlib.import_module(utils.pythonize(\"core.modules.\"+Settings.running_module))\n\tglobal_options = getattr(mod, 'execution').module_type.options\n\tif os.name !=\"nt\":\n\t\tutils.Input_completer(module_keywords+modules)\n\tSettings.add_module(Settings.running_module)\n\ndef handle(c):\n\tif c==\"\" or c[0]==\"#\":return\n\tc = c.strip()\n\thead = c.lower().split(\" \")[0]\n\targs = \" \".join(c.split(\" \")[1:])\n\ttry:\n\t\t# Yeah, we don't have switch case in python...\n\t\tif head == 'exit':\n\t\t\tif Settings.headless_browser:\n\t\t\t\tSettings.headless_browser.close_all()\n\t\t\t\tSettings.headless_browser = None\n\t\t\texit(0)\n\n\t\t\n\t\tif head in [\"database\",\"debug\",\"dev\",\"verbose\",\"reload\",\"refresh\",\"list\",\"show\",\"resource\",\"os\",\"use\",\"exec\",\n\t\t\t\t\t\t\"search\",\"info\",\"previous\",\"sessions\",\"jobs\",\"eval\",\"report\"]:\n\t\t\texec(\"Cli.command_{}(args)\".format(head))\n\t\t\tSettings.update_history(c)\n\t\telse:\n\t\t\thandler = globals()[\"command_{}\".format(head)]\n\t\t\thandler(args)\n\t\t\tSettings.update_history(c)\n\n\texcept Exception as e:\n\t\terror( head + \" is not recognized as an internal command !\")\n\t\t#To check for the wanted command on typos\n\t\twanted = utils.grab_wanted(head,module_keywords)\n\t\tif len(wanted)>0:\n\t\t\tstatus( \"Maybe you meant : \" + wanted )\n\t\tstatus( \"Type help or ? 
to learn more..\")\n\ndef command_options(text=False):\n\t\n\toptions = global_options\n\theaders = [B+Bold+\"Name\",\"Current value\",\"Required\",\"Description\"+end]\n\tnames = list( options.keys() )\n\tvalues = utils.my_map(lambda x:str(options[x][2]),names)\n\trequired = utils.my_map(lambda x:[\"No\",\"Yes\"][options[x][0]],names)\n\tdescription = utils.my_map(lambda x:options[x][1],names)\n\tcols = []\n\tfor row in range(len(names)):\n\t\tcols.append([ names[row], values[row], required[row], description[row] ])\n\tutils.create_table(headers,cols)\n\n\t\n\t\n\ndef is_option(option):\n\ttry:\n\t\tblah = global_options[option.lower()][2]\n\t\treturn [blah]\n\texcept:\n\t\treturn False\n\ndef change_value(option,new_value):\n\tglobal_options[option.lower()][2] = new_value\n\ndef command_set(opt=False):\n\tif not opt:\n\t\terror(\"You must type an option first !\")\n\telif len( opt.split(\" \") ) < 2 and not \"=\" in opt:\n\t\terror(\"You must type a new value to the option !\")\n\telse:\n\t\tsplit_char = \" \"\n\t\tif \"=\" in opt:split_char = \"=\"\n\t\tsplitted = opt.split(split_char)\n\t\tx = is_option(splitted[0].lower())\n\t\tif type(x) is list:\n\t\t\tif type(x[0]) is bool:\n\t\t\t\tchange_value(splitted[0],x[0]==False)\n\t\t\t\tstatus( splitted[0] + \" => \" + str(x[0]==False) )\n\t\t\telse:\n\t\t\t\tchange_value( splitted[0], \" \".join(splitted[1:]) )\n\t\t\t\tstatus( splitted[0] + \" => \" + \" \".join(splitted[1:]) )\n\t\telse:\n\t\t\terror(\"Invalid option!\")\n\ndef command_run(text=False):\n\t# Options format : {\"name\":[ (0,1,2),description,value]}\n\t# Required --> 1 # Means that it must have value\n\t# Not required --> 0 # Means that it could have value or not\n\tfor key in global_options.keys():\n\t\tif global_options[key][0]==1 and not global_options[key][2].strip(): # A required option but has empty value\n\t\t\terror(\"Error! the following option have not been set (\"+ key + \")\" )\n\t\t\treturn\n\tmodule = importlib.import_module(utils.pythonize(\"core.modules.\"+Settings.running_module))\n\texec_info = getattr(module, \"execution\")\n\tif not Settings.headless_browser:\n\t\tSettings.headless_browser = browser.headless_browsers()\n\t\tcurrent_browser = {\"Status\":\"LOL\"}\n\t\tSettings.headless_browser.new_session(exec_info.name, exec_info.url, global_options[\"useragent\"][2])\n\telse:\n\t\tcurrent_browser = Settings.headless_browser.new_session(exec_info.name, exec_info.url, global_options[\"useragent\"][2])\n\n\tif current_browser[\"Status\"]==\"Duplicate\":\n\t\terror(\"Module already running!\")\n\telif current_browser[\"Status\"]==\"Failed\":\n\t\terror(\"Couldn't open Firefox! Check the installation instructions again!\")\n\telif current_browser[\"Status\"]==\"Invalid useragent\":\n\t\terror(\"Can't use this useragent! 
See the possible useragent values in the wiki!\")\n\telse:\n\t\t# RUN https://youtu.be/PTZ4L6cNNC4\n\t\t#current_browser = current_browser[\"Controller\"]\n\t\tif exec_info.module_type == types.grabber:\n\t\t\tSettings.headless_browser.website_qr(exec_info.name, exec_info.image_xpath) # Keeps QR image always updated and it runs in a thread too\n\t\t\tSettings.headless_browser.create_listener(exec_info.name, exec_info.change_identifier, exec_info.session_type)\n\t\t\tif exec_info.img_reload_button:\n\t\t\t\tSettings.headless_browser.check_img(exec_info.name, exec_info.img_reload_button) # This line will run in a thread too\n\t\t\tSettings.headless_browser.serve_module(exec_info.name, global_options[\"host\"][2], int(global_options[\"port\"][2]))\n\n\t\t\n\t\t\t\n\ndef command_close(text=False): # Another hidden command to use in debugging :D\n\tif Settings.headless_browser:\n\t\tSettings.headless_browser.close_all()\n\t\tSettings.headless_browser = None\n\ndef command_back(text=False):\n\tSettings.update_previous()\n\tSettings.running_module = False\n\tSettings.reset_name()\n\tif os.name!=\"nt\":\n\t\tutils.Input_completer(cli_keywords+modules )\n","sub_path":"him_bot/core/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"88621008","text":"import numpy as np\nimport json\nimport matplotlib.pyplot as plt\n\n# Load the inverted index with the features and the freqs:\nwith open('inv_index.json', 'r') as fp:\n inv_ix = json.load(fp)\n\nsorted_tags = np.sort(inv_ix.keys())\n\ndclass = np.array([int(l.strip().split(\" \")[1]) for l in open('classes')])\ndnames = np.array([l.strip().split(\" \")[0] for l in open('classes')])\n\n\ndoc_counts = json.load(open('count_index.json'))\n\nnegative_counts = {}\npositive_counts = {}\n\npositive_docs = np.sort(dnames[np.where(dclass == 1)])\nnegative_docs = np.sort(dnames[np.where(dclass == -1)])\n\n\nfor doc in positive_docs:\n total = float(doc_counts[doc]['total'])\n for tag in doc_counts[doc]:\n if tag != 'total':\n if not tag in positive_counts:\n positive_counts[tag] = []\n\n positive_counts[tag].append(doc_counts[doc][tag] / total)\n\n\nfor doc in negative_docs:\n total = float(doc_counts[doc]['total'])\n for tag in doc_counts[doc]:\n if tag != 'total':\n if not tag in negative_counts:\n negative_counts[tag] = []\n\n negative_counts[tag].append(doc_counts[doc][tag] / total)\n\n\npos_count_dist = {'positive':positive_counts, 'negative':negative_counts}\n\nwith open('pos_distribution.json', 'w') as fp:\n json.dump(pos_count_dist, fp)\n\nimport matplotlib.pyplot as plt\n\npos_names = dict(zip([u'A', u'C', u'D', u'F', u'I', u'N', u'P', u'R', u'S', u'V', u'W', u'Z'], [u'adjective', u'conjunction', u'determiner', u'punctuation', u'interjection', u'noun', u'pronoun', u'adverb', u'adposition', u'verb', u'date', u'number']))\n\n\n# removing punctuation\nif 'F' in positive_counts:\n del positive_counts['F']\n\nif 'F' in negative_counts:\n del negative_counts['F']\n\n\nsorted_pos_tags = np.sort(positive_counts.keys())\nsorted_neg_tags = np.sort(negative_counts.keys())\n\ndata_pos = []\nfor tag in sorted_pos_tags:\n data_pos.append(positive_counts[tag])\n\ndata_neg = []\nfor tag in sorted_neg_tags:\n data_neg.append(negative_counts[tag])\n\n\n\n\n\n\n\nf, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n\nax1.boxplot(data_pos)\nax1.set_title('Non-Schizo')\nax1.set_xticklabels([pos_names[x] for x in sorted_pos_tags], 
rotation='vertical')\nax1.grid()\n\nax2.boxplot(data_neg)\nax2.set_title('Schizo')\nax2.set_xticklabels([pos_names[x] for x in sorted_neg_tags], rotation='vertical')\nax2.grid()\nplt.suptitle('Portion of tag occurrences in each document')\n\n\n\n#ax.legend((rects1[0], rects2[0]), ('Positive', 'Negative'))\n#ax.set_xticklabels(new_tags)\n#ax.set_xticklabels([pos_names[x] for x in new_tags], rotation='vertical')\n#ax.set_xticks(ind + width / 2)\n#fig.subplots_adjust(bottom=0.18)\nplt.show()\n#plt.savefig(\"post_count.png\")\n\n\n'''\ntag_positive_counts = []\ntag_negative_counts = []\n\npositive_docs = set(dnames[np.where(dclass == 1)])\nnegative_docs = set(dnames[np.where(dclass == -1)])\n\n##########\n#### OPT 1: Grouping by POS\n##########\nnew_tags = []\nfor t in sorted_tags:\n if len(new_tags) == 0:\n new_tags.append(t[0])\n continue\n if new_tags[-1] != t[0]:\n new_tags.append(t[0])\n\n\nindex = 0\nfor tag in sorted_tags:\n if tag[0] == new_tags[index]:\n if len(tag_positive_counts) == 0:\n tag_positive_counts.append(0)\n tag_negative_counts.append(0)\n tag_positive_counts[index] += len(set(inv_ix[tag].keys()) & positive_docs)\n tag_negative_counts[index] += len(set(inv_ix[tag].keys()) & negative_docs)\n else:\n index += 1\n tag_positive_counts.append(len(set(inv_ix[tag].keys()) & positive_docs) )\n tag_negative_counts.append(len(set(inv_ix[tag].keys()) & negative_docs) )\n\n\n\n\n\npos_names = dict(zip([u'A', u'C', u'D', u'F', u'I', u'N', u'P', u'R', u'S', u'V', u'W', u'Z'], [u'adjective', u'conjunction', u'determiner', u'punctuation', u'interjection', u'noun', u'pronoun', u'adverb', u'adposition', u'verb', u'date', u'number']))\n\ntag_positive_counts = 100 * np.array(tag_positive_counts) / max(tag_positive_counts)\ntag_negative_counts = 100 * np.array(tag_negative_counts) / max(tag_negative_counts)\n\nwidth = 0.35\n#ind = np.arange(len(sorted_tags))\nind = np.arange(len(new_tags))\n\n\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, tag_positive_counts, width, color='r')\nrects2 = ax.bar(ind + width, tag_negative_counts, width, color='y')\nax.set_title('POS tag doc-count per class (No-Schizo vs Schizo')\nax.legend((rects1[0], rects2[0]), ('Positive', 'Negative'))\n#ax.set_xticklabels(new_tags)\nax.set_xticklabels([pos_names[x] for x in new_tags], rotation='vertical')\nax.set_xticks(ind + width / 2)\nfig.subplots_adjust(bottom=0.18)\n#plt.show()\nplt.savefig(\"post_count.png\")\n\n'''","sub_path":"textos esquizofrenia/src/plot_featuredistribution.py","file_name":"plot_featuredistribution.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"521884255","text":"from lib import *\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pylab as plt\r\nimport mysql.connector\r\nimport json\r\nfrom time import time \r\nfrom sklearn.metrics.pairwise import rbf_kernel\r\n\r\ndef connectDB():\r\n conn = mysql.connector.connect(\r\n host = host,\r\n user = user,\r\n passwd = passwd,\r\n database = database\r\n )\r\n cursor = conn.cursor() \r\n # show tables\r\n cursor.execute('SHOW TABLES')\r\n for x in cursor:\r\n print(x)\r\n return conn, cursor\r\n\r\ndef createTable(cursor, tablename, colname, col_dtype):\r\n cols = ['`'+str(i)+'`' for i in colname]\r\n col_string = ','.join([x+' '+y for x, y in zip(cols, col_dtype)])\r\n execute_string = \"CREATE TABLE IF NOT EXISTS \"+ tablename +\" (\"+col_string+\")\"\r\n cursor.execute(execute_string)\r\n # show table schema\r\n 
cursor.execute('SHOW COLUMNS FROM '+tablename)\r\n    for x in cursor:\r\n        print(x)\r\n\r\ndef exec_sql(sql):\r\n    try:\r\n        cursor.execute(sql)\r\n        return cursor\r\n    except Exception:\r\n        print(\"Exec Error!!\", sql)\r\n\r\ndef json_to_array(raw, cols1):\r\n    #print('json_to_array()')\r\n    #print('raw:', raw)\r\n    raw = json.loads(raw)\r\n    data = [-1]*len(cols1)\r\n    for i, col in enumerate(cols1):\r\n        if col in raw.keys(): data[i] = raw[col]\r\n    return data\r\n\r\ndef transform_to_df(results):\r\n    cols0 = ['time', 'flag_working', 'flag_change_part', 'uuid', 'machine_num', 'floor', 'location']\r\n    cols1 = ['x_pos', 'y_pos', 'z_pos', 'OPstate', 'F_actual', 'cycletime', 'feedratio', \\\r\n             'shop_name','workcount', 'RPM_actual', 'cuttingTime', 'poweronTime', 'spindle_load', \\\r\n             'spindle_temp', 'executionFlag', 'operatingTime', 'currenttoolnum', \"tool_preset_life_01\", \\\r\n             \"tool_preset_life_02\", \"tool_preset_life_03\", \"tool_preset_life_04\", \"tool_preset_life_05\", \\\r\n             \"tool_preset_life_11\", \"tool_preset_life_12\", \"tool_preset_life_13\", \"tool_preset_life_14\", \\\r\n             \"tool_preset_life_15\", \"tool_current_life_01\", \"tool_current_life_02\", \"tool_current_life_03\", \\\r\n             \"tool_current_life_04\", \"tool_current_life_05\", \"tool_current_life_11\", \"tool_current_life_12\", \\\r\n             \"tool_current_life_13\", \"tool_current_life_14\", \"tool_current_life_15\"]\r\n    #print('results:', results)\r\n    all_data = []\r\n    for row in results:\r\n        data = [row[0], row[2], row[3], row[4], row[5], row[6], row[7]] + json_to_array(row[1], cols1)\r\n        all_data.append(data)\r\n    df = pd.DataFrame(all_data, columns=cols0+cols1)\r\n\r\n    df['tool_current_life'] = np.where(df.currenttoolnum == 1, df.tool_current_life_01,\r\n                              np.where(df.currenttoolnum == 2, df.tool_current_life_02,\r\n                              np.where(df.currenttoolnum == 3, df.tool_current_life_03,\r\n                              np.where(df.currenttoolnum == 4, df.tool_current_life_04,\r\n                              np.where(df.currenttoolnum == 5, df.tool_current_life_05, 0)))))\r\n    df['tool_preset_life'] = np.where(df.currenttoolnum == 1, df.tool_preset_life_01,\r\n                             np.where(df.currenttoolnum == 2, df.tool_preset_life_02,\r\n                             np.where(df.currenttoolnum == 3, df.tool_preset_life_03,\r\n                             np.where(df.currenttoolnum == 4, df.tool_preset_life_04,\r\n                             np.where(df.currenttoolnum == 5, df.tool_preset_life_05, 0)))))\r\n    return df\r\n\r\n\r\ndef loadData(location, floor, machine_num, toolnum):\r\n    ### step 1: find the starting timestamp (part_start_time) for pulling data from basic_analysis_data\r\n    condition0 = \" where location='\" + location + \"' and floor='\" + floor + \"' and machine_num='\" + machine_num + \"' and toolnum='\" + str(toolnum) + \"' \"\r\n    sql = \"select part_start_time from health_inference\"+ condition0 + \"order by part_start_time desc limit 1\"\r\n    cursor = exec_sql(sql)\r\n    results = cursor.fetchall()\r\n    if len(results) == 0:\r\n        print('health_inference table of currenttoolnum is empty!')\r\n        condition1 = \" where location='\" + location + \"' and floor='\" + floor + \"' and machine_num='\" + machine_num + \"' \"\r\n        sql = \"select date_time from basic_analysis_data\" + condition1 + \"and flag_change_part='1' order by date_time desc limit 30\"\r\n        #sql = \"select date_time from basic_analysis_data\" + condition1 + \"limit 3000\"\r\n        cursor = exec_sql(sql)\r\n        results = cursor.fetchall()\r\n        if len(results) < 2: # fewer than 2 parts in basic_analysis_data, skip the calculation for now\r\n            return pd.DataFrame(), np.nan\r\n        part_start_time = results[-1][0] # use the first part's time as part_start_time\r\n    else:\r\n        part_start_time = results[0][0] # fetchall returns 1-tuples, so unpack the value\r\n    print('len of results:', len(results))\r\n    
print('part_start_time:', part_start_time)\r\n\r\n    ### step 2: fetch every row later than part_start_time from basic_analysis_data; it should contain >= 3 tool-change records\r\n    print('fetch data')\r\n    condition1 = \" where location='\" + location + \"' and floor='\" + floor + \"' and machine_num='\" + machine_num + \"' \"\r\n    sql = \"select * from basic_analysis_data\" + condition1 + \"and date_time > '\" + str(part_start_time) + \"' \"\r\n    cursor = exec_sql(sql) # pull all rows in basic_analysis_data newer than part_start_time\r\n    results = cursor.fetchall()\r\n    raw_df = transform_to_df(results)\r\n    return raw_df, part_start_time\r\n\r\ndef getTrainData(train, mean_load_len=30):\r\n    k=5\r\n    avg_mask = np.ones((k,))/k\r\n    spindle_load_train = []\r\n    tool_remain_life_train = []\r\n    for machine in train.machine_num.unique():\r\n        df_machine = train[(train.machine_num==machine)]\r\n        for tool_group in df_machine.toolGroup.unique():\r\n            df_machine_tool = df_machine[(df_machine.toolGroup==tool_group)]\r\n            for i in range(len(df_machine_tool)-mean_load_len):\r\n                if ((len(df_machine_tool)-i) < 200) & (i % 5 == 0):\r\n                    spd_mean = df_machine_tool.iloc[i:i+mean_load_len]['spindle_load_mean'].values\r\n                    spd_mean_avg = np.convolve(spd_mean, avg_mask, mode='valid')\r\n                    spindle_load_train.append(spd_mean_avg)\r\n                    tool_remain_life_train.append(df_machine_tool.iloc[i+mean_load_len]['tool_remain_life'])\r\n                elif i % (mean_load_len/2) == 0:\r\n                    spd_mean = df_machine_tool.iloc[i:i+mean_load_len]['spindle_load_mean'].values\r\n                    spd_mean_avg = np.convolve(spd_mean, avg_mask, mode='valid')\r\n                    spindle_load_train.append(spd_mean_avg)\r\n                    tool_remain_life_train.append(df_machine_tool.iloc[i+mean_load_len]['tool_remain_life'])\r\n    #print(len(spindle_load_train)) #534\r\n    return spindle_load_train, tool_remain_life_train\r\n\r\ndef mmdTrain(spindle_load_train, tool_remain_life_train, spindle_load_test):\r\n    start = time()\r\n    min_list = []\r\n    min_ten_list = []\r\n    mmd_ten_list = []\r\n    for i in range(len(spindle_load_test)): #1356\r\n        x1 = spindle_load_test[i]\r\n        mmd_result = []\r\n        for j in range(len(spindle_load_train)):\r\n            if i < 50:\r\n                if tool_remain_life_train[j] > (50-i-15):\r\n                    flag=True\r\n                else:\r\n                    flag=False\r\n            else:\r\n                flag=True\r\n            if (tool_remain_life_train[j] < (1000 - i + 15)) & flag:\r\n                print(i, ':', j)\r\n                x2 = spindle_load_train[j]\r\n                if np.ndim(x1)==1: x1 = np.reshape(x1, (-1,1))\r\n                if np.ndim(x2)==1: x2 = np.reshape(x2, (-1,1))\r\n                mmd_result.append(MMD_computing(x1, x2))\r\n            else:\r\n                mmd_result.append(np.inf)\r\n        mmd_df = pd.DataFrame({'remain_life':tool_remain_life_train, \r\n                               'mmd':mmd_result})\r\n        \r\n        #---- threshold\r\n        mmd_df['mmd'] = abs(mmd_df['mmd'])\r\n        mmd_df['threshold'] = mmd_df['mmd'] < 0.0006\r\n        mmd_df = mmd_df.loc[mmd_df['threshold']]\r\n        if len(mmd_df)!=0:\r\n            mmd_df.sort_values(by='mmd', inplace=True)\r\n            min_list.append(mmd_df.remain_life.iloc[0])\r\n            min_ten_list.append(mmd_df.remain_life.values)\r\n            mmd_ten_list.append(mmd_df.mmd.iloc[:10].values) \r\n        else:\r\n            min_list.append(0)\r\n            min_ten_list.append(np.array([0,0]))\r\n            mmd_ten_list.append(np.array([0,0])) \r\n        '''\r\n        #---- 10\r\n        mmd_df['mmd'] = abs(mmd_df['mmd'])\r\n        mmd_df.sort_values(by='mmd', inplace=True)\r\n        min_list.append(mmd_df.remain_life.iloc[0])\r\n        min_ten_list.append(mmd_df.remain_life.iloc[:10].values)\r\n        mmd_ten_list.append(mmd_df.mmd.iloc[:10].values)\r\n        '''\r\n    print('time:', time() - start)\r\n    return min_list, min_ten_list, mmd_ten_list\r\n\r\ndef sigma_computing(x1,x2):\r\n    n, nfeatures = x1.shape\r\n    m, mfeatures = x2.shape\r\n    k1 = np.sum((x1*x1), 
1)\r\n    q = np.tile(k1, (m, 1)).transpose()\r\n    del k1\r\n    k2 = np.sum((x2*x2), 1)\r\n    r = np.tile(k2, (n, 1))\r\n    del k2\r\n    h = q + r\r\n    del q,r\r\n    \r\n    h = h-2*np.dot(x1,x2.transpose())\r\n    h = np.array(h, dtype=float)\r\n    mdist = np.median([i for i in h.flat if i])\r\n    sigma = np.sqrt(mdist/2.0)\r\n    if not sigma: sigma = 1\r\n    return sigma\r\n\r\ndef MMD_computing(x1,x2):\r\n    K11 = rbf_kernel(x1,x1,sigma_computing(x1,x1))\r\n    K22 = rbf_kernel(x2,x2,sigma_computing(x2,x2))\r\n    K12 = rbf_kernel(x1,x2,sigma_computing(x1,x2))\r\n    \r\n    m = K11.shape[0]\r\n    n = K22.shape[0]\r\n    t11 = (1./(m*(m-1)))*np.sum(K11 - np.diag(np.diagonal(K11)))\r\n    t22 = (1./(n*(n-1)))* np.sum(K22 - np.diag(np.diagonal(K22)))\r\n    t12 = (2./(m*n)) * np.sum(K12)\r\n    MMD = t11 + t22 - t12\r\n    return MMD\r\n\r\ndef inferenceData(train, raw_df, part_start_time, location, floor, machine_num, toolnum):\r\n    raw_df = raw_df[raw_df.currenttoolnum==toolnum]\r\n    raw_df = raw_df[raw_df.flag_working==1]\r\n    if len(raw_df)==0:\r\n        print('non working data!')\r\n        return\r\n    raw_df['part_no'] = raw_df['workcount'] + raw_df['tool_current_life'] * 0.0001 #### get unique part * 0.0001\r\n    raw_df['changeTool'] =\\\r\n        (raw_df['tool_current_life'].diff() <0) & \\\r\n        (raw_df['tool_current_life']<60) & \\\r\n        ((raw_df['tool_current_life'].diff(periods=-1)+50>0) | np.isnan(raw_df['tool_current_life'].diff(periods=-1)))\r\n    raw_df['toolGroup'] = raw_df['changeTool'].cumsum()\r\n    train1 = train[train['currenttoolnum']==toolnum]\r\n    spindle_load_train, tool_remain_life_train = getTrainData(train1) \r\n    row_list = []\r\n    for part in raw_df.part_no.unique():\r\n        # get last_mean_load from db\r\n        #conn, cursor = connectDB()\r\n        condition0 = \" where location='\" + location + \"' and floor='\" + floor + \"' and machine_num='\" + machine_num + \"' and toolnum='\" + str(toolnum) + \"' \"\r\n        sql = \"select mean_load from health_inference\"+ condition0 + \"and part_start_time > '\" + str(part_start_time) +\"' order by part_start_time desc limit 30\"\r\n        cursor = exec_sql(sql)\r\n        last_mean_load = [row[0] for row in cursor.fetchall()] # flatten the 1-tuples fetchall returns\r\n        if len(last_mean_load) < 30: \r\n            last_mean_load = [-1]*14\r\n        \r\n        # get last_tool_current_life from db\r\n        #conn, cursor = connectDB()\r\n        condition0 = \" where location='\" + location + \"' and floor='\" + floor + \"' and machine_num='\" + machine_num + \"' and toolnum='\" + str(toolnum) + \"' \"\r\n        sql = \"select tool_current_life from health_inference\"+ condition0 + \"order by part_start_time desc limit 1\"\r\n        cursor = exec_sql(sql)\r\n        rows = cursor.fetchall()\r\n        last_tool_current_life = rows[0][0] if rows else -1\r\n\r\n        part_df = raw_df[raw_df.part_no==part]\r\n        part_start_time = part_df['time'].iloc[0]\r\n        part_end_time = part_df['time'].iloc[-1]\r\n        part_start_time = pd.to_datetime(str(part_start_time))\r\n        part_start_time = part_start_time.strftime('%Y-%m-%d %H:%M:%S')\r\n        part_end_time = pd.to_datetime(str(part_end_time))\r\n        part_end_time = part_end_time.strftime('%Y-%m-%d %H:%M:%S')\r\n        mean_load = part_df['spindle_load'].mean()\r\n        row1 = part_df.iloc[0]\r\n        \r\n        new_tool = (row1['tool_current_life'] - last_tool_current_life < 0) & \\\r\n                   (row1['tool_current_life']<60)\r\n        new_tool = int(np.where(new_tool, 1, 0))\r\n        \r\n        # inference\r\n        if last_mean_load != [-1]*14:\r\n            spindle_load_test = last_mean_load + [mean_load]\r\n            k=5\r\n            smooth_mask = np.ones((k,))/k\r\n            spindle_load_test = [np.convolve(spindle_load_test, smooth_mask, mode='valid')]\r\n            min_list, min_ten_list, 
mmd_ten_list = mmdTrain(spindle_load_train, tool_remain_life_train, spindle_load_test)\r\n            predict_remain_life = np.mean(min_ten_list)\r\n        else:\r\n            predict_remain_life = -1\r\n        if (predict_remain_life > 30) | (predict_remain_life == -1):\r\n            alarm_condition = 0\r\n        else:\r\n            alarm_condition = 1\r\n        row = [part_start_time, part_end_time, '', row1['location'], row1['floor'], row1['machine_num'],\\\r\n               row1['currenttoolnum'], row1['tool_preset_life'], row1['tool_current_life'], row1['workcount'], mean_load, new_tool, \\\r\n               predict_remain_life, alarm_condition, 1]\r\n        row = [str(x) for x in row]\r\n        row_list.append(tuple(row))\r\n        \r\n        # insert 1 row into db\r\n        colname = ['part_start_time', 'part_end_time', 'uuid', 'location', 'floor', 'machine_num', \\\r\n                   'toolnum', 'tool_preset_life', 'tool_current_life', 'workcount', 'mean_load', 'new_tool', \\\r\n                   'predict_remain_life', 'alarm_condition', 'model_version']\r\n        #colname = colname[:]\r\n        cols = \",\".join([str(i) for i in colname])\r\n        val = [row_list[-1]] # only the newest row, so earlier parts are not inserted twice\r\n        #val = [tuple(row[:]) for row in row_list]\r\n        sql = \"INSERT INTO health_inference (\"+cols+\") VALUES (\"+ \"%s,\"*(len(val[0])-1) +\"%s)\" \r\n        cursor.executemany(sql, val)\r\n        conn.commit()\r\n        print(cursor.rowcount, \"rows inserted successfully.\")\r\n    \r\n###############################################################################################\r\nconn, cursor = connectDB()\r\n#cursor.execute('DROP TABLE health_inference')\r\ncolname = ['part_start_time', 'part_end_time', 'uuid', 'location', 'floor', 'machine_num', \\\r\n           'toolnum', 'tool_preset_life', 'tool_current_life', 'workcount', 'mean_load', 'new_tool', \\\r\n           'predict_remain_life', 'alarm_condition', 'model_version']\r\ncol_dtype = ['DATETIME(3)', 'DATETIME(3)', 'VARCHAR(25)', 'VARCHAR(15)', 'VARCHAR(15)', 'VARCHAR(5)', \\\r\n             'SMALLINT(6)', 'SMALLINT(6)', 'SMALLINT(6)', 'SMALLINT(6)', 'FLOAT', 'TINYINT(1)', \\\r\n             'FLOAT', 'TINYINT(1)', 'SMALLINT(6)']\r\ncreateTable(cursor, 'health_inference', colname = colname, col_dtype=col_dtype)\r\n\r\ntrain = pd.read_csv(csvDir + 'all_spindle_load_all.csv')\r\nlocations = ['GL']\r\nfloors = ['C04-1F']\r\n#machine_nums = ['D09']\r\nmachine_nums = ['D08', 'D09', 'D10', 'D11', 'D12']\r\ntoolnums = [2, 3, 4, 5]\r\nfor location in locations:\r\n    for floor in floors:\r\n        for machine_num in machine_nums:\r\n            for toolnum in toolnums:\r\n                print('@@@ start ', location, floor, machine_num, toolnum,'@@@')\r\n                raw_df, part_start_time = loadData(location, floor, machine_num, toolnum)\r\n                if len(raw_df)!=0:\r\n                    inferenceData(train, raw_df, part_start_time, location, floor, machine_num, toolnum)\r\n\r\n\r\n'''\r\nrow_list = []\r\nrow = ('2019-10-27 07:19:31',\r\n '2019-10-27 07:22:37',\r\n '',\r\n 'C04-1F',\r\n 'line',\r\n 'D09',\r\n 2,\r\n 900,\r\n 296,\r\n 16819,\r\n 13.381443298969073,\r\n False,\r\n 0.0,\r\n 1,\r\n 1)\r\nrow = row[:4]\r\nrow_list.append(tuple(row))\r\ncolname = ['part_start_time', 'part_end_time', 'uuid', 'location', 'floor', 'machine_num', \\\r\n           'toolnum', 'tool_preset_life', 'tool_current_life', 'workcount', 'mean_load', 'new_tool', \\\r\n           'predict_remain_life', 'alarm_condition', 'model_version']\r\ncolname = colname[:4]\r\ncols = \",\".join([str(i) for i in colname])\r\nsql = \"INSERT INTO health_inference (\"+cols+\") VALUES (\"+ \"%s,\"*(len(row_list[0])-1) +\"%s)\"\r\nval = row_list\r\ncursor.executemany(sql, val)\r\nconn.commit()\r\nprint(cursor.rowcount, \"rows inserted successfully.\")\r\n'''\r\n\r\n\r\n\r\n'''df_list = []\r\ndata1 = 
pd.read_csv('D:\\\\Sources\\\\Data\\\\Data-CAA_CNC\\\\data\\\\all_spindle_load2.csv')\r\ndata1.groupby(['machine_num', 'toolGroup'])['tool_current_life'].max()\r\ndata1['flagLife300'] = data1.groupby(['machine_num', 'toolGroup'])['tool_current_life'].transform(lambda x: max(x)>300)\r\ndata1 = data1[data1['flagLife300']]\r\ntrain = data1[data1['machine_num']!='D09']\r\ntrain = train[['shop_name', 'machine_num', 'currenttoolnum', 'toolGroup', 'spindle_load_mean', 'tool_remain_life']]\r\ndf_list.append(train)\r\ndf_final = pd.concat(df_list, axis=0)\r\ndf_final.to_csv('D:\\\\Sources\\\\Data\\\\Data-CAA_CNC\\\\data\\\\all_spindle_load_all.csv')\r\n'''\r\n#raw_df.to_csv('D:\\\\Sources\\\\Data\\\\Data-CAA_CNC\\\\data\\\\raw_df_test.csv')","sub_path":"health_inference.py","file_name":"health_inference.py","file_ext":"py","file_size_in_byte":17125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"328345001","text":"\"\"\"\ncoinfee library for http://coinfee.net/\n\nReleased into the public domain.\n\"\"\"\n\nfrom collections import namedtuple\nfrom warnings import warn\nimport json\nimport urllib2\n\nimport yaml\n\n\nCOINFEE_URL = 'https://coinfee.net/payment'\n\nTIMEOUT = 60\n\n\ndef payment(address,\n            satoshis,\n            unique,\n            fee_address=None,\n            fee=None,\n            coinfee_url=COINFEE_URL):\n    \"\"\"\n    Processes a payment with coinfee.\n\n    Returns:\n    coinfee_payment.address which is the address to pay.\n    coinfee_payment.satoshis which is the total amount that needs to be paid.\n    coinfee_payment.status True/False, if payment has been finished.\n    coinfee_payment.just_paid True/False, if sent in the current request.\n    \"\"\"\n    pre_data = {'address': address,\n                'satoshis': satoshis,\n                'unique': unique}\n    if fee_address is not None and fee is not None:\n        pre_data['fee_address'] = fee_address\n        pre_data['fee'] = fee\n    post_data = json.dumps(pre_data)\n    try:\n        http_return = urllib2.urlopen(coinfee_url,\n                                      data=post_data,\n                                      timeout=TIMEOUT)\n    except urllib2.HTTPError as http_error:\n        if http_error.code == 400:\n            # Throw exception with output from coinfee.\n            raise Exception(http_error.read())\n        else:\n            raise\n    if http_return.getcode() == 200:\n        data = yaml.safe_load(http_return.read())\n        if 'deprecated' in data:\n            if data['deprecated'] is not False:\n                warn(str(data['deprecated']), DeprecationWarning)\n        coinfee_payment = namedtuple('coinfee_payment',\n                                     ['address',\n                                      'satoshis',\n                                      'status',\n                                      'just_paid'])\n        coinfee_payment.address = data['address']\n        coinfee_payment.satoshis = data['satoshis']\n        coinfee_payment.status = data['status']\n        coinfee_payment.just_paid = data['just_paid']\n        return coinfee_payment\n    else:\n        raise Exception('Fatal issue with coinfee.')\n","sub_path":"coinfee/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"523466633","text":"# Substitution Cipher\n# https://en.wikipedia.org/wiki/Substitution_cipher\n\nimport sys\nimport random\n\nLETTERS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\ndef main():\n    # message = \"If a man is offered a fact which goes against his instincts, he will scrutinize it closely, and unless the evidence is overwhelming, he will refuse to believe it.\"\n    message = \"Sy l nlx sr pyyacao l ylwj eiswi upar lulsxrj isr sxrjsxwjr, ia esmm rwctjsxsza sj wmpramh, lxo txmarr jia aqsoaxwa sr pqaceiamnsxu, ia esmm caytra jp famsaqa sj.\"\n\n    key = \"LFWOAYUISVKMNXPBDCRJTQEGHZ\"\n    mode = 'd' # Set to (e)ncrypt or 
(d)ecrypt\n\n if not key_is_valid(key):\n sys.exit(\"There is an error in the key or symbol set.\")\n \n if mode == 'e':\n translated = encrypt_message(message, key)\n elif mode == 'd':\n translated = decrypt_message(message, key)\n \n print(\"Using key %s, the message is:\" % (key))\n print(translated)\n\ndef key_is_valid(key):\n key_list = list(key)\n letters_list = list(LETTERS)\n\n key_list.sort()\n letters_list.sort()\n\n return key_list == letters_list\n\n\ndef encrypt_message(message, key):\n translated = \"\"\n\n for symbol in message:\n if symbol.upper() in LETTERS:\n if symbol.isupper():\n translated += key[LETTERS.find(symbol.upper())].upper()\n else:\n translated += key[LETTERS.find(symbol.upper())].lower()\n \n else:\n translated += symbol\n \n return translated\n\n\ndef decrypt_message(message, key):\n translated = \"\"\n\n for symbol in message:\n if symbol.upper() in key:\n if symbol.isupper():\n translated += LETTERS[key.find(symbol.upper())].upper()\n else:\n translated += LETTERS[key.find(symbol.upper())].lower()\n \n else:\n translated += symbol\n \n return translated\n\n\ndef get_random_key():\n key = list(LETTERS)\n random.shuffle(key)\n return \"\".join(key)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/substitution_cipher.py","file_name":"substitution_cipher.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"200933481","text":"from database_connection import engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom model import UserProfile, Card, Supply, WaterSupply, PowerSupply, GasSupply\r\nfrom sqlalchemy import func\r\n\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\nbob = UserProfile(user_name='Bob', user_phone='+380123456789', user_email='bob@gmail.com')\r\nboba = UserProfile(user_name='Boba', user_phone='+380123456780', user_email='boba@gmail.com')\r\n\r\nsession.add(bob)\r\nsession.commit()\r\nsession.add(boba)\r\nsession.commit()\r\n\r\nbob_id = (session.query(UserProfile).filter(UserProfile.user_phone == '+380123456789')[0]).user_id\r\nboba_id = (session.query(UserProfile).filter(UserProfile.user_phone == '+380123456780')[0]).user_id\r\n\r\nbobs_card = Card(user_id_fk = bob_id, card_number = '1234567890123456', card_name = 'Bob Bobovich', card_date = func.current_date(), card_ccv ='123')\r\nbobas_card = Card(user_id_fk = boba_id, card_number = '0987654321098765', card_name = 'Boba Bobovna', card_date = func.current_date(), card_ccv ='777')\r\nbobs_supply = Supply(user_id_fk = bob_id, water_supply_id = 123456, power_supply_id = 123, gas_supply_id = 5321123)\r\nbobas_supply = Supply(user_id_fk = boba_id, water_supply_id = 654321, power_supply_id = 321, gas_supply_id = 3211235)\r\n\r\nwater1 = WaterSupply(water_supply_id = 123456, water_hot_previous = 1.1, water_hot_current=1.5)\r\npower1 = PowerSupply(power_supply_id = 123, power_reading=11)\r\ngas1 = GasSupply(gas_supply_id = 5321123, gas_reading=4)\r\nwater2 = WaterSupply(water_supply_id = 654321, water_cold_previous = 0.5, water_cold_current= 0.7)\r\npower2 = PowerSupply(power_supply_id = 321, power_reading=6)\r\ngas2 = GasSupply(gas_supply_id = 3211235, gas_reading=3)\r\n\r\n\r\ninstances = [bobs_card, bobs_supply, bobas_card, bobas_supply, water1, power1, gas1, water2, power2, gas2]\r\nfor ins in instances:\r\n session.add(ins)\r\n session.commit()\r\n\r\n","sub_path":"Artur 
Safonov/workshop4/source/initiator.py","file_name":"initiator.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"500462903","text":"#coding=utf-8\n'''\n    Black-and-white tile elimination\n'''\n\nfrom yuanneng.map.copyroom import CopyRoom,register_special_copyroom\nfrom yuanneng.monster import *\nfrom yuanneng import team\nfrom random import randint, choice\nfrom yuanneng import transmit\nfrom yuanneng.proto import activity_pb2, map_pb2, role_pb2\nfrom yuanneng.status import *\nfrom yuanneng.map import map\nimport numpy\nfrom random import shuffle\nfrom copy import copy\nimport datetime\nimport cPickle\nfrom time import time\nfrom yuanneng import world\nfrom yuanneng.cmd import *\nimport logging\nfrom yuanneng import mc\nBLACK_CHESS_TID = 90007\nWHITE_CHESS_TID = 90008\nBOMB_TID = 90009\nMAX_NUM = 6\nCHESS_MAP = 5005\nMONSTER_TIDS = [BLACK_CHESS_TID, WHITE_CHESS_TID, BOMB_TID]\n\ncan_clean_tbl = []\ndef init():\n    register_special_copyroom(CHESS_MAP, Chessmap)\n    register_special_monster(BLACK_CHESS_TID, ChessMonster)\n    register_special_monster(BOMB_TID, ChessMonster)\n    register_special_monster(WHITE_CHESS_TID, ChessMonster)\n    global can_clean_tbl\n    for x in range(0, MAX_NUM):\n        for y in range(0, MAX_NUM):\n            temp = []\n            temp_pt = []\n            for y_inc in range(y, MAX_NUM):\n                temp_pt.append(x*MAX_NUM + y_inc)\n                if len(temp_pt) >= 5:\n                    tp_cp = copy(temp_pt)\n                    tp_cp.sort()\n                    temp.append(tp_cp)\n            temp_pt = []\n            for x_inc in range(x, MAX_NUM):\n                temp_pt.append(x_inc* MAX_NUM + y)\n                if len(temp_pt) >=5:\n                    tp_cp = copy(temp_pt)\n                    tp_cp.sort()\n                    temp.append(tp_cp)\n            y_inc = y\n            temp_pt = []\n            for x_inc in range(x, MAX_NUM): \n                if y_inc > MAX_NUM - 1:\n                    break\n                temp_pt.append(x_inc*MAX_NUM+ y_inc)\n                if len(temp_pt) >= 5:\n                    tp_cp = copy(temp_pt)\n                    tp_cp.sort()\n                    temp.append(tp_cp)\n                y_inc += 1\n            y_inc = y\n            temp_pt = []\n            for x_inc in range(x, MAX_NUM):\n                if y_inc < 0:\n                    break\n                temp_pt.append(x_inc*MAX_NUM + y_inc)\n                if len(temp_pt) >= 5:\n                    tp_cp = copy(temp_pt)\n                    tp_cp.sort()\n                    temp.append(tp_cp)\n                y_inc -= 1\n            if temp:\n                can_clean_tbl += temp\n\n\n\nclass Chessmap(CopyRoom):\n    def __init__(self, tid, team_id = 0):\n        CopyRoom.__init__(self,tid,team_id = team_id)\n        self.check_time = 0\n        monsters = [BLACK_CHESS_TID] * 19 + [WHITE_CHESS_TID] * 19 +[BOMB_TID] * 2\n        shuffle(monsters)\n        monsters = monsters[0:MAX_NUM * MAX_NUM]\n        pt_typ = numpy.reshape(monsters, (MAX_NUM,MAX_NUM))\n        self.pt_monster_ids = {} # maps monster gid to board point index\n        self.pt_to_monster = {} # maps (x, y) coordinates to monster objects\n        self.init_time = time() # time the map was entered\n        self.double_exp = 0 # timer for the double-exp effect\n        self.hit = [0, 0] # [combo count, last combo time]\n        self.player_tbl = {}\n        self.same_monster_tbl = {} # monsters grouped by tid: {tid: [gid, ...]}\n        for x in range(0, MAX_NUM):\n            temp_pt = []\n            for y in range(0, MAX_NUM):\n                new_monster = ChessMonster(int(pt_typ[x][y]), self.get_available_id(), self, x* 3+7 , y * 3 + 10)\n                self.add_to(new_monster, silent = True)\n                self.pt_monster_ids[new_monster.gid] = x * MAX_NUM + y\n                self.pt_to_monster[(x,y)] = new_monster\n                if new_monster.tid not in self.same_monster_tbl:\n                    self.same_monster_tbl[new_monster.tid] = [new_monster.gid]\n                else:\n                    self.same_monster_tbl[new_monster.tid].append(new_monster.gid)\n\n    def heart_beat(self):\n        map.Map.heart_beat(self)\n\n    def per_second_trigger(self, now, now_int):\n        CopyRoom.per_second_trigger(self, now, now_int)\n        self.check_time = time() - self.init_time\n        if self.check_time >= 700:\n            for obj in self.player_tbl.values():\n                if obj.is_player():\n                    map_id, x, y = 
map.query_comp_map(self.tid)\n                    transmit.transmit_to(obj, map_id, x, y)\n        if self.double_exp:\n            if time() - self.double_exp >= 60:\n                self.double_exp = 0\n    \n    def lot_revive(self, need_revive):\n        monsters = [BLACK_CHESS_TID] * 19 + [WHITE_CHESS_TID] * 19 +[BOMB_TID] * 2\n        shuffle(monsters)\n        res = activity_pb2.BornMonsters()\n        count = 0\n        typ = [1]*2 + [2]*2 + [3]*2 + [0]*94\n        shuffle(typ)\n        hits = 0\n        for monster_id in copy(need_revive):\n            _monster = self.objs[monster_id]\n            if _monster.eff:\n                if _monster.eff == 1:\n                    hits += 10\n                else:\n                    _monster.monster_change()\n            self.same_monster_tbl[_monster.tid].remove(_monster.gid)\n            _monster.tid = monsters[count]\n            if _monster.tid not in self.same_monster_tbl:\n                self.same_monster_tbl[_monster.tid] = [_monster.gid]\n            else:\n                self.same_monster_tbl[_monster.tid].append(_monster.gid)\n            new_m = res.born_monster.add()\n            new_m.x = _monster.x\n            new_m.y = _monster.y\n            new_m.tid = _monster.tid\n            new_m.gid = _monster.gid\n            new_m.typ = typ[count] if new_m.tid != BOMB_TID else 0\n            _monster.eff = new_m.typ\n            _monster.revive_add_attrs(_monster.eff)\n            count += 1\n        res.hits = 10 if hits else 0\n        if hits:\n            time_now = time()\n            if time_now - self.hit[1] > 15:\n                self.hit[0] = 10\n            else:\n                self.hit[0] += 10\n            self.hit[1] = time_now\n        self.tell_room(\"J09\",1,res)\n    def same_revive(self, monster_tid):\n        if monster_tid not in self.same_monster_tbl:\n            return\n        if not self.same_monster_tbl[monster_tid]:\n            return\n        self.lot_revive(self.same_monster_tbl[monster_tid])\n\n    def add_to(self,obj, silent = False, with_point = True):\n        if obj.is_player() and not hasattr(self,\"member_info\"):\n            self.member_info = obj.lv_arg\n            avg_lv = self.member_info[1]\n            for m_obj in self.objs.itervalues():\n                if m_obj.is_monster():\n                    m_obj.build_fight(avg_lv)\n        if obj.is_player():\n            self.player_tbl[obj.gid] = obj\n            res = map_pb2.CopyLevel()\n            res.level = self.member_info[1]\n            obj.tell_object('V05', 1, res)\n        CopyRoom.add_to(self, obj, silent = silent, with_point = with_point)\n\n    def remove(self,obj,net_closed = False, silent = False, with_point = True):\n        map.Map.remove(self,obj,net_closed, silent, with_point)\n        if obj.is_player():\n            # team copy and the player left on purpose: remove from the roster (keep them on disconnect)\n            del self.player_tbl[obj.gid]\n            if self.team_id and not net_closed:\n                team_copy = mc.get_team_copy(self.team_id)\n                team_copy[1].remove(obj.gid)\n                mc.set_team_copy(self.team_id,team_copy)\n        player_num = len(self.player_tbl)\n        if player_num == 0:\n            self.destruct_copy()\n    def complete(self):\n        pass\n\nclass ChessMonster(ScalaMonster):\n    def __init__(self,tid,gid,map_obj,x,y):\n        ScalaMonster.__init__(self,tid,gid,map_obj,x,y, heart = False)\n        self.heart = False\n        self.eff = 0 # 0 no effect, 1 chain-clear combo +10, 2 attack +200%, 3 double exp\n\n    def heart_beat(self):\n        pass\n\n\n    def die(self,kill_owner = None, kill_owner_gid = 0):\n        self.hp = 0\n        self.mp = 0\n        self.set_status(ST_DEAD)\n        self.clear_buff()\n        self.clear_forbids()\n        self.set_mon_status(AC_ST_DEAD)\n        self.drop(kill_owner, kill_owner_gid)\n        if kill_owner and kill_owner.is_player():\n            res = role_pb2.RoleIdReq()\n            res.id = kill_owner.gid\n            self.map_obj.tell_room('J12', 1, res)\n            if self.varia_type:\n                chaoneng = CHAONENG_VAR[self.mon_typ]\n            else:\n                chaoneng = CHAONENG_NOVAR[self.mon_typ]\n            kill_owner.add_chaoneng(chaoneng)\n            if kill_owner.shaqi > 0:\n                kill_owner.reduce_shaqi(1)\n            allocated = False\n            exps = query_exp(self.tid)\n            if kill_owner.team:\n                allocated = team.allocate_exp(kill_owner,exps, query_level(self.tid))\n            if not allocated:\n                exp = kill_owner.compute_exp(exps, query_level(self.tid))\n                
kill_owner.add_exp(exp* (2 if self.map_obj.double_exp else 1), km = True)\n            called_pet = kill_owner.called_pet\n            if called_pet:\n                called_pet.add_exp(called_pet.compute_exp(exps * (2 if self.map_obj.double_exp else 1), query_level(self.tid)))\n        self.monster_change(normal = True)\n    \n    \n    def monster_change(self, normal = False):\n        if self.tid == BOMB_TID:\n            if self.eff == 2:\n                map_obj = self.map_obj\n                for obj in map_obj.objs.values():\n                    if obj.is_player():\n                        if 9323 not in obj.buff:\n                            obj.add_buff(9323, 30, [2 * obj.fight[\"wlgj\"], 2 * obj.fight[\"jsgj\"]], obj.gid)\n                if normal:\n                    self.bomb()\n            elif self.eff == 3:\n                self.map_obj.double_exp = time()\n                if normal:\n                    self.bomb()\n            elif self.eff == 0:\n                if normal:\n                    self.bomb()\n        elif self.tid == BLACK_CHESS_TID or self.tid == WHITE_CHESS_TID:\n            if self.eff == 1:\n                self.map_obj.same_revive(self.tid)\n            elif self.eff == 2:\n                map_obj = self.map_obj\n                for obj in map_obj.objs.values():\n                    if obj.is_player():\n                        if 9323 not in obj.buff:\n                            obj.add_buff(9323, 30, [2 * obj.fight[\"wlgj\"], 2 * obj.fight[\"jsgj\"]], obj.gid)\n                if normal:\n                    self.revive()\n            elif self.eff == 3:\n                self.map_obj.double_exp = time()\n                if normal:\n                    self.revive()\n            elif self.eff == 0:\n                if normal:\n                    self.revive()\n    \n    \n    def bomb(self):\n        monsters = [BLACK_CHESS_TID] * 19 + [WHITE_CHESS_TID] * 19 +[BOMB_TID] * 2\n        shuffle(monsters)\n        typ = [1]*2 + [2]*2 + [3]*2 + [0]*94 \n        shuffle(typ)\n        pt = self.map_obj.pt_monster_ids[self.gid]\n        raw_num, col_num = pt / MAX_NUM, pt % MAX_NUM\n        res = activity_pb2.BornMonsters()\n        res.gid = self.gid\n        count = 0 \n        map_obj = self.map_obj\n        has_eff = set()\n        for raw in (-1, 0, 1):\n            for col in (-1, 0, 1):\n                temp_raw = raw_num + raw\n                temp_col = col_num + col\n                if (temp_raw, temp_col) not in self.map_obj.pt_to_monster:\n                    continue\n                monster = map_obj.pt_to_monster[(temp_raw, temp_col)]\n                if monster.eff:\n                    if monster.eff == 1:\n                        has_eff.add(monster.tid)\n                        continue\n                    else:\n                        self.monster_change()\n                map_obj.same_monster_tbl[monster.tid].remove(monster.gid)\n                born_m = res.born_monster.add()\n                born_m.x = monster.x\n                born_m.y = monster.y\n                monster.tid = monsters[count]\n                if monster.tid not in map_obj.same_monster_tbl:\n                    map_obj.same_monster_tbl[monster.tid] = [monster.gid]\n                else:\n                    map_obj.same_monster_tbl[monster.tid].append(monster.gid)\n                monster.eff = typ[count] if monster.tid != BOMB_TID else 0\n                monster.revive_add_attrs(monster.eff)\n                born_m.tid = monster.tid\n                born_m.gid = monster.gid\n                born_m.typ = monster.eff\n                count += 1\n        res.hits = 0\n#        if hits:\n#            map_obj.hit[0] += 10\n#            map_obj.hit[1] = time()\n        map_obj.tell_room(\"J11\", 1, res)\n        for monster_tid in has_eff:\n            map_obj.same_revive(monster_tid)\n    \n    \n    def hurt_hp(self,val,attacker = None, from_pet = False):\n        self.hp -= val\n        if self.hp <= 0:\n            self.die(self.map_obj.get_obj(self.kill_owner_gid), self.kill_owner_gid)\n    def attacked(self,obj,val, from_pet = False):\n        pass\n\n    def revive(self):\n        self.map_obj.same_monster_tbl[self.tid].remove(self.gid)\n        if self.tid == BLACK_CHESS_TID:\n            tid = WHITE_CHESS_TID\n        elif self.tid == WHITE_CHESS_TID:\n            tid = BLACK_CHESS_TID\n        else:\n            rand_num = randint(1, 100)\n            if rand_num % 2 == 0:\n                tid = WHITE_CHESS_TID\n            else:\n                tid = BLACK_CHESS_TID\n        rand_num = randint(1, 100)\n        self.eff = 0\n        if rand_num <= 2:\n            self.eff = choice([1,2,3])\n        self.x, self.y = self.born_x, self.born_y\n        self.tid = tid\n        if self.tid not in self.map_obj.same_monster_tbl:\n            self.map_obj.same_monster_tbl[self.tid] = [self.gid]\n        else:\n            
self.map_obj.same_monster_tbl[self.tid].append(self.gid)\n self.revive_add_attrs(self.eff)\n res = activity_pb2.BornMonsters()\n new_m = res.born_monster.add()\n new_m.x = self.x\n new_m.y = self.y\n new_m.tid = self.tid\n new_m.gid = self.gid\n new_m.typ = self.eff\n self.map_obj.tell_room(\"J09\",1,res)\n \n \n def revive_add_attrs(self, eff = 0):\n self.load_base_fight(self.level)\n self.fight = copy(self.base_fight)\n self.fight['mhp'] *= 5 if eff != 0 else 1 \n self.hp = self.fight['mhp']\n self.mp = self.fight['mmp']\n self.set_status(ST_FREE)\n self.ac_status = AC_ST_FREE\n\n def drop(self,kill_owner, kill_owner_gid):\n pass\n\n@route('J10', require_para = activity_pb2. CleanMonsters)\n@cmd_exception_catch()\ndef clean_chessmonster(obj, data):\n if not hasattr(obj, \"map_obj\") or not obj.map_obj:\n return\n map_obj = obj.map_obj\n if not hasattr(map_obj, 'pt_monster_ids'):\n return\n if not hasattr(map_obj, \"player_tbl\"):\n return\n pt_monster_ids = map_obj.pt_monster_ids\n count = 0\n monster_lenth = []\n chess_count = 0\n need_revive= set()\n need_clear = []\n has_eff = set()\n for monsters in data.monsters:\n temp = []\n is_eff = False\n if len(monsters.gid) < 5:\n continue\n temp_revive = set()\n mons = map_obj.get_obj(monsters.gid[0])\n if not mons:\n continue\n monster_same = True\n mon_tid = mons.tid\n if mon_tid not in MONSTER_TIDS:\n continue\n for monster in monsters.gid:\n if monster not in pt_monster_ids:\n break\n mon = map_obj.get_obj(monster)\n if not mon:\n break\n if mon.tid != mon_tid:\n monster_same = False\n break\n if mon.eff == 1:\n is_eff =True\n pt = pt_monster_ids[monster]\n temp.append(pt)\n temp_revive.add(monster)\n lenth = len(temp_revive)\n if lenth < 5 or len(temp) != lenth or not monster_same:\n continue\n temp.sort()\n if temp not in can_clean_tbl:\n continue\n chess_count += lenth\n count += 1\n monster_lenth.append(lenth)\n if is_eff:\n has_eff.add(mon.tid)\n is_eff = False\n continue\n need_revive |= temp_revive\n need_clear.append(temp_revive)\n if not need_revive and not has_eff:\n return\n time_now = time()\n if time_now - map_obj.hit[1] > 15:\n if count > 1:\n map_obj.hit[0] = count * count\n else:\n map_obj.hit[0] = count\n else:\n if count > 1:\n map_obj.hit[0] += count * count\n else:\n map_obj.hit[0] += count\n for lenth in monster_lenth:\n if lenth == 6:\n map_obj.hit[0] += 1\n exps = int(chess_count * 50 * map_obj.member_info[1] / 10.0 * (1 + map_obj.hit[0] / 100.0)) * (2 if map_obj.double_exp else 1)\n for player in obj.map_obj.player_tbl.values():\n player.add_exp(exps)\n map_obj.hit[1] = time_now\n if need_clear:\n res = activity_pb2. CleanMonsters()\n for need_clr in need_clear:\n mons_lists = res.monsters.add()\n for monster_id in need_clr:\n mons_lists.gid.append(monster_id)\n map_obj.tell_room(\"J10\", 1, res)\n for monster_tid in has_eff:\n map_obj.same_revive(monster_tid)\n if need_revive:\n map_obj.lot_revive(need_revive)\n\n","sub_path":"yuanneng/activity/clean_chess.py","file_name":"clean_chess.py","file_ext":"py","file_size_in_byte":17351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"253481296","text":"'''\nGiven an array of integers greater than zero, find if it is possible to split it in two subarrays\n(without reordering the elements), such that the sum of the two subarrays is the same. 
\nPrint the two subarrays.\n'''\n\ndef findSplitPoint(arr):\n    left_sum=0\n    for no in arr:\n        left_sum+=no\n    \n    right_sum=0\n    for i in range(len(arr)-1, -1, -1):\n        right_sum+=arr[i]\n        left_sum-=arr[i]\n        if right_sum==left_sum:\n            return i\n    return -1\narr = [1,2,3,4,5,5]\nsplit_idx = findSplitPoint(arr)\nprint(split_idx)\nif split_idx!=-1:\n    print(arr[0:split_idx], arr[split_idx:])\n","sub_path":"Split an array into two equal Sum subarrays.py","file_name":"Split an array into two equal Sum subarrays.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"449233602","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom Abraham.Código._7_DP import Grid\nfrom Abraham.Código._7_DP.iteracion_del_valor import mostrar_politica,mostrar_valores\n\n\nEPS = 0.75\nGAMMA = 0.95\nALPHA = 0.1\n\ndef MaxQ(Q_s):\n    MaxV = float(\"-inf\")\n    MaxA = None\n\n    for a,v in Q_s.items():\n        if v > MaxV:\n            MaxV = v\n            MaxA = a\n    return MaxV,MaxA\n\ndef accionRandom(a,grid = Grid.grid_estandar(), eps=EPS):\n\n    p = np.random.random()\n\n    if p < (1.0 - eps):\n        return a\n    else:\n        return np.random.choice(grid.posiblesAccionesBasicas())\n\n\nif __name__ == \"__main__\":\n\n    grid = Grid.grid_negativo(-0.1)\n    todos_estados = grid.todos_estados()\n\n    Q = {}\n    alpha_personal = {}\n\n    for s in todos_estados:\n        Q[s] = {}\n        alpha_personal[s] = {}\n\n        for a in grid.posiblesAccionesBasicas():\n            Q[s][a] = 0\n            alpha_personal[s][a] = 1.0\n\n\n    t = 1.0\n    deltas = [] # maximum change per iteration\n\n    nEpisodios = 5000\n    for episo in range(nEpisodios):\n\n        if episo % 100 == 0:\n            print(\"Episode = %d\" % (episo))\n\n        s = (2,0)\n        grid.set_estado(s)\n        a = MaxQ(Q[s])[1]\n\n        cambioMayor = 0\n        while not grid.game_over():\n            a = accionRandom(a, grid, eps=EPS / t) # 0.75 / x\n\n            r = grid.mover(a)\n            s2 = grid.estado_actual()\n            max_q_s2,a2 = MaxQ(Q[s2])\n\n            alpha = ALPHA / alpha_personal[s][a]\n            alpha_personal[s][a] += 0.005\n\n            antigua_qsa = Q[s][a]\n            Q[s][a] = Q[s][a] + alpha * (r + GAMMA * max_q_s2 - Q[s][a])\n            cambioMayor = max(cambioMayor,np.abs(Q[s][a] - antigua_qsa))\n\n            s = s2\n            a = a2\n\n        deltas.append(cambioMayor)\n        t += 1\n\n    plt.plot(deltas)\n    plt.show()\n\n    politica = {}\n    V = {}\n\n    for s in grid.acciones:\n        v,a = MaxQ(Q[s])\n        politica[s] = a\n        V[s] = v\n\n    print(\"Final policy\")\n    mostrar_politica(politica,grid)\n\n    print(\"Final values\")\n    mostrar_valores(V,grid)\n","sub_path":"Código/_9_TD/Q-Learning.py","file_name":"Q-Learning.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"331457316","text":"from gevent import monkey\nmonkey.patch_all()\n\nimport gevent\nimport grpc\nimport grpc.experimental.gevent as grpc_gevent\ngrpc_gevent.init_gevent()\n\nfrom gevent.queue import Queue\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), *(['..' 
+ os.sep] * 1))))\n\nfrom pymongo import MongoClient\nfrom datetime import datetime, timedelta\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom google.protobuf.empty_pb2 import Empty\n\nfrom stock_service import stock_provider_pb2_grpc\nfrom stock_service import stock_provider_pb2\n\n\n\nAT_ONCE_SECONDS = 60\n\nSTOPPED = 0\nREQUEST_START = 1\nSTARTED = 2\nREQUEST_FINISH = 3\n\nsimulation_status = STOPPED\ndeliver_greenlet = None\ncollect_greenlet = None\nmsg_queue = Queue()\n\n\nclass RequestIterator(object):\n def __init__(self):\n self.queue = Queue()\n\n def __iter__(self):\n return self\n\n def _next(self):\n data = self.queue.get(True)\n if data[0] == stock_provider_pb2.SimulationMsgType.MSG_TICK:\n return stock_provider_pb2.SimulationMsg(msgtype=data[0], tick=data[1])\n elif data[0] == stock_provider_pb2.SimulationMsgType.MSG_BIDASK:\n return stock_provider_pb2.SimulationMsg(msgtype=data[0], bidask=data[1])\n elif data[0] == stock_provider_pb2.SimulationMsgType.MSG_SUBJECT:\n return stock_provider_pb2.SimulationMsg(msgtype=data[0], subject=data[1])\n elif data[0] == stock_provider_pb2.SimulationMsgType.MSG_ALARM:\n return stock_provider_pb2.SimulationMsg(msgtype=data[0], alarm=data[1])\n\n print('Unknown Message')\n return stock_provider_pb2.SimulationMsg()\n\n\n def __next__(self):\n return self._next()\n\n def next(self):\n return self._next()\n\n def append_tick(self, tick):\n self.queue.put_nowait((stock_provider_pb2.SimulationMsgType.MSG_TICK, tick))\n\n def append_subject(self, subject):\n self.queue.put_nowait((stock_provider_pb2.SimulationMsgType.MSG_SUBJECT, subject))\n\n def append_bidask(self, bidask):\n self.queue.put_nowait((stock_provider_pb2.SimulationMsgType.MSG_BIDASK, bidask))\n\n def append_alarm(self, alarm):\n self.queue.put_nowait((stock_provider_pb2.SimulationMsgType.MSG_ALARM, alarm))\n\n\nrequest_iterator = RequestIterator()\n\n\n\ndef tick_to_grpc(tick):\n tick_date = Timestamp()\n tick_date.FromDatetime(tick['date'] - timedelta(hours=9))\n code = tick['code']\n\n tick_data = stock_provider_pb2.CybosTickData(tick_date=tick_date,\n code=code,\n company_name=tick['1'],\n yesterday_diff=tick['2'],\n time=tick['3'],\n start_price=int(tick['4']),\n highest_price=int(tick['5']),\n lowest_price=int(tick['6']),\n ask_price=int(tick['7']),\n bid_price=int(tick['8']),\n cum_volume=tick['9'],\n cum_amount=tick['10'],\n current_price=int(tick['13']),\n buy_or_sell=(tick['14'] == ord('1')),\n cum_sell_volume_by_price=tick['15'],\n cum_buy_volume_by_price=tick['16'],\n volume=tick['17'],\n time_with_sec=tick['18'],\n market_type_exp=tick['19'],\n market_type=tick['20'],\n out_time_volume=tick['21'],\n cum_sell_volume=tick['27'],\n cum_buy_volume=tick['28'])\n # let handle is_kospi in stock_service\n return tick_data\n\n\ndef subject_to_grpc(tick):\n tick_date = Timestamp()\n tick_date.FromDatetime(tick['date'] - timedelta(hours=9))\n code = tick['code']\n tick_data = stock_provider_pb2.CybosSubjectTickData(tick_date=tick_date,\n time=tick['0'],\n name=tick['1'],\n code=code,\n company_name=tick['3'],\n buy_or_sell=(tick['4'] == ord('2')),\n volume=tick['5'],\n total_volume=tick['6'],\n foreigner_total_volume=tick['8'])\n return tick_data\n\n\ndef bidask_to_grpc(tick):\n tick_date = Timestamp()\n tick_date.FromDatetime(tick['date'] - timedelta(hours=9))\n code = tick['code']\n if 'time' in tick:\n return stock_provider_pb2.CybosBidAskTickData(tick_date=tick_date,\n code=code,\n time=tick['time'],\n volume=tick['volume'],\n bid_prices=tick['bid_prices'],\n 
ask_prices=tick['ask_prices'],\n bid_remains=tick['bid_remains'],\n ask_remains=tick['ask_remains'],\n total_ask_remain=tick['total_ask_remain'],\n total_bid_remain=tick['total_bid_remain'],\n out_time_total_ask_remain=tick['uni_ask_remain'],\n out_time_total_bid_remain=tick['uni_bid_remain'])\n\n bidask = stock_provider_pb2.CybosBidAskTickData(tick_date=tick_date,\n code=code,\n time=tick['1'],\n volume=tick['2'],\n total_ask_remain=tick['23'],\n total_bid_remain=tick['24'],\n out_time_total_ask_remain=tick['25'],\n out_time_total_bid_remain=tick['26'])\n for i in range(3, 19+1, 4):\n if tick[str(i+1)] > 0:\n bidask.bid_prices.append(tick[str(i+1)])\n bidask.bid_remains.append(tick[str(i+3)])\n\n if tick[str(i)] > 0:\n bidask.ask_prices.append(tick[str(i)])\n bidask.ask_remains.append(tick[str(i+2)])\n\n for i in range(27, 43+1, 4):\n if tick[str(i+1)] > 0:\n bidask.bid_prices.append(tick[str(i+1)])\n bidask.bid_remains.append(tick[str(i+3)])\n\n if tick[str(i)] > 0:\n bidask.ask_prices.append(tick[str(i)])\n bidask.ask_remains.append(tick[str(i+2)])\n return bidask\n\n\ndef alarm_to_grpc(tick):\n tick_date = Timestamp()\n tick_date.FromDatetime(tick['date'] - timedelta(hours=9))\n code = tick['code'] if 'code' in tick else tick['3']\n\n tick_data = stock_provider_pb2.CybosStockAlarm(tick_date=tick_date,\n time=tick['0'],\n type_category=tick['1'],\n market_category=tick['2'],\n code=code,\n alarm_category=tick['4'],\n title=tick['5'],\n content=tick['6'])\n return tick_data\n\n\ndef collect_db(db, from_time, until_time):\n collection_name = 'T' + from_time.strftime('%Y%m%d')\n return list(db[collection_name].find({'date': {'$gt': from_time, '$lte': until_time}}))\n\n\ndef tick_sender(tick_queue, speed):\n while simulation_status == STARTED:\n try:\n data = tick_queue.get(True, 1)\n except gevent.queue.Empty as ge:\n print('deliver tick queue empty')\n continue\n\n print('put ticks', len(data), 'speed', speed)\n now = datetime.now()\n datatime = None\n last_datatime = None\n #timeadjust = timedelta(seconds=0)\n for d in data:\n if simulation_status != STARTED:\n break\n \n d_date = d['date']\n if datatime is None:\n datatime = d_date\n last_datatime = datatime\n \n while (d_date - datatime) * speed > datetime.now() - now:\n gevent.sleep(((d_date - datatime) * speed - (datetime.now() - now)).microseconds / 1000000)\n now = datetime.now()\n\n if d['type'] == 'subject':\n request_iterator.append_subject(subject_to_grpc(d))\n elif d['type'] == 'bidask':\n request_iterator.append_bidask(bidask_to_grpc(d))\n elif d['type'] == 'tick':\n request_iterator.append_tick(tick_to_grpc(d))\n elif d['type'] == 'alarm':\n request_iterator.append_alarm(alarm_to_grpc(d))\n else:\n continue\n\n if d_date - last_datatime > timedelta(seconds=1):\n tick_date = Timestamp()\n tick_date.FromDatetime(d_date - timedelta(hours=9))\n stub.SetCurrentDateTime(tick_date)\n last_datatime = d_date\n\n datatime = d_date\n\n \n print('exit tick sender')\n\n\ndef start_tick(simulation_datetime, speed):\n global simulation_status \n\n tick_queue = Queue(3)\n db = MongoClient('mongodb://127.0.0.1:27017').trade_alarm\n finish_time = simulation_datetime.replace(hour=15, minute=30)\n simulation_status = STARTED\n deliver_greenlet = gevent.spawn(tick_sender, tick_queue, speed)\n stub.SetSimulationStatus(stock_provider_pb2.SimulationStatus(simulation_on=True,\n simulation_speed=speed))\n\n while simulation_datetime <= finish_time and simulation_status == STARTED:\n print('load data', simulation_datetime, 'data period seconds', 
AT_ONCE_SECONDS, 'real time', datetime.now())\n        data = collect_db(db, simulation_datetime, simulation_datetime + timedelta(seconds=AT_ONCE_SECONDS))\n\n        while True:\n            try:\n                tick_queue.put(data, True, 1)\n                break\n            except gevent.queue.Full as ge:\n                if simulation_status != STARTED:\n                    print('Queue Full and exit simulation')\n                    break\n\n        simulation_datetime += timedelta(seconds=AT_ONCE_SECONDS)\n        gevent.sleep()\n        print('load done', simulation_datetime, 'tick len', len(data), 'real time', datetime.now())\n\n    simulation_status = REQUEST_FINISH\n    while not deliver_greenlet.dead:\n        gevent.sleep(1)\n\n    simulation_status = STOPPED\n    stub.SetSimulationStatus(stock_provider_pb2.SimulationStatus(simulation_on=False,\n                                                                 simulation_speed=speed))\n    \n\ndef operation_subscriber():\n    global simulation_status, collect_greenlet\n    response = stub.ListenSimulationOperation(Empty())\n    for msg in response:\n        print('Receive Msg', 'speed', msg.speed, 'datetime', msg.start_datetime.ToDatetime() + timedelta(hours=9), 'status', simulation_status)\n        if msg.is_on and simulation_status == STOPPED: \n            simulation_status = REQUEST_START\n            collect_greenlet = gevent.spawn(start_tick, msg.start_datetime.ToDatetime() + timedelta(hours=9), msg.speed)\n        elif msg.is_on and simulation_status == REQUEST_START:\n            print('PREPARING SIMULATION')\n        elif not msg.is_on and simulation_status == STARTED:\n            print('STOP SIMULATION')\n            simulation_status = REQUEST_FINISH\n        elif not msg.is_on and simulation_status == REQUEST_FINISH:\n            print('FINALIZING SIMULATION')\n    \n\n\ndef simulation_data_sender(stub):\n    responses = stub.SimulationData(request_iterator)\n    for response in responses:\n        pass\n    print('simulation data sender done')\n\ndef run():\n    global stub\n    with grpc.insecure_channel('localhost:50052') as channel: \n        subscribe_handlers = []\n        stub = stock_provider_pb2_grpc.StockStub(channel)\n        subscribe_handlers.append(gevent.spawn(operation_subscriber))\n        simulation_data_sender(stub)\n        gevent.joinall(subscribe_handlers)\n\n\nif __name__ == '__main__':\n    run()\n","sub_path":"stock_service/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":13034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"226887936","text":"def verifica_progressao(lista):\n    if len(lista) < 2:\n        return 'NA'\n    pa = True\n    pg = True\n    zero = False\n    ret = 'NA'\n    for i in lista:\n        if i == 0:\n            zero = True\n    if not zero:\n        soma = lista[1]-lista[0]\n        mult = lista[1]/lista[0]\n        for n in range(len(lista)-1):\n            if lista[n+1]-lista[n] != soma:\n                pa = False\n            if lista[n+1]/lista[n] != mult:\n                pg = False\n        if pg and pa:\n            ret = 'AG'\n        elif pg:\n            ret = 'PG'\n        elif pa:\n            ret = 'PA'\n        else:\n            ret = 'NA'\n    else:\n        soma = lista[1]-lista[0]\n        for n in range(len(lista)-1):\n            if lista[n+1]-lista[n] != soma:\n                pa = False\n        if pa:\n            ret = 'PA'\n        else:\n            ret = 'NA'\n    return ret\n","sub_path":"backup/user_202/ch57_2020_04_13_18_17_53_715842.py","file_name":"ch57_2020_04_13_18_17_53_715842.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"136141330","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nfrom tqdm import tqdm\nfrom tcvae.localconfig import LocalConfig\nfrom tcvae.train import get_inputs, get_all_measures\nfrom tcvae.dataset import get_dataset\n\n\nif __name__ == \"__main__\":\n    conf = LocalConfig()\n    conf.batch_size = 1\n\n    print(\"Loading 
dataset\")\n\n train, valid, test = get_dataset(conf)\n dataset = train.concatenate(valid)\n dataset = dataset.concatenate(test)\n\n print(\"Dataset loaded\")\n\n f = open(\"heuristic_measures_stats.csv\", \"w\")\n\n print(\"Starting computing measures\")\n\n for batch in tqdm(iter(dataset)):\n inputs = get_inputs(batch)\n sample_name = batch[\"sample_name\"][0][0].numpy().decode()\n measures = list(get_all_measures(batch, conf)[0].numpy())\n measures_str = \",\".join([str(m) for m in measures])\n out_string = f\"{sample_name},{measures_str}\\n\"\n f.write(out_string)\n\n f.close()\n\n print(\"Finished\")\n","sub_path":"timbre_conditioned_vae/compute_measure_stats.py","file_name":"compute_measure_stats.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"83737780","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/6/18 22:45\n# @Author : W-gh\n# @Site : 文件批量重命名\n# @File : test5.py\n# @Software: PyCharm\n\nimport PySimpleGUI as sg\nfrom hashlib import sha1\nimport os, shutil\n\n\ndef gui():\n layout = [\n [sg.Text('你选择的文件夹是:', font=(\"宋体\", 10)), sg.Text('', key='text1', size=(50, 1), font=(\"宋体\", 10))],\n [sg.Text('前缀', font=(\"宋体\", 10)), sg.Input(key='prefix', size=(20, 1), font=(\"宋体\", 10)),\n sg.Text('后缀', font=(\"宋体\", 10)), sg.Input(key='suffix', size=(20, 1), font=(\"宋体\", 10))],\n [sg.Text('程序运行记录', justification='center')],\n [sg.Output(size=(70, 20), font=(\"宋体\", 10))],\n [sg.FolderBrowse('打开文件夹', key='folder', target='text1'), sg.Button('重命名'), sg.Button('关闭')]\n ]\n\n window = sg.Window('修改图片的工具箱', layout, font=(\"宋体\", 15), default_element_size=(50, 1))\n\n while True:\n event, values = window.read()\n if event in (None, '关闭'): # 如果用户关闭窗口或点击`关闭`\n break\n if event == '重命名':\n if values['folder']:\n print('{0}正在重命名原文件为hash值{0}'.format('*' * 10))\n mult_rename(values['folder'], values['prefix'], values['suffix'])\n print('{0}重命名完毕{0}'.format('*' * 10))\n else:\n print('请先选择文件夹')\n\n window.close()\n\n\ndef mult_rename(dir_path, prefix, suffix): # 批量重命名\n '''\n 批量文件重命名\n Args:\n dir_path: 文件路径\n prefix: 前缀\n suffix: 后缀\n\n Returns:\n\n '''\n for file in os.listdir(dir_path):\n file_path = os.path.join(dir_path, file)\n if not os.path.isdir(file_path): # 判断是否为文件夹\n file_name = os.path.basename(file_path).split('.')[0]\n pic_hash = str(prefix)+file_name+str(suffix)\n last = file[file.rindex(r'.'):] # 后缀\n new_name = pic_hash + last\n if file == new_name:\n print(file, '无需修改')\n else:\n try:\n new_path = os.path.join(dir_path, new_name)\n os.rename(file_path, new_path)\n print('{0}已重命名为{1}'.format(file, new_name))\n except FileExistsError:\n repeat_path = dir_path + r'\\重复文件夹'\n if os.path.exists(repeat_path) == False:\n os.makedirs(repeat_path)\n new_path = os.path.join(repeat_path, new_name)\n shutil.move(file_path, new_path)\n print(r'{0}文件重复,已移至重复文件夹下'.format(file))\n\n\ndef main():\n gui()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"28262317","text":"#! 
/usr/bin/env python3\n\nfrom functools import partial\nfrom typing import List, Dict, Tuple\n\nimport rospy\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom std_msgs.msg import String, Header\n\nfrom task_manager_msgs.msg import ScheduleItem, ScheduleUpdated\nfrom task_manager_msgs.srv import GetSchedule, GetScheduleResponse\nfrom commons_msgs.msg import Goal\nfrom task_allocator_msgs.msg import Confirmation\n\n# Node name\nNODE_NAME = 'task_manager'\n\n# Topics and services\nTASK_COMPLETED_TOPIC = 'task_completed'\nSCHEDULE_UPDATED_TOPIC = 'schedule_updated'\nCONFIRMATION_TOPIC = 'confirmation'\nGET_SCHEDULE_SVC = 'get_schedule'\nGOAL_ATTAINED_TOPIC = '/goal_attained'\n\n\nclass TaskManager:\n \"\"\"\n Node responsible for managing the tasks to be executed by a robot\n \"\"\"\n\n def __init__(self, robot_id: str, initial_x: float, initial_y: float):\n self.robot_id = robot_id\n self.location: Tuple[float, float] = (initial_x, initial_y)\n self.schedule_order: List[str] = []\n self.schedule: Dict[str, ScheduleItem] = dict()\n self.cost = 0\n self.total_seen_tasks = 0\n self.virtual_task_timeout = 60\n\n self.confirmation_sub = None\n self.robot_location_sub = None\n self.completed_sub = None\n self.agenda_srv = None\n self.completed_pub = None\n self.schedule_updated_pub = None\n\n self.__init_subscriptions()\n self.__init_publishers()\n\n rospy.loginfo(f\"[{robot_id}][{NODE_NAME}] node is ready - \"\n f\"\\n\\tlistening for new tasks on '{self.confirmation_sub.resolved_name}'\"\n f\"\\n\\tlistening task completion on '{self.completed_sub.resolved_name}'\"\n f\"\\n\\tpublishing schedule updates on '{self.schedule_updated_pub.resolved_name}'\"\n f\"\\n\\tserving schedule requests at {self.agenda_srv.resolved_name}\")\n rospy.spin()\n\n def __init_subscriptions(self):\n self.confirmation_sub = rospy.Subscriber(CONFIRMATION_TOPIC, Confirmation, self.add_to_schedule_cb)\n self.robot_location_sub = rospy.Subscriber('/' + self.robot_id + '/amcl_pose', PoseWithCovarianceStamped,\n self.update_location_cb)\n self.completed_sub = rospy.Subscriber(TASK_COMPLETED_TOPIC, String, self.delete_task_cb)\n\n def __init_publishers(self):\n self.goal_attained_pub = rospy.Publisher(GOAL_ATTAINED_TOPIC, Goal, queue_size=1)\n self.schedule_updated_pub = rospy.Publisher(SCHEDULE_UPDATED_TOPIC, ScheduleUpdated, queue_size=1)\n\n self.agenda_srv = rospy.Service(GET_SCHEDULE_SVC, GetSchedule, self.get_schedule_cb)\n\n def __new_id(self) -> str:\n self.total_seen_tasks += 1\n return f'{self.robot_id}_task_{self.total_seen_tasks}'\n\n def update_location_cb(self, robot_location: PoseWithCovarianceStamped):\n self.location = (robot_location.pose.pose.position.x, robot_location.pose.pose.position.y)\n\n def delete_task_cb(self, task_id: String) -> None:\n rospy.logdebug(f\"[{self.robot_id}][{NODE_NAME}] Removing task with id {task_id} from schedule\")\n\n try:\n self.schedule_order.remove(task_id.data)\n except ValueError:\n pass\n try:\n self.cost -= self.schedule[task_id.data].edge_cost_in\n\n # Notify the Goal Manager about goal succeeded\n self.goal_attained_pub.publish(self.schedule[task_id.data].task)\n\n del self.schedule[task_id.data]\n except KeyError:\n pass\n\n def clear_virtual_task(self, task_id: str, _=None):\n self.delete_task_cb(String(task_id))\n self.schedule_updated_pub.publish(schedule=[self.schedule[item_id] for item_id in self.schedule_order])\n\n def add_to_schedule_cb(self, confirmation: Confirmation) -> None:\n rospy.logdebug(\n f\"[{self.robot_id}][{NODE_NAME}] Received 
confirmation of task at [{confirmation.Bid.task.x},\"\n f\"{confirmation.Bid.task.y}] addressed to {confirmation.robot_id}: {confirmation.Bid}\")\n\n if self.robot_id != confirmation.robot_id:\n rospy.logdebug(f\"[{self.robot_id}][{NODE_NAME}] Not the intended recipient.\")\n return\n\n bid = confirmation.Bid\n schedule_item = ScheduleItem(task=bid.task, edge_cost_in=bid.edge_cost_in, id=self.__new_id())\n self.schedule[schedule_item.id] = schedule_item\n\n if bid.after or len(self.schedule_order) == 0:\n # flagged as \"insert at the end\" or agenda empty --> just append, no edge was removed\n # don't handle the case where the agenda has changed since the bid was made\n self.schedule_order.append(schedule_item.id)\n self.cost += schedule_item.edge_cost_in\n rospy.loginfo(f\"[{self.robot_id}][{NODE_NAME}] task insertion at the end - new cost: {self.cost}\")\n\n else:\n # insert into agenda at the appropriate point\n try:\n insert_at_index = self.schedule_order.index(bid.insert_at_id)\n except ValueError:\n # assume insertion point has already been processed --> insert at the front\n rospy.logwarn(\n f\"[{self.robot_id}][{NODE_NAME}] insertion point not found - assume already completed and \"\n f\"inserting at front!\")\n insert_at_index = 0\n bid.insert_at_id = self.schedule_order[0]\n insert_at_item = self.schedule[bid.insert_at_id]\n\n # update cost: remove old incoming edge, add cost for new edges and assign insertion point new edge cost\n self.cost = self.cost - insert_at_item.edge_cost_in + bid.edge_cost_out + schedule_item.edge_cost_in\n insert_at_item.edge_cost_in = bid.edge_cost_out\n\n self.schedule_order.insert(insert_at_index, schedule_item.id)\n rospy.loginfo(\n f\"[{self.robot_id}][{NODE_NAME}] insert task task at {insert_at_index} - new cost: {self.cost},\"\n f\" new length: {len(self.schedule_order)}\")\n\n self.schedule_updated_pub.publish(schedule=[self.schedule[item_id] for item_id in self.schedule_order])\n if confirmation.Bid.task.is_virtual:\n rospy.Timer(rospy.Duration(self.virtual_task_timeout), partial(self.clear_virtual_task, schedule_item.id))\n\n def get_schedule_cb(self, _) -> GetScheduleResponse:\n rospy.logdebug(f\"[{self.robot_id}][{NODE_NAME}] Returning agenda of length {len(self.schedule_order)}\")\n\n robot_location = ScheduleItem(edge_cost_in=0, id='robot_location',\n task=Goal(\n header=Header(frame_id='map', stamp=rospy.get_rostime()),\n x=self.location[0], y=self.location[1], is_virtual=False))\n return GetScheduleResponse(\n schedule=[robot_location] + [self.schedule[item_id] for item_id in self.schedule_order],\n cost=self.cost)\n\n\nif __name__ == '__main__':\n rospy.init_node(NODE_NAME, anonymous=True)\n robot_id = rospy.get_param('~robot_id', 'robot_0')\n initial_x = rospy.get_param('~initial_x', 'robot_0')\n initial_y = rospy.get_param('~initial_y', 'robot_0')\n\n TaskManager(robot_id, initial_x, initial_y)","sub_path":"ros/src/task_manager/scripts/TaskManager.py","file_name":"TaskManager.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"363257931","text":"from django.shortcuts import render\n\nimport commonware\n\nfrom gameon.submissions.models import Category, Entry, Challenge\nfrom gameon.events.models import Event\n\n\nlog = commonware.log.getLogger('playdoh')\n\n\ndef home(request, template='static_site/landing.html'):\n upcoming_events = Event.objects.get_upcoming()\n current_challenge = Challenge.objects.get_current_challenge()\n\n if not 
current_challenge.announce_winners:\n data = {\n 'events': upcoming_events.order_by('start_date')[:5],\n 'num_events': upcoming_events.count(),\n }\n else:\n data = {\n 'winners': {\n 'champ': Entry.objects.get(is_grand_champ=True),\n 'best_hack': Entry.objects.get(award=\"best-hack\"),\n 'best_device': Entry.objects.get(award=\"best-device\"),\n 'best_web': Entry.objects.get(award=\"best-web\"),\n }\n }\n template = 'static_site/closed.html'\n\n return render(request, template, data)\n\n\ndef rules(request, template='static_site/rules.html'):\n data = {\n 'categories': Category.objects.all().order_by('name'),\n }\n return render(request, template, data)\n\n\ndef judges(request, template='static_site/judges.html'):\n data = {}\n return render(request, template, data)\n\n\ndef judging(request, template='static_site/judging.html'):\n data = {}\n return render(request, template, data)\n\n\ndef prizes(request, template='static_site/prizes.html'):\n data = {\n 'categories': Category.objects.all().order_by('name'),\n }\n return render(request, template, data)\n\n\ndef resources(request, template='static_site/resources.html'):\n data = {}\n return render(request, template, data)\n\n\ndef legal(request, template='static_site/legal.html'):\n data = {}\n return render(request, template, data)\n\n\ndef faqs(request, template='static_site/faqs.html'):\n data = {}\n return render(request, template, data)\n","sub_path":"gameon/static_site/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"207612111","text":"#!/usr/bin/env python3\r\n\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom config import email\r\n\r\ndef SendEmail(message,subject,toad):\r\n sender='admin@bishop-web.com'\r\n to=toad\r\n mail_coding = 'windows-1251'\r\n #-----------------------\r\n msg=MIMEMultipart()\r\n msg['Subject']=subject\r\n msg['From']=sender\r\n msg['To']=to\r\n #-----------------------\r\n MIME_text=MIMEText(message.encode('cp1251',errors='ignore'),'plain',mail_coding)\r\n msg.set_charset(mail_coding)\r\n msg.attach(MIME_text)\r\n #-----------------------\r\n serv_host='smtp.bishop-web.com'\r\n port='587'\r\n #-----------------------\r\n log=email['login']\r\n pas=email['password']\r\n #-----------------------\r\n fro='admin@bishop-web.com'\r\n #-----------------------\r\n server=smtplib.SMTP(serv_host,port)\r\n server.starttls()\r\n server.login(log,pas)\r\n server.sendmail(fro, toad, msg.as_string())\r\n server.quit()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Bishop/Bishop_parser/Lsmtp.py","file_name":"Lsmtp.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"173015149","text":"import psycopg2\nimport secret\n\n\ntry:\n\tconn = psycopg2.connect(secret.string)\n\tcur = conn.cursor()\nexcept Exception as e:\n\tprint(\"Uh oh, can't connect. 
Invalid dbname, user or password?\")\n\tprint(e)\n\ndef add_po(data):\n    # TODO add purchased_at TIMESTAMP\n    sql_statement = \"\"\"\n\t    INSERT INTO orders\n\t    (po_number, user_id, purchased_at, user_addr_id)\n\t    VALUES\n\t    (%s,%s,%s,%s)\n\t    RETURNING \"id\";\n    \"\"\"\n    cur.execute(sql_statement, data)\n    order_id = cur.fetchone()[0]\n    conn.commit()\n    return order_id\n\ndef products_add_order(data):\n\tsql_statement = \"\"\"\n\tUPDATE user_product\n\tSET order_id = %s\n\tWHERE user_id = %s\n\tAND order_id = 1;\n\t\"\"\"\n\tcur.execute(sql_statement, data)\n\tconn.commit()\n\ndef get_all(data):\n\tsql_statement = \"\"\"\n\tSELECT po_number, purchased_at\n\tFROM orders\n\tWHERE user_id = %s;\n\t\"\"\"\n\tcur.execute(sql_statement, data)\n\torders_info = cur.fetchall()\n\treturn orders_info\n\ndef add_addr_to_order(data):\n\t# data = (user_addr_id, user_id, order_id)\n\tsql_statement = \"\"\"\n\tUPDATE orders\n\tSET user_addr_id = %s\n\tWHERE user_id = %s\n\tAND order_id = %s;\n\t\"\"\"\n\tcur.execute(sql_statement, data)\n\tconn.commit()\n\ndef get_products(data):\n\tsql_statement = \"\"\"\n\t\tSELECT products.id, products.seller_id,\n\t\tproducts.img_ext, products.price,\n\t\tproducts.name, products.description,\n\t\tproducts.updated_at, user_product.units,\n\t\tuser_product.added_at\n\t\tFROM user_product\n\t\tINNER JOIN products\n\t\tON user_product.product_id = products.id\n\t\tINNER JOIN orders\n\t\tON user_product.order_id = orders.id\n\t\tWHERE user_product.user_id = %s\n\t\tAND orders.po_number = %s;\n\t\"\"\"\n\tcur.execute(sql_statement, data)\n\tproduct_id = cur.fetchall()\n\treturn product_id\n\ndef get_payment_info(order_id, user_id):\n\t# get product arr\n\tsql_statement = \"\"\"\n\tSELECT user_product.product_id, \n\tuser_product.units, products.name, \n\tproducts.price, products.seller_id\n\tFROM user_product\n\tINNER JOIN products\n\tON user_product.product_id = products.id\n\tWHERE order_id = %s;\n\t\"\"\"\n\tcur.execute(sql_statement, (order_id,))\n\tproducts_info = cur.fetchall()\n\t# get email\n\torder_info = {}\n\torder_info[\"prods\"] = []\n\tfor product_info in products_info:\n\t\tsql_statement = \"\"\"\n\t\tSELECT email\n\t\tFROM seller_emails\n\t\tWHERE seller_id = %s;\n\t\t\"\"\"\n\t\tcur.execute(sql_statement, (product_info[4],))\n\t\tseller_emails = cur.fetchall()\n\t\tif len(seller_emails)>0:\n\t\t\tprint(\"::::::::::\",seller_emails)\n\t\t\torder_info[\"prods\"].append({\n\t\t\t\t'business_email': seller_emails[0][0],\n\t\t\t\t'item_name': product_info[2],\n\t\t\t\t'item_number': \"{}-{}\".format(product_info[0],product_info[1]),\n\t\t\t\t'ammount': float(int(product_info[1])*int(product_info[3]))/100\n\t\t\t\t})\n\tsql_statement = \"\"\"\n\tSELECT name,\n\taddress, address2,\n\tcity, state, zipcode\n\tFROM user_address\n\tWHERE user_id = %s;\n\t\"\"\"\n\tcur.execute(sql_statement, (user_id,))\n\tuser_info = cur.fetchone()\n\tif user_info[0]:\n\t\tfull_name = user_info[0].split(\" \")\n\t\tif len(full_name) > 1:\n\t\t\tfirst_name = full_name[0]\n\t\t\tlast_name = \" \".join(full_name[1:])\n\t\telse:\n\t\t\tfirst_name = full_name[0]\n\t\t\tlast_name = \" \"\n\telse:\n\t\tfirst_name = \" \"\n\t\tlast_name = \" \"\n\torder_info[\"usr\"] = {\n\t\t'first_name': first_name,\n\t\t'last_name': last_name,\n\t\t'address': user_info[1],\n\t\t'address2': user_info[2],\n\t\t'city': user_info[3],\n\t\t'state': user_info[4],\n\t\t'zip': user_info[5]\n\t}\n\treturn 
order_info","sub_path":"app/runtime/models/Orders.py","file_name":"Orders.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"40680657","text":"import os\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom dataset import TestDataset, Resizer_test, Normalizer_test, collater_test\nfrom src.efficientdet import EfficientDet\nfrom config import get_args\nfrom tqdm import tqdm\nimport pandas as pd\n\n\ndef test(opt):\n opt.resume = True\n test_set = TestDataset(opt.data_path, transform=transforms.Compose([Normalizer_test(), Resizer_test()]))\n\n opt.num_classes = test_set.num_classes\n opt.batch_size = opt.batch_size*4\n test_params = {\"batch_size\": opt.batch_size,\n \"shuffle\": False,\n \"drop_last\": False,\n \"collate_fn\": collater_test,\n \"num_workers\": 0}\n test_generator = DataLoader(test_set, **test_params)\n \n model = EfficientDet(opt)\n model.load_state_dict(torch.load(os.path.join(opt.pretrained_model, opt.network+'.pth')))\n model.cuda()\n model.set_is_training(False)\n model.eval()\n \n submission = {}\n submission['name'] = []\n submission['image_id'] = []\n submission['confidence'] = []\n submission['xmin'] = []\n submission['ymin'] = []\n submission['xmax'] = []\n submission['ymax'] = []\n\n progress_bar = tqdm(test_generator)\n progress_bar.set_description_str(' Testing')\n for i, data in enumerate(progress_bar):\n scale = data['scale']\n with torch.no_grad():\n output_list = model(data['img'].cuda().float())\n \n for j, output in enumerate(output_list):\n scores, labels, boxes = output\n\n if boxes.shape[0] == 0:\n continue\n \n boxes /= scale[j]\n imageName = test_set.getImageName(i*opt.batch_size+j)\n\n for box_id in range(boxes.shape[0]):\n pred_prob = float(scores[box_id])\n if pred_prob < opt.cls_threshold:\n break\n pred_label = int(labels[box_id])\n xmin, ymin, xmax, ymax = boxes[box_id, :].cpu().numpy()\n\n pred_label = test_set.index2label(pred_label)\n\n submission['name'].append(pred_label)\n submission['image_id'].append(imageName[:-4]+'.xml')\n submission['confidence'].append(pred_prob)\n submission['xmin'].append(xmin)\n submission['ymin'].append(ymin)\n submission['xmax'].append(xmax)\n submission['ymax'].append(ymax)\n\n pd.DataFrame(submission).to_csv('submisson.csv', index=False)\n\nif __name__ == \"__main__\":\n opt = get_args()\n test(opt)\n ","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"256683695","text":"# ==, <>, !=, <, >, <=, >=, not, and, or\r\n\r\n# score = 65\r\n# if score > 70:\r\n# print(\"good\")\r\n# else:\r\n# print(\"try harder\")\r\n\r\ndef greeting(lang):\r\n if lang == \"th\":\r\n print(\"sawadee\")\r\n else:\r\n print(\"hello\")\r\n\r\ndef greeting2(lang):\r\n if lang == \"th\":\r\n print(\"sawadee\")\r\n print(\"สวัสดี\")\r\n elif lang == \"jp\":\r\n print(\"konichiwa\")\r\n elif lang == \"kr\":\r\n print(\"ann-yeong\")\r\n else:\r\n print(\"hello\")\r\n\r\ndef meet_req(eng, interview):\r\n if eng > 70 and interview > 80:\r\n return True\r\n else:\r\n return False\r\n\r\ndef meet_req2(eng, interview, math):\r\n if eng > 70 and interview > 80 and math > 65:\r\n return True\r\n else:\r\n return False\r\n\r\n# # greeting(\"aa\")\r\n# # greeting2(\"th\")\r\n# # print(meet_req(80, 90))\r\n# # print(meet_req(80, 60))\r\n# print(meet_req2(80, 90, 
70))","sub_path":"src/condition.py","file_name":"condition.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"218198576","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException, StaleElementReferenceException\nimport threading\nimport os\nimport tarfile\nimport gzip\nimport time\n\npage_counter = 1\n\ndef browser_setup():\n chromeOptions = webdriver.ChromeOptions()\n prefs = {\"download.default_directory\": \"/Users/{}/Desktop/TEXAS\".format(os.getlogin())}\n chromeOptions.add_experimental_option(\"prefs\", prefs)\n\n browser = webdriver.Chrome(executable_path = 'files/chromedriver.dms',\n chrome_options = chromeOptions) # fake Chrome browser mac\n # browser = webdriver.Chrome('C:\\\\Users\\Frozm\\PycharmProjects\\\\biologyScrapeData\\\\files\\win\\chromedriver.exe')\n return browser\n\ndef first_open_url(url):\n browser = browser_setup()\n browser.get(url)\n accept_terms(browser) #uncoment this\n\n # save activity in file\n global page_counter\n f = open('links.txt', 'a')\n print('Page {}'.format(page_counter))\n f.write('Page {}'.format(page_counter) + '\\n')\n f.close()\n\n get_items(browser)\n open_next_page(browser)\n\ndef accept_terms(browser):\n while True:\n try:\n accept_btn = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'css-oe4so')))\n #myElem = WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.NAME, 'q')))\n print(\"Terms are accepted\")\n accept_btn.click()\n except TimeoutException:\n print (\"Cant accept terms\")\n browser.refresh()\n continue\n break\n\ndef get_items(browser): # get links of all files\n while True:\n try:\n table = WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.TAG_NAME, 'tbody')))\n lines = table.find_elements_by_tag_name(\"tr\")\n for line in lines:\n if \"FPKM\" in line.find_elements_by_tag_name(\"td\")[2].find_element_by_tag_name(\"a\").get_attribute(\"innerHTML\"): # check name of file\n items_links.append(line.find_elements_by_tag_name(\"td\")[2].find_element_by_tag_name(\"a\").get_attribute(\"href\")) # add links\n f = open('links.txt', 'a')\n f.write(line.find_elements_by_tag_name(\"td\")[2].find_element_by_tag_name(\"a\").get_attribute(\"href\") + '\\n')\n f.close()\n\n f = open('lusc links.txt', 'a')\n f.write(line.find_elements_by_tag_name(\"td\")[2].find_element_by_tag_name(\"a\").get_attribute(\"href\") + '\\n')\n f.close()\n\n except TimeoutException:\n print(\"Cant get all items\")\n browser.refresh()\n continue\n break\n\n\ndef open_next_page(browser):\n runner = True\n while runner:\n try:\n buttons = WebDriverWait(browser, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-pagination-link')))\n for btn in buttons:\n if btn.find_element_by_tag_name(\"button\").get_attribute(\"innerHTML\") == '›':\n global page_counter\n page_counter = page_counter + 1\n print('Page {}'.format(page_counter))\n f = open('links.txt','a')\n f.write('Page {}'.format(page_counter)+ '\\n')\n f.close()\n open_url(browser,btn.get_attribute(\"href\"))\n\n except TimeoutError:\n print(\"Cant go to next page\")\n browser.refresh()\n continue\n finally:\n runner = False\n break\n print(\"last element\")\n\ndef open_url(browser, url):\n browser.get(url)\n get_items(browser)\n 
open_next_page(browser)\n\ndef file_to_list(file):\n with open(file, \"r\") as fd:\n lines = fd.read().splitlines()\n return lines\n\ndef download_from_links(links,firstInd, thread=1):\n browser = browser_setup()\n terms = True\n for num, link in enumerate(links, start=firstInd):\n browser.get(link)\n if terms: # accept terms first time\n try:\n accept_terms(browser)\n terms = False\n except:\n pass\n try:\n download_btn = WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n\n except TimeoutException:\n try:\n print(\"Thread {}. TimeoutException 1 for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 30).until(EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except TimeoutException:\n try:\n print(\"Thread {}. TimeoutException 2 for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 30).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except TimeoutException:\n try:\n print(\"Thread {}. TimeoutException 3 for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 40).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except TimeoutException:\n try:\n print(\"Thread {}. TimeoutException 4 for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 40).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except TimeoutException:\n print(\"Thread {}. TimeoutException 5 for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 50).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except StaleElementReferenceException:\n print(\"Thread {}. StaleElementReferenceException for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 20).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except StaleElementReferenceException:\n print(\"Thread {}. StaleElementReferenceException for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 20).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n except Exception as e:\n print(\"Thread {}. Common exception for file {} \".format(thread, num))\n browser.refresh()\n download_btn = WebDriverWait(browser, 20).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'test-download-button')))\n download_btn.click()\n\n print(\"Thread {}. File {} downloaded. 
Files left {}\".format(thread, num, firstInd+len(links)-num))\n\ndef assignFileNameToEntityID(browser, thread):\n global page_counter\n tableRows = WebDriverWait(browser, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, ' css-14pzyw')))\n fileName = tableRows[1].find_elements_by_tag_name(\"td\")[0].get_attribute(\"innerHTML\")\n\n downstream = WebDriverWait(browser, 1).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-downstream-analyses')))\n # while(downstream[0].find_elements_by_tag_name(\"h2\")[0].get_attribute(\"innerHTML\") == 'No Downstream Analysis files found.'): # if no files found refresh page\n # browser.refresh()\n # time.sleep(5)\n # downstream = WebDriverWait(browser, 30).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-downstream-analyses')))\n # print(\"Case didn't load: \" + downstream[0].find_elements_by_tag_name(\"h2\")[0].get_attribute(\"innerHTML\"))\n\n try:\n entityTable = WebDriverWait(browser, 1).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-entity-table-wrapper')))\n run = True\n while run:\n if 'TCGA' in entityTable[2].find_elements_by_tag_name(\"a\")[0].get_attribute(\"innerHTML\"):\n entityID = entityTable[2].find_elements_by_tag_name(\"a\")[0].get_attribute(\"innerHTML\") # entityID\n run = False\n else:\n browser.refresh()\n entityTable = WebDriverWait(browser, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-entity-table-wrapper')))\n entityID = entityTable[2].find_elements_by_tag_name(\"a\")[0].get_attribute(\"innerHTML\") # entityID\n\n except TimeoutException:\n browser.refresh()\n entityTable = WebDriverWait(browser, 30).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-entity-table-wrapper')))\n except:\n browser.refresh()\n entityTable = WebDriverWait(browser, 30).until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'test-entity-table-wrapper')))\n\n file = \"fileWithEntityID\" + str(thread) + '.txt'\n f = open(file, 'a')\n f.write(fileName + \":\" + entityID + '\\n')\n f.close()\n print(str(page_counter) + \": \" + fileName + \":\" + entityID)\n page_counter = page_counter + 1\n\ndef assigning(links, firstInd, thread = 1):\n browser = browser_setup()\n for num, link in enumerate(links, start=firstInd):\n try:\n browser.get(link)\n try:\n accept_terms(browser)\n except:\n pass\n assignFileNameToEntityID(browser, thread)\n except:\n print(\"Thread {}. TimeoutException 1 for file {} \".format(thread, num))\n browser.get(link)\n accept_terms(browser)\n time.sleep(3)\n assignFileNameToEntityID(browser, thread)\n print(\"Thread {}. File {} downloaded. 
Files left {}\".format(thread, num, firstInd + len(links) - num))\n\ndef parallelAssigning(list):\n t = threading.Thread(target=assigning, args=(list[0: 396], 0, 1))\n t1 = threading.Thread(target=assigning, args=(list[397: 793], 397, 2))\n t2 = threading.Thread(target=assigning, args=(list[793:], 793, 3))\n\n t.start()\n t1.start()\n t2.start()\n\ndef downloading(list):\n t = threading.Thread(target=download_from_links, args=(list[0: 396], 0, 1))\n t1 = threading.Thread(target=download_from_links, args=(list[397: 793], 397, 2))\n t2 = threading.Thread(target=download_from_links, args=(list[793:], 793, 3))\n #t3 = threading.Thread(target=download_from_links, args=(list[894:], 894, 4))\n\n t.start()\n t1.start()\n t2.start()\n #t3.start()\n\ndef unzip_files(list):\n '''First unzipin'''\n for file in list:\n if (file.endswith(\"tar.gz\")):\n tar = tarfile.open(file, \"r:gz\")\n tar.extractall(path='/Users/frozmannik/Desktop/LUAD data/extracted')\n tar.close()\n elif (file.endswith(\"tar\")):\n tar = tarfile.open(file, \"r:\")\n tar.extractall(path='/Users/frozmannik/Desktop/LUAD data/extracted')\n tar.close()\n print(\"All files are unziped\")\n\ndef save_txt(folders, path):\n '''second unzip'''\n '''unzip files and save them in folder'''\n for folder in folders:\n if folder == '.DS_Store':\n print(\"DS STORE\")\n else:\n for file in os.listdir(folder):\n if file.endswith(\".gz\"):\n #i = i+1\n content = gzip.open(folder + \"/\" +file)\n data = content.read()\n with open(os.path.join(path, file[:-3]), \"wb\") as f: # write bytes to file\n f.write(data)\n print(\"All files are saved in {}\".format(path))\n\ndef save_links_without_page(list):\n with open('lusc links.txt', 'w') as f:\n for item in list:\n f.write(\"%s\\n\" % item)\n\nif __name__ == '__main__':\n items_links = []\n\n urlLUSC = 'https://portal.gdc.cancer.gov/repository?facetTab=files&files_size=100&filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.project.project_id%22%2C%22value%22%3A%5B%22TCGA-LUSC%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.data_category%22%2C%22value%22%3A%5B%22Transcriptome%20Profiling%22%5D%7D%7D%5D%7D&searchTableTab=files'\n urlLUSCLast = 'https://portal.gdc.cancer.gov/query?files_offset=2800&files_size=100&filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.project.project_id%22%2C%22value%22%3A%5B%22TCGA-LUAD%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.data_category%22%2C%22value%22%3A%5B%22Transcriptome%20Profiling%22%5D%7D%7D%5D%7D%5D%7D&query=cases.project.project_id%20in%20%5BTCGA-LUAD%5D%20and%20files.data_category%20in%20%5B%22Transcriptome%20Profiling%22%5D%20&searchTableTab=files'\n\n urlLUADLast ='https://portal.gdc.cancer.gov/query?files_offset=2900&files_size=100&filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.project.project_id%22%2C%22value%22%3A%5B%22TCGA-LUAD%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.data_category%22%2C%22value%22%3A%5B%22Transcriptome%20Profiling%22%5D%7D%7D%5D%7D%5D%7D&query=cases.project.project_id%20in%20%5BTCGA-LUAD%5D%20and%20files.data_category%20in%20%5B%22Transcriptome%20Profiling%22%5D%20&searchTableTab=files'\n urlLUAD = 
'https://portal.gdc.cancer.gov/query?files_size=100&filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.project.project_id%22%2C%22value%22%3A%5B%22TCGA-LUAD%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.data_category%22%2C%22value%22%3A%5B%22Transcriptome%20Profiling%22%5D%7D%7D%5D%7D%5D%7D&query=cases.project.project_id%20in%20%5BTCGA-LUAD%5D%20and%20files.data_category%20in%20%5B%22Transcriptome%20Profiling%22%5D%20&searchTableTab=files'\n\n #first_open_url(urlLUSC)\n #links = file_to_list('/Users/frozmannik/PycharmProjects/biologyScrape/lusc links.txt')\n\n files = file_to_list(\"/Users/frozmannik/PycharmProjects/biologyScrape/mergedLUSC.txt\")\n # parallelAssigning(links)\n #assignFileNameToEntityID('https://portal.gdc.cancer.gov/files/a53757ce-a89e-47a3-bd07-0c996f323499')\n #first_open_url(urlLUADLast)\n # os.chdir('/Users/frozmannik/Desktop/LUAD data/extracted')\n #unzip_files(os.listdir('/Users/frozmannik/Desktop/LUAD data'))\n # path = '/Users/frozmannik/Desktop/LUAD data/extracted/files'\n #downloading(list)\n #print( os.listdir('/Users/frozmannik/Desktop/data/extracted'))\n #save_txt(os.listdir('/Users/frozmannik/Desktop/LUAD data/extracted'), path)\n # save_links_without_page(items_links)\n\n #print(len(os.listdir('/Users/frozmannik/Desktop/LUAD data')))\n dic = {}\n path = '/Users/frozmannik/Desktop/finaltxtLUSC/'\n os.chdir('/Users/frozmannik/Desktop/finaltxtLUSC') # set working directory\n for file in files:\n oldName, newName = file.split(\":\")[0],file.split(\":\")[1]\n #print(oldName +\" : \" + newName)\n if \"-UQ\" in oldName:\n newName = newName + \"-UQ\"\n try:\n os.rename(path+oldName[:-3],path+newName+\".txt\")\n except FileNotFoundError:\n print(\"file not found\")\n\n\n # print(os.listdir())\n print(\"end of execution\")\n #print(os.listdir('/Users/frozmannik/Desktop/LUAD data'))\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":16408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"623940595","text":"__author__ = 'nilay'\n\nimport json\n\nclass JF(object):\n def __init__(self):\n self.name = \"JSON Folding\"\n self.threshold = 20\n \n @staticmethod\n def preprocess(self,jsonobject):\n \"\"\"Check if JSON level 2 objects and below contain more dictionaries than threshold.\n Fold them as an array by adding new key value pair: \"key\":<actual key from unfolded json>.\"\"\"\n \n foldedJson = {}\n \n for key in jsonobject.keys():\n # Process only if dictionary value is also json \n if type(jsonobject[key]) == dict:\n # Process only if there are more than threshold objects\n if len(jsonobject[key].keys()) > self.threshold:\n #print len(jsonobject[key].keys())\n foldedData = []\n # Create array out of long dictionary object\n for innerkey in jsonobject[key].keys():\n innerdict = jsonobject[key][innerkey]\n innerdict.update({\"unfoldedKey\":innerkey})\n foldedData.append(innerdict)\n foldedJson[key] = foldedData\n else:\n foldedJson[key] = jsonobject[key]\n else:\n foldedJson[key] = jsonobject[key]\n return foldedJson\n\ndef main():\n\n jsonfilename = \"aac-objects\"\n jsonobject = json.load(open(jsonfilename+'.json','r'))\n jppInstance = JF()\n jsondata = JF.preprocess(jppInstance,jsonobject)\n json.dump(jsondata, open(jsonfilename+'_folded.json','w'),indent=4)\n \nif __name__ == \"__main__\":\n 
main()","sub_path":"aac-objects/json_folding.py","file_name":"json_folding.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"147439337","text":"\"\"\"\nUtilities to manage AWS Elastic Filesystem resources.\n\nTo delete EFS filesystems, use ``aegea rm``.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os, sys, argparse, getpass, base64\nfrom datetime import datetime\n\nfrom . import register_parser\nfrom .ls import add_name, filter_collection, filter_and_tabulate, register_listing_parser\nfrom .util import Timestamp, paginate\nfrom .util.printing import format_table, page_output, get_field, get_cell, tabulate\nfrom .util.aws import clients, ensure_vpc, ensure_subnet, encode_tags, make_waiter, ensure_security_group\nfrom .util.compat import lru_cache\n\ndef efs(args):\n efs_parser.print_help()\n\nefs_parser = register_parser(efs, help=\"Manage Elastic Filesystem resources\", description=__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n\ndef ls(args):\n table = []\n for filesystem in clients.efs.describe_file_systems()[\"FileSystems\"]:\n filesystem[\"tags\"] = clients.efs.describe_tags(FileSystemId=filesystem[\"FileSystemId\"])[\"Tags\"]\n for mount_target in clients.efs.describe_mount_targets(FileSystemId=filesystem[\"FileSystemId\"])[\"MountTargets\"]:\n mount_target.update(filesystem)\n table.append(mount_target)\n args.columns += args.mount_target_columns\n page_output(tabulate(table, args, cell_transforms={\"SizeInBytes\": lambda x, r: x.get(\"Value\") if x else None}))\n\nparser = register_listing_parser(ls, parent=efs_parser, help=\"List EFS filesystems\")\nparser.add_argument(\"--mount-target-columns\", nargs=\"+\")\n\ndef create(args):\n vpc = ensure_vpc()\n creation_token = base64.b64encode(bytearray(os.urandom(24))).decode()\n fs = clients.efs.create_file_system(CreationToken=creation_token, PerformanceMode=args.performance_mode)\n clients.efs.create_tags(FileSystemId=fs[\"FileSystemId\"], Tags=encode_tags(args.tags + [\"Name=\" + args.name]))\n waiter = make_waiter(clients.efs.describe_file_systems, \"FileSystems[].LifeCycleState\", \"available\", \"pathAny\")\n waiter.wait(FileSystemId=fs[\"FileSystemId\"])\n for subnet in vpc.subnets.all():\n clients.efs.create_mount_target(FileSystemId=fs[\"FileSystemId\"],\n SubnetId=subnet.id,\n SecurityGroups=[ensure_security_group(g, vpc).id for g in args.security_groups])\n return fs\n\nparser = register_parser(create, parent=efs_parser, help=\"Create an EFS filesystem\")\nparser.add_argument(\"name\")\nparser.add_argument(\"--performance-mode\", choices={\"generalPurpose\", \"maxIO\"}, default=\"generalPurpose\")\nparser.add_argument(\"--tags\", nargs=\"+\", default=[], metavar=\"NAME=VALUE\")\nparser.add_argument(\"--security-groups\", nargs=\"+\", default=[__name__])\n","sub_path":"aegea/efs.py","file_name":"efs.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"127809085","text":"import sys\nimport time\n\ndef bar(num, total):\n r = '\\r[%s%s]%d%%' % (\"=\" * num, \" \"*(total-num), num, )\n sys.stdout.write(r)\n sys.stdout.flush()\n\nN = 100\nTIME = 0.1\nif __name__ == '__main__':\n for i in range(0, N + 1):\n time.sleep(TIME)\n bar(i, 
N)\n","sub_path":"python_fun/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"45847529","text":"class Karyawan:\n nama_perusahaan = 'ABC'\n insentif_lembur = 250000\n def __init__(self, nama, usia, pendapatan):\n self.__nama = nama\n self.__usia = usia\n self.__pendapatan = pendapatan\n self.__pendapatan_tambahan = 0\n def lembur(self):\n insentif_lembur = self.__insentif_lembur\n if usia > 30:\n insentif_lembur *= 2\n self.__pendapatan_tambahan += insentif_lembur\n def tambahan_proyek(self, insentif_proyek):\n self.__pendapatan_tambahan += insentif_proyek\n def total_pendapatan(self):\n return self.__pendapatan + self.__pendapatan_tambahan\n\nkaryawan_1 = Karyawan('Kiki', 35, 8000000)\nkaryawan_1.lembur()\nkaryawan_1.tambahan_proyek(karyawan_1.total_pendapatan())\nprint(karyawan_1.total_pendapatan())\n\n\n'''\nPotongan kode di atas akan menghasilkan output.\n\nAns:\nPotongan kode gagal dijalankan.\n'''","sub_path":"01 Python for Data Professional Beginner/Python for Data Professional Beginner - Part 3/03 Encapsulation & Inheritance/03 encapsulation quiz2.py","file_name":"03 encapsulation quiz2.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"63577021","text":"# Alec Critten\r\n# Integer factorization program\r\n\r\nimport random\r\nimport math\r\n\r\n# Create an array to hold the prime factors\r\nfactors = []\r\n\r\ndef main():\r\n # Get a random long integer\r\n bigInt = random.getrandbits(30)\r\n\r\n print(\"Integer to be factored:\", bigInt, \"\\n\")\r\n\r\n # Create an array to store prime factors\r\n count = 1\r\n\r\n # The iterative loop to find primes\r\n while (count < bigInt):\r\n\r\n # Iniitially test if the number is even\r\n if count == 1:\r\n bigInt = divTwo(bigInt)\r\n print(\"largest factor: \", int(bigInt))\r\n\r\n # Test the number's primality. 
If prime, end loop and print factors (all primes found).\r\n if (isPrime(bigInt) & (bigInt != 1)):\r\n factors.append(bigInt)\r\n bigInt = 0\r\n elif ((bigInt % count == 0) & (count != 1)):\r\n bigInt = bigInt / count\r\n factors.append(count)\r\n print(\"Current largest factor:\", int(bigInt))\r\n count = 1\r\n\r\n # Count iterates by 2 to avoid evens\r\n count += 2\r\n\r\n # Visual indication of factor search\r\n if (count % 1000 == 0):\r\n print(\"(up to\", count, \"in the factor search)\")\r\n\r\n\r\n # OUTSIDE THE WHILE\r\n # add the remaining int and print results\r\n if (bigInt > 1):\r\n factors.append(bigInt)\r\n print(\"\\n\")\r\n print(\"here are the prime factors:\")\r\n for prime in factors:\r\n print(int(prime), \"\\t\")\r\n\r\n\r\n\r\n# Divides the number by two as long as it is even\r\ndef divTwo(num):\r\n while (num % 2 == 0):\r\n num = num / 2\r\n factors.append(num)\r\n return num\r\n\r\n# Tests the number's primality\r\ndef isPrime(num):\r\n for ct in range(2, math.floor(num / 2)):\r\n if (num % ct == 0):\r\n return False\r\n return True\r\n\r\n\r\nmain()\r\n","sub_path":"int_fact.py","file_name":"int_fact.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"364242016","text":"# dataset settings\ndataset_type = 'STAREDataset'\ndata_root = 'data/STARE'\nimg_scale = (605, 700)\ncrop_size = (128, 128)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='RandomResize',\n scale=img_scale,\n ratio_range=(0.5, 2.0),\n keep_ratio=True),\n dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(type='PackSegInputs')\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='Resize', scale=img_scale, keep_ratio=True),\n # add loading annotation after ``Resize`` because ground truth\n # does not need to do resize data transform\n dict(type='LoadAnnotations'),\n dict(type='PackSegInputs')\n]\nimg_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]\ntta_pipeline = [\n dict(type='LoadImageFromFile', backend_args=None),\n dict(\n type='TestTimeAug',\n transforms=[\n [\n dict(type='Resize', scale_factor=r, keep_ratio=True)\n for r in img_ratios\n ],\n [\n dict(type='RandomFlip', prob=0., direction='horizontal'),\n dict(type='RandomFlip', prob=1., direction='horizontal')\n ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]\n ])\n]\ntrain_dataloader = dict(\n batch_size=4,\n num_workers=4,\n persistent_workers=True,\n sampler=dict(type='InfiniteSampler', shuffle=True),\n dataset=dict(\n type='RepeatDataset',\n times=40000,\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_prefix=dict(\n img_path='images/training',\n seg_map_path='annotations/training'),\n pipeline=train_pipeline)))\nval_dataloader = dict(\n batch_size=1,\n num_workers=4,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_prefix=dict(\n img_path='images/validation',\n seg_map_path='annotations/validation'),\n pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(type='IoUMetric', iou_metrics=['mDice'])\ntest_evaluator = val_evaluator\n","sub_path":"configs/_base_/datasets/stare.py","file_name":"stare.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"639713435","text":"from __future__ import print_function\nfrom collections import namedtuple\nfrom decimal import *\nimport xml.etree.ElementTree as ET\nfrom pathbuilder import build_paths\nfrom timestampers import Timestamp, channel4, influx_ts\nfrom clients.influx import InfluxDatabaseClient, Datum, Data, Metadata, Record, Records, Tag\nimport time\n\nChannel4Record = namedtuple('Channel4Record', 'depth temperature timestamp')\n\n\ndef get_root(infile):\n tree = ET.parse(infile)\n root = tree.getroot()\n return root\n\n\ndef obtain_timestamps(root, schema):\n min_time = root.find(schema + 'wellLog').find(schema + 'minDateTimeIndex').text\n max_time = root.find(schema + 'wellLog').find(schema + 'maxDateTimeIndex').text\n min_timestamp = Timestamp(min_time, channel4)\n min_timestamp.adjust(1, \"microseconds\") # add one to avoid overlaps\n max_timestamp = Timestamp(max_time, channel4)\n max_timestamp.adjust(-1, \"microseconds\") # subtract one to avoid overlaps\n return min_timestamp, max_timestamp\n\n\ndef obtain_records(root, schema, min_timestamp, max_timestamp):\n records = []\n for measurement in root.find(schema + 'wellLog').find(schema + 'logData').findall(schema + 'data'):\n measurement_data = measurement.text.split(',')\n depth = measurement_data[0]\n temperature = measurement_data[3]\n records.append(Channel4Record(depth, temperature, min_timestamp))\n #records.append(Channel4Record(depth, temperature, max_timestamp))\n return records\n\ndef downsample_records(records, sample_rate):\n records = sorted(records, key = lambda x: Decimal(x.depth))\n mask = range(0, len(records), sample_rate)\n records = [records[i] for i in mask]\n return records\n\n\ndef obtain_sleep_time(min_timestamp, prev_min_time):\n nanosecond_to_second_conversion_factor = 1000000000\n prev = prev_min_time.to(influx_ts)/nanosecond_to_second_conversion_factor\n min = min_timestamp.to(influx_ts)/nanosecond_to_second_conversion_factor\n sleep_time = min - prev\n return sleep_time\n\n\ndef write(client, records, log, database='spirit'):\n client.database = database\n client.database.create()\n influx_records = []\n for record in records:\n tag = Tag(\"depth\", record.depth)\n datum = Datum(\"value\", record.temperature)\n data = Data([datum])\n metadata = Metadata(\"dts\", tags=[tag])\n influx_records.append(Record(data, metadata))\n print (\"writing {} records (in write function)\".format(len(influx_records)), file=log)\n print (len(influx_records))\n for i in range(len(records)/4000):\n client.write(Records(influx_records[4000*i:4000*(i+1)]))\n client.write(Records(influx_records[(len(records)/4000) * 4000:]))\n\n #client.write(Records(influx_records[:4000]))\n #client.write(Records(influx_records[4000:8000]))\n #client.write(Records(influx_records[8000:12000]))\n #client.write(Records(influx_records[12000:16000]))\n #client.write(Records(influx_records[16000:20000]))\n #client.write(Records(influx_records[20000:24000]))\n #client.write(Records(influx_records[24000:28000]))\n #client.write(Records(influx_records[28000:32000]))\n #client.write(Records(influx_records[32000:36000]))\n #client.write(Records(influx_records[36000:]))\n\n\nif __name__ == '__main__':\n log = open('generate_data_log.out', 'a')\n dts_schema = '{http://www.witsml.org/schemas/1series}'\n paths = build_paths([\"dts\"])\n paths = sorted(paths)\n #for path in paths:\n # print(path, file=log)\n client = InfluxDatabaseClient()\n prev_min_time = None\n sleep_time_scaler = 1200\n #sleep_time_scaler = 20\n for path in paths[28:]:\n 
log.flush()\n print(path, file=log)\n root = get_root(path)\n min_timestamp, max_timestamp = obtain_timestamps(root, dts_schema)\n records = obtain_records(root, dts_schema, min_timestamp, max_timestamp)\n records = downsample_records(records, sample_rate=100)\n print(len(records), file=log)\n if prev_min_time:\n sleep_time = obtain_sleep_time(min_timestamp, prev_min_time)\n print(str(sleep_time), file=log)\n print(\"sleeping for {} seconds\\nsleep_time={}, sleep time scaler = {}\".format(sleep_time/sleep_time_scaler, sleep_time, sleep_time_scaler))\n print(\"after sleep, will write {} records\".format(len(records)))\n time.sleep(sleep_time/sleep_time_scaler)\n log.write(\"writing {} records\".format(len(records)))\n write(client, records, log)\n prev_min_time = min_timestamp\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass Reader(object):\n def __init__(self):\n pass\n\n def read(self, infile):\n '''overrride this method'''\n pass\n\n\n\n\n\nclass XMLReader(Reader):\n def read(self, infile):\n self._root = self._get_root(infile)\n records = self._obtain_records (self._root)\n return records\n\n def _get_root(self, infile):\n tree = ET.parse(infile)\n root = tree.getroot()\n return root\n\n def _obtain_records(self, root):\n '''override this method'''\n pass\n\nclass DTSXMLReader(XMLReader):\n def __init__(self):\n super(DTSXMLReader, self).__init__()\n self.schema = '{http://www.witsml.org/schemas/1series}'\n\n def _obtain_records(self, root):\n records = []\n min_time = root.find(self.schema + 'wellLog').find(self.schema + 'minDateTimeIndex').text\n max_time = root.find(self.schema + 'wellLog').find(self.schema + 'maxDateTimeIndex').text\n for measurement in root.find(self.schema + 'wellLog').find(self.schema + 'logData').findall(self.schema + 'data'):\n measurement_data = measurement.text.split(',')\n min_timestamp = Timestamp(min_time, channel4)\n min_timestamp.adjust(1, \"microseconds\") #add one to avoid overlaps\n max_timestamp = Timestamp(max_time, channel4)\n max_timestamp.adjust(-1, \"microseconds\") #subtract one to avoid overlaps\n depth = measurement_data[0]\n temperature = measurement_data[3]\n records.append(Channel4Record(depth, temperature, min_timestamp))\n records.append(Channel4Record(depth, temperature, max_timestamp))\n return records\n","sub_path":"udf/agent/examples/spirit/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"368061845","text":"from HeavenEnemySelectionClass import *\nfrom DemonClass import *\nfrom RogueAngelClass import *\nfrom ArchangelMichaelClass import *\nfrom GODClass import *\nfrom LuciferClass import *\nfrom GhostSamuraiClass import *\nfrom LightningAngelClass import *\nfrom DragonKnightClass import *\nimport os\n\nclass Heaven(object):\n\tdef __init__(self, player):\n\t\tself.player = player\n\t\tself.playerAlive = True\n\t\tself.enemyAlive = True\n\t\tself.playerTurn = True\n\t\tself.EnemyTurn = False\n\t\tself.Turn = 0\n\t\tself.attack = 0\n\t\tself.GSOptions = ['1 = Physical Attack', '2 = Ethereal Blades']\n\t\tself.LAOptions = ['1 = Physical Attack', '2 = Chain Lightning']\n\t\tself.DKOptions = ['1 = Physical Attack', '2 = Implosion']\n\t\t\n\tdef ChooseEnemy(self, player):\n\t\tHES = HeavenEnemySelection()\n\t\tenemy = HES.ChooseEnemy()\n\t\tif enemy == None:\n\t\t\treturn player\n\t\telse:\n\t\t\tplayer = self.BattleEnemy(player, enemy)\n\t\t\treturn player\n\t\t\n\tdef BattleEnemy(self, player, 
enemy):\n\t\twhile (self.enemyAlive == True or self.playerAlive == True):\n\t\t\ttry:\n\t\t\t\tenemy = self.AttackOptions(player, enemy)\n\t\t\t\tif enemy.currentHP <= 0:\n\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\tprint (str(player.name) + \" has killed the \" + (str(enemy.name)) + \".\")\n\t\t\t\t\tplayer = enemy.DropEXP(player, enemy)\n\t\t\t\t\tself.playerAlive = False\n\t\t\t\t\tplayer = self.ChooseEnemy(player)\n\t\t\t\t\treturn player\n\t\t\t\tplayer = enemy.Attack(player, enemy)\t\n\t\t\t\tif player.currentHP <= 0:\n\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\tprint (str(player.name) + \"has died.\")\n\t\t\t\t\tif player.lives <= 0:\n\t\t\t\t\t\tprint(\"Game Over\")\n\t\t\t\t\t\tplayer.currentHP = 0\n\t\t\t\t\tif player.lives == 1:\n\t\t\t\t\t\tplayer.lives -= 1\n\t\t\t\t\t\tplayer.currentHP = player.MaxHP\n\t\t\t\t\t\tpause = input(str(player.name) + \" is out of lives!!\")\n\t\t\t\t\t\tplayer = self.ChooseEnemy(player)\n\t\t\t\t\t\tself.playerAlive = False\n\t\t\t\t\t\treturn player\n\t\t\texcept:\n\t\t\t\tprint (\"HP = \" + str(player.currentHP))\n\t\t\t\treturn player\n\t\t\t\t\n\t\t\t\t\t\t\n\tdef AttackOptions(self, player, enemy):\n\t\tDK = DragonKnight()\n\t\tGS = GhostSamurai()\n\t\tLA = LightningAngel()\n\t\tif type(player) == type(GS):\n\t\t\tprint (str(self.GSOptions))\n\t\t\tattackGS = input('Choose Your Attack : ')\n\t\t\tif attackGS == \"1\":\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\treturn enemy\n\t\t\tif attackGS == \"2\":\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tplayer = player.SpendMP(player)\n\t\t\t\tif player.currentMP < 0:\n\t\t\t\t\tprint(str(player.name) + \" is out of MP. So \" + str(player.name) + \" attacks instead.\")\n\t\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\t\treturn enemy\n\t\t\t\telse:\n\t\t\t\t\tenemy = player.EtherealBlades(player, enemy)\t\n\t\t\t\t\treturn enemy\n\t\t\telse:\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\treturn enemy\n\t\tif type(player) == type(LA):\n\t\t\tprint (str(self.LAOptions))\n\t\t\tattackLA = input('Choose Your Attack : ')\n\t\t\tif attackLA == \"1\":\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\treturn enemy\n\t\t\tif attackLA == \"2\":\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tplayer = player.SpendMP(player)\n\t\t\t\tif player.currentMP < 0:\n\t\t\t\t\tprint(str(player.name) + \" is out of MP. So \" + str(player.name) + \" attacks instead.\")\n\t\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\t\treturn enemy\n\t\t\t\telse:\n\t\t\t\t\tenemy = player.ChainLightning(player, enemy)\n\t\t\t\t\treturn enemy\n\t\t\telse:\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\treturn enemy\n\t\tif type(player) == type(DK):\n\t\t\tprint (str(self.DKOptions))\n\t\t\tattackDK = input('Choose Your Attack : ')\n\t\t\tif attackDK == \"1\":\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\treturn enemy\n\t\t\tif attackDK == \"2\":\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tplayer = player.SpendMP(player)\n\t\t\t\tif player.currentMP < 0:\n\t\t\t\t\tprint(str(player.name) + \" is out of MP. 
So \" + str(player.name) + \" attacks instead.\")\n\t\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\t\treturn enemy\n\t\t\t\telse:\n\t\t\t\t\tenemy = player.Implosion(player, enemy)\t\n\t\t\t\t\treturn enemy\t\t\n\t\t\telse:\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tenemy = player.Attack(player, enemy)\n\t\t\t\treturn enemy","sub_path":"HeavenClass.py","file_name":"HeavenClass.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"393948564","text":"# configure this as the hub of the group\nimport os\n\nos.environ['NET_IS_HUB'] = 'True'\n\n# Importing the application code will automatically launch the peer and begin\n# listening for connection requests as well as set up the connection registry.\nimport app\n\nif __name__ == '__main__':\n print(\"Enter the message to send to the peers subscribed to you.\")\n while 1:\n # As you can imagine, this can be used anywhere in your application. In\n # this example, we are just going to take your message and broadcast it\n # to all the subscribed peers.\n your_message = input(\"Message: \")\n\n # This will trigger the \"myEvent\" that was wrapped on around this\n # function. When this is triggered, it will package up your message and\n # send it to the peers.\n app.something_happened(your_message)\n","sub_path":"examples/simple_subscription/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"299645936","text":"from docx import Document\nfrom docx.shared import Inches\nfrom docx.enum.table import WD_TABLE_ALIGNMENT\nfrom docx.enum.table import WD_ALIGN_VERTICAL\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.oxml.ns import qn\nfrom docx.shared import Cm,Pt,RGBColor\nfrom docx.enum.style import WD_STYLE_TYPE\nfrom docx import Document\nfrom docx.shared import Cm,Pt,RGBColor\nfrom docx.enum.table import WD_TABLE_ALIGNMENT\nfrom docx.enum.table import WD_ALIGN_VERTICAL\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.oxml.ns import qn\nimport pandas as pd\n\ndf = pd.read_excel(\"/Users/wangsiren/Downloads/ForHBNUSU/LiteratureandSportsDept/人名学号对照表.xlsx\",usecols=[0,1,2,3],names=None) # 读取项目名称列,不要列名\ndf_li = df.values.tolist()\nresult = []\nfor s_li in df_li:\n result.append(s_li)\n\n\n\ndocument = Document(\"/Users/wangsiren/Downloads/ForHBNUSU/LiteratureandSportsDept/空白A4.docx\")\ndocument.styles['Normal'].font.name = u'宋体'\ndocument.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')\nstyle = document.styles['Normal']\nfont = style.font\nfont.size = Pt(14)\n\n\n\ntable0 = document.add_table(rows=1, cols=1)\ncell0 = table0.cell(0,0)\ncell0.text = result[0][2]+\"\\n\" + \"带队老师:\"+result[0][3]\ncell0.paragraphs[0].paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER\ncell0.paragraphs[0].paragraph_format.alignment = WD_ALIGN_VERTICAL.CENTER\nrun = table0.cell(0,0).paragraphs[0].add_run('smida')\nrun.font.name = '宋体'\nrun.font.size = 14\n\ntotal = len(result)\nprint(len(result))\nC = 3\nR = total // C * 2\nn = 0\ntable = document.add_table(rows=R, cols=C)\nfor i in range(0,R//2):\n for m in range(0,C):\n cells = table.cell(i,m)\n paragraph = cells.paragraphs[0]\n run = paragraph.add_run()\n run.add_picture('/Users/wangsiren/Downloads/ForHBNUSU/LiteratureandSportsDept/照片/'+result[n][0]+'.jpg', width=Inches(1.25))\n cells.paragraphs[0].paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER\n 
cells.paragraphs[0].paragraph_format.alignment = WD_ALIGN_VERTICAL.CENTER\n cells = table.cell(i+1,m)\n cells.text = result[n][0]+\"\\n\" + str(result[n][1])\n cells.paragraphs[0].paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER\n cells.paragraphs[0].paragraph_format.alignment = WD_ALIGN_VERTICAL.CENTER\n n = n + 1\n if n == len(result):break\n if n == len(result):break\ndocument.save(\"/Users/wangsiren/Downloads/ForHBNUSU/LiteratureandSportsDept/学院.docx\")\n","sub_path":"ZXC.py","file_name":"ZXC.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"172141100","text":"\nfrom bs4 import BeautifulSoup\nimport requests\nreq = requests.get('http://www.sina.com.cn/')\nreq.encoding = 'utf-8'\n# print(req.text)\nhtml_demo = '\\<html>\\\n<head>\\\n<title>我的第一个 HTML 页面\\\n\\\n\\\n

</title>\\\n</head>\\\n<body>\\\n<p>body 元素的内容会显示在浏览器中。</p>\\\n<p>title 元素的内容会显示在浏览器的标题栏中。</p>\\\n</body>\\\n</html>
\\\n\\\n'\nsoup = BeautifulSoup(html_demo, 'html.parser')\n# print(soup)\nprint(soup.text)\ntext = soup.select('p')\nprint(text[0].text)\n\n","sub_path":"pythonPachong/InternetPaChong.py","file_name":"InternetPaChong.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"267779639","text":"# N * N 숫자가 시계방향으로 이뤄짐.\n# 1 2 3\n# 8 9 4\n# 7 6 5\n\n# index\n# (0,0) (0,1) (0,2)\n# (1,0) (1,1) (1,2)\n# (2,0) (2,1) (2,2)\n\n# T = int(input())\n#\n# for tc in range(1, T+1):\n# N = int(input())\n# snail = [[0]*N for _ in range(N)]\n#\n# dr = [0, 1, 0, -1]\n# dc = [1, 0, -1, 0]\n#\n# r, c = 0, 0\n# dist = 0\n#\n# for n in range(1, N * N + 1):\n# snail[r][c] = n\n# r += dr[dist]\n# c += dc[dist]\n#\n# if r < 0 or c < 0 or r >= N or c >= N or snail[r][c] != 0:\n# r -= dr[dist]\n# c -= dc[dist]\n# dist = (dist + 1) % 4\n# r += dr[dist]\n# c += dc[dist]\n#\n# print(\"#{}\".format(tc))\n# for row in snail:\n# print(*row)\n# print()\n\ntc = int(input())\n\nfor idx in range(1, tc + 1):\n snail = [[0] * idx for _ in range(idx)]\n dx = [0, 1, 0, -1]\n dy = [1, 0, -1, 0]\n\n cnt = 1\n x, y = 0, -1\n\n k = 0\n while cnt <= idx * idx:\n nx, ny = x + dx[k], y + dy[k]\n if 0 <= nx < idx and 0 <= ny < idx and snail[nx][ny] == 0:\n snail[nx][ny] = cnt\n cnt += 1\n x, y = nx, ny\n else:\n k = (k + 1) % 4\n print(\"#{}\".format(idx))\n for i in range(idx):\n print(*snail[i])","sub_path":"SWEA/Recommand/달팽이 숫자.py","file_name":"달팽이 숫자.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"618761960","text":"# useful library for reading in data\nimport pandas as pd\n\n# the base API\napi = \"https://data.consumerfinance.gov/resource/jhzv-w97w.json\"\n\n# the number of records we're going to request\nquery = '?&$limit=100000'\n\n# additional API specifications I'm not using right now\n#$where=date%20between%20%272014-01-01T00:00:00%27%20and%20%272015-01-01T00:00:00%27'\n \n# I have an app token in case we need that, but I haven't so far\ndataset_identifier = 'jhzv-w97w'\nAPP_TOKEN = '48ozcpj4nCO3mqgJOl8GoIJgF'\ntoken = '?$$app_token='\n\n# total query we're running at the moment\nfull_query = api+query\n\n# calls down the data from the CFPB API and reads it into memory\n# remember that we only requested 100,000 records (total is ~750k)\ncfpb = pd.read_json(full_query)\n\n# saves the data as a csv\ncfpb.to_csv('cfpb_sample_data.csv',encoding='utf-8',index=False)\n\n# this squeezes in under the 50M limit at which github yells at you\n","sub_path":"data/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"340460912","text":"n = int(input())\na = list(map(int,input().split()))\nmax=0\nl=1\nfor i in range(n-1):\n if(a[i]>a[i+1]):\n if(max 0:\n\t\t\tp = bit_copy.bit_length()-1\n\t\t\ton_bit_pos.append(p)\n\t\t\tbit_copy -= 2**p\n\t\tall_values = [Circle.EMPTY for _ in range(42)]\n\t\tfor bit_pos in on_bit_pos:\n\t\t\tpos = int(bit_pos/3)\n\t\t\tif bit_pos % 3 == 1:\n\t\t\t\tall_values[pos] = Circle.RED\n\t\t\telif bit_pos % 3 == 2:\n\t\t\t\tall_values[pos] = Circle.YELLOW\n\t\tboard = []\n\t\tfor i in range(0, len(all_values), 6):\n\t\t\tboard.append(all_values[i:(i+6)])\n\t\treturn board, turn\n\n\tdef check_winner(self):\n\t\tlines = (\n\t\t\tself.board, # columns\n\t\t\tzip(*self.board), # 
rows\n\t\t\tdiagonalsPos(self.board, 7, 6), # positive diagonals\n\t\t\tdiagonalsNeg(self.board, 7, 6) # negative diagonals\n\t\t)\n\n\t\tfor line in chain(*lines):\n\t\t\tfor color, group in groupby(line):\n\t\t\t\tif color != Circle.EMPTY and len(list(group)) >= 4:\n\t\t\t\t\treturn color\n\n\tdef printBoard(self):\n\t\t\"\"\"Print the board.\"\"\"\n\t\tprint(' '.join(map(str, range(self.cols))))\n\t\tfor y in range(self.rows):\n\t\t\tprint(' '.join(str(self.board[x][y].value) for x in range(self.cols)))\n","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"592953864","text":"import torch\nimport torch.nn as nn\n\nclass Corpus:\n\n def __init__(self, stem, min_len=2, max_len=10):\n self.train = open('train.' + stem + '.txt', 'r', encoding='utf-8')\n self.valid = open('valid.' + stem + '.txt', 'r', encoding='utf-8')\n self.test = open('test.' + stem + '.txt', 'r', encoding='utf-8')\n self.min_len = min_len\n self.max_len = max_len\n f = open('voc.' + stem + '.txt', 'r', encoding='utf-8')\n lines = f.read().strip().split('\\n')\n vocab = [ line.split()[0] for line in lines ]\n self.BOS = len(vocab)\n vocab.append('BOS')\n self.EOS = len(vocab)\n vocab.append('EOS')\n f.close()\n self.vocab = vocab\n\n def __del__(self):\n self.train.close()\n self.valid.close()\n self.test.close()\n\n def rewind(self, f):\n f.seek(0)\n\n def read_seq(self, f, test=False):\n while True:\n for line in f:\n seq = [ int(x) - 1 for x in line.strip().split() ]\n if len(seq) >= self.min_len and len(seq) <= self.max_len:\n yield [self.BOS] + seq + [self.EOS]\n if test: return\n f.seek(0)\n\n def _get_batch(self, f, batch_size=10, test=False):\n batch = list()\n target = list()\n for seq in self.read_seq(f, test=test):\n batch.append(seq[:-1])\n target.append(seq[1:])\n if len(batch) == batch_size:\n yield batch, target\n batch = list()\n target = list()\n yield batch, target\n\n def get_batch(self, f, batch_size=10, test=False):\n for batch, target in self._get_batch(f, batch_size=batch_size, test=test):\n perm = sorted(range(len(batch)), key=lambda k: len(batch[k]), reverse=True)\n batch = [ batch[i] for i in perm ]\n target = [ target[i] for i in perm ]\n yield batch, target\n \n","sub_path":"rnn_data.py","file_name":"rnn_data.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"22428657","text":"from datetime import datetime\nfrom django.shortcuts import render, redirect\nfrom .models import Book, User\nfrom django.contrib import messages\n\n# 2\nHOURS_OF_OPERATION = [\n {'day': 'Sunday', 'open': 'closed', 'close': 'closed'},\n {'day': 'Monday', 'open': '8am', 'close': '5pm'},\n {'day': 'Tuesday', 'open': '8am', 'close': '5pm'},\n {'day': 'Wednesday', 'open': 'closed', 'close': 'closed'},\n {'day': 'Thursday', 'open': '8am', 'close': '5pm'},\n {'day': 'Friday', 'open': '8am', 'close': '5pm'},\n {'day': 'Saturday', 'open': '8am', 'close': '5pm'}\n]\n\n\ndef add_book_view(request):\n\n if request.method == 'POST':\n check_books_list(request)\n\n # simple data validation\n title = request.POST.get('title', None) # request.POST['title']\n author = request.POST.get('author', None)\n\n if not title or not author:\n return redirect(request, '/add_book')\n\n # save the book\n request.session['books'].append({\n 'id': len(request.session['books']),\n 'title': title,\n 'author': author\n 
})\n # request.session.save()\n\n # print(request.session['books'])\n\n return redirect('/book') # always redirect after POST\n\n else: # is GET request, return the add book HTML\n return render(request, 'add_book.html')\n\n\ndef book_view(request):\n # removed check_books_list(request)\n\n return render(request, 'book.html', {\n 'books': Book.objects.all()\n })\n\n\ndef check_books_list(request):\n if 'books' not in request.session:\n request.session['books'] = []\n\n\ndef index_view(request):\n day_of_week = datetime.today().weekday()\n user = None if 'user_id' not in request.session else User.objects.get(id=request.session['user_id'])\n\n context = {\n 'todays_hours': HOURS_OF_OPERATION[day_of_week],\n 'hours_of_operation': HOURS_OF_OPERATION,\n 'user': user\n }\n\n return render(request, 'index.html', context)\n\n\ndef login_view(request):\n if request.method == 'POST':\n user = User.objects.authenticate(request.POST.get('email', None), request.POST.get('password', None))\n if user:\n request.session['user_id'] = user.id\n\n return redirect('index')\n\n else:\n messages.add_message(request, messages.ERROR, 'invalid credentials')\n return redirect('login')\n\n else:\n return render(request, 'login.html')\n\n\ndef logout_view(request):\n request.session.clear()\n return redirect('index')\n\n\ndef register_view(request):\n errors = User.objects.validate(request.POST)\n\n if errors:\n for e in errors.values():\n messages.error(request, e)\n return redirect('/login')\n\n else:\n user = User.objects.register(request.POST)\n\n request.session['user_id'] = user.id\n\n return redirect('index')\n\n\ndef view_book(request, book_id):\n check_books_list(request)\n\n # validation\n if book_id > len(request.session['books']) - 1:\n return redirect('/book')\n\n return render(request, 'view_book.html', {\n 'book': request.session['books'][book_id]\n })\n","sub_path":"book_website/book_website/book_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"351589905","text":"import pytest\nfrom spytest import st, tgapi\n\nfrom tests.qos.qos_map import verify_counter_cpu_asic_bcm\n\nimport apis.system.basic as basic_obj\nimport apis.routing.ip as ip_obj\nimport apis.common.asic as asicapi\nfrom apis.system.sflow import enable_disable_config, add_del_collector\nimport apis.qos.copp as copp_obj\nimport apis.routing.ip_helper as ip_helper_obj\nimport apis.switching.vlan as vlan_obj\n\ncopp_data = dict()\ncopp_data_pir = dict()\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef copp_module_hooks(request):\n global vars, tg, tg_ph_1, d1_p1,hw_constants, deviation_percentage, d1_p1_mac, copp_data , vlan_igmp, copp_data_pir\n vars = st.ensure_min_topology(\"D1T1:1\")\n hw_constants = st.get_datastore(vars.D1 , \"constants\", \"default\")\n st.debug(\"hw_constants: {}\".format(hw_constants))\n tg, tg_ph_1 = tgapi.get_handle_byname(\"T1D1P1\")\n d1_p1 = vars.D1T1P1\n vlan_igmp = 3188\n vlan_obj.create_vlan(vars.D1, vlan_igmp)\n deviation_percentage = 0.05\n ret_val = copp_obj.get_copp_config(dut=vars.D1, table_name='all')\n if ret_val:\n copp_data = ret_val\n else:\n st.report_fail('module_config_failed', 'show copp config command failed')\n copp_data_pir = copp_obj.set_copp_pir_config(vars.D1, config='get')\n # Get the DUT mac address\n d1_p1_mac = basic_obj.get_ifconfig(vars.D1, d1_p1)[0]['mac']\n # Config the routing interface\n ip_obj.config_ip_addr_interface(dut=vars.D1, 
interface_name=d1_p1, ip_address='1.1.1.2', subnet='24')\n yield\n # Un-configure the routing interface\n ip_obj.delete_ip_interface(dut=vars.D1, interface_name=d1_p1, ip_address='1.1.1.2', subnet='24')\n vlan_obj.delete_vlan(vars.D1, vlan_igmp)\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef copp_func_hooks(request):\n asicapi.clear_counters(vars.D1)\n yield\n\n\ndef retrun_group_dict(copp_data,copp_group):\n for each in copp_data:\n if copp_group in each:\n return copp_data[each]['value']\n\n\ndef sflow_copp_config_undo():\n copp_queue = retrun_group_dict(copp_data, 'sflow')['queue']\n string_copp = 'copp-scheduler-policy@' + copp_queue\n\n if \"COPP_TABLE:trap.group.sflow\" in copp_data.keys():\n copp_obj.set_copp_config(vars.D1, [\"COPP_TABLE:trap.group.sflow\", \"cbs\", copp_data[\"COPP_TABLE:trap.group.sflow\"]['value']['cbs']],[\"COPP_TABLE:trap.group.sflow\", \"cir\", copp_data[\"COPP_TABLE:trap.group.sflow\"]['value']['cir']])\n else:\n copp_obj.set_copp_config(vars.D1, [\"COPP_TABLE:copp-system-sflow\", \"cbs\", copp_data[\"COPP_TABLE:copp-system-sflow\"]['value']['cbs']],\n [\"COPP_TABLE:copp-system-sflow\", \"cir\", copp_data[\"COPP_TABLE:copp-system-sflow\"]['value']['cir']])\n if string_copp in copp_data_pir['SCHEDULER'].keys():\n if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '600':\n copp_obj.set_copp_pir_config(vars.D1,'apply', [string_copp, \"pir\", copp_data_pir['SCHEDULER'][string_copp]['pir']])\n st.log(\"performing reboot\")\n st.reboot(vars.D1)\n\ndef sflow_unconfig():\n enable_disable_config(vars.D1, interface=False, interface_name=None, action=\"enable\",\n cli_type=\"klish\")\n add_del_collector(vars.D1, collector_name=\"collector_1\", ip_address=\"1.1.1.1\",\n port_number=None, action=\"add\", cli_type=\"klish\")\n\n@pytest.mark.copp\ndef test_ft_copp_lldp():\n \"\"\"\n scenario : Verify CoPP functionality for lldp\n Author : vishnuvardhan.talluri@broadcom.com\n :return:\n \"\"\"\n success = True\n st.log(\"testcase to verify COPP for lldp\")\n copp_cir_lldp = int(retrun_group_dict(copp_data, 'lldp')['cir'])\n sent_rate_pps = copp_cir_lldp * 2\n deviation = copp_cir_lldp * deviation_percentage\n copp_queue = retrun_group_dict(copp_data, 'lldp')['queue']\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending lldp packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps,copp_cir_lldp))\n tg_stream_handle = tg.tg_traffic_config(port_handle=tg_ph_1, mac_src=\"00:11:97:2F:8E:82\", mac_dst=\"01:80:C2:00:00:0E\",\n mode='create', transmit_mode='continuous', l2_encap='ethernet_ii',\n data_pattern='02 07 04 00 11 97 2F 8E 80 04 07 03 00 11 97 2F 8E 82 06 02 00 78 00 00 00 00 '\n '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00',\n ethernet_value='88CC',rate_pps=sent_rate_pps)['stream_id']\n st.log(\"send lldp packets and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_lldp, tol=deviation):\n st.error('CPU counter check for rate limiting lldp to {}pps is failed'.format(copp_cir_lldp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n if success:\n st.report_pass(\"test_case_passed\")\n else:\n st.report_fail(\"test_case_failed\")\n\n\ndef test_ft_copp_lacp():\n \"\"\"\n scenario : Verify CoPP functionality for lacp\n Author : vishnuvardhan.talluri@broadcom.com\n :return:\n \"\"\"\n success = True\n #copp_cir_lacp = 
hw_constants['COPP_CIR_LACP']\n copp_cir_lacp = int(retrun_group_dict(copp_data, 'lacp')['cir'])\n sent_rate_pps = copp_cir_lacp * 2\n deviation = copp_cir_lacp * deviation_percentage\n st.log(\"testcase to verify COPP for lacp\")\n copp_queue = retrun_group_dict(copp_data, 'lacp')['queue']\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending lacp packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps,copp_cir_lacp))\n\n tg_stream_handle = tg.tg_traffic_config(port_handle=tg_ph_1, mac_src=\"D8:C4:97:72:73:5F\", mac_dst=\"01:80:C2:00:00:02\",\n mode='create', transmit_mode='continuous', data_pattern_mode='fixed',l2_encap='ethernet_ii',\n data_pattern='01 01 01 14 FF FF D8 C4 97 72 73 5F 00 07 00 FF 00 20 85 00 00 00 02 14 00 00 '\n '00 00 00 00 00 00 00 00 00 00 00 00 02 00 00 00 03 10 00 00 00 00 00 00 00 00 '\n '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 '\n '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 '\n '00 00 00 00 00 00', ethernet_value='8809', rate_pps=sent_rate_pps)['stream_id']\n st.log(\"send lacp request and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_lacp, tol=deviation):\n st.error('CPU counter check for rate limiting lacp to {}pps is failed'.format(copp_cir_lacp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n if success:\n st.report_pass(\"test_case_passed\")\n else:\n st.report_fail(\"test_case_failed\")\n\n\n@pytest.mark.copp\ndef test_ft_copp_dhcp():\n \"\"\"\n scenario : Verify CoPP functionality for dhcp\n Author : vishnuvardhan.talluri@broadcom.com\n :return:\n \"\"\"\n success = True\n copp_cir_dhcp = int(retrun_group_dict(copp_data, 'dhcp')['cir'])\n sent_rate_pps = copp_cir_dhcp * 2\n deviation = copp_cir_dhcp * deviation_percentage\n st.log(\"testcase to verify COPP for dhcp\")\n copp_queue = retrun_group_dict(copp_data, 'dhcp')['queue']\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending dhcp packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps,copp_cir_dhcp))\n\n tg_stream_handle = tg.tg_traffic_config(port_handle=[tg_ph_1], mac_src=\"00:13:5F:1F:F2:80\", mac_dst=\"33:33:00:01:00:02\",\n l3_protocol='ipv6', mode='create', transmit_mode='continuous',\n rate_pps=sent_rate_pps, data_pattern='01 D1 49 5E 00 08 00 02 00 78 00 01 00 0A 00 03 00 01 00 13 '\n '5F 1F F2 80 00 06 00 06 00 19 00 17 00 18 00 19 00 0C 00 33 '\n '00 01 00 00 00 00 00 00 00 00', frame_size=116,\n ipv6_dst_addr=\"FF02:0:0:0:0:0:1:2\", ipv6_src_addr=\"FE80:0:0:0:201:5FF:FE00:500\",\n ipv6_next_header=17, ipv6_traffic_class=224,l4_protocol='udp',udp_dst_port=546,\n udp_src_port=547, ipv6_hop_limit=255)['stream_id']\n st.log(\"send dhcpv6 solicit and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_dhcp, tol=deviation):\n st.error('CPU counter check for rate limiting dhcp to {}pps is failed'.format(copp_cir_dhcp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n\n if success:\n st.report_pass(\"test_case_passed\")\n else:\n st.report_fail(\"test_case_failed\")\n\n\n@pytest.mark.copp\ndef test_ft_copp_arp():\n \"\"\"\n scenario : Verify CoPP functionality for arp\n 
Author : vishnuvardhan.talluri@broadcom.com\n :return:\n \"\"\"\n success = True\n copp_cir_arp = int(retrun_group_dict(copp_data, 'arp')['cir'])\n sent_rate_pps = copp_cir_arp * 2\n deviation = copp_cir_arp * deviation_percentage\n copp_queue = retrun_group_dict(copp_data, 'arp')['queue']\n st.log(\"testcase to verify COPP for arp\")\n\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending ARP packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps,copp_cir_arp))\n\n tg_stream_handle = tg.tg_traffic_config(port_handle=[tg_ph_1], mac_src=\"00:00:00:11:11:80\", mac_dst=\"FF:FF:FF:FF:FF:FF\",\n mode='create', transmit_mode='continuous',rate_pps=sent_rate_pps, l2_encap='ethernet_ii',\n l3_protocol='arp', arp_src_hw_addr=\"00:00:00:11:11:80\", arp_dst_hw_addr=\"00:00:00:00:00:00\",\n arp_operation='arpRequest', ip_src_addr='1.1.1.1', ip_dst_addr='1.1.1.2')['stream_id']\n st.log(\"send ARP request and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_arp, tol=deviation):\n st.error('CPU counter check for rate limiting arp to {}pps is failed'.format(copp_cir_arp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n\n if success:\n st.report_pass(\"test_case_passed\")\n else:\n st.report_fail(\"test_case_failed\")\n\n\n@pytest.mark.copp\ndef test_copp_ndp():\n \"\"\"\n scenario : Verify that IPv6 NDP control packets are getting rate-limited in CoPP with srtcm policer\n Author : vishnuvardhan.talluri@broadcom.com\n :return:\n \"\"\"\n success = True\n copp_cir_ndp = int(retrun_group_dict(copp_data, 'arp')['cir'])\n sent_rate_pps = copp_cir_ndp * 2\n deviation = copp_cir_ndp * deviation_percentage\n copp_queue = retrun_group_dict(copp_data, 'arp')['queue']\n st.log(\"testcase to verify COPP for ndp\")\n\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending ndp packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps,copp_cir_ndp))\n\n tg_stream_handle = tg.tg_traffic_config(port_handle=[tg_ph_1], mac_src=\"00:0a:01:01:23:01\", mac_dst=\"b8:6a:97:ca:bb:98\",\n l3_protocol='ipv6', mode='create', transmit_mode='continuous',\n rate_pps=sent_rate_pps, data_pattern='FF FF', l4_protocol=\"icmp\",\n ipv6_dst_addr=\"fe80::ba6a:97ff:feca:bb98\", ipv6_src_addr=\"2001::2\",\n ipv6_next_header=58, icmp_target_addr='2001::2', icmp_type=136, icmp_ndp_nam_o_flag=0,\n icmp_ndp_nam_r_flag=1, icmp_ndp_nam_s_flag=1, ipv6_hop_limit=255)['stream_id']\n st.log(\"send ndp discover and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_ndp, tol=deviation):\n st.error('CPU counter check for rate limiting NDP to {}pps is failed'.format(copp_cir_ndp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n\n if success:\n st.report_pass(\"test_case_passed\")\n else:\n st.report_fail(\"test_case_failed\")\n\n\n@pytest.mark.copp\ndef test_ft_copp_bgp():\n \"\"\"\n scenario : Verify CoPP functionality for bgp\n Author : vishnuvardhan.talluri@broadcom.com\n :return:\n \"\"\"\n success = True\n st.log(\"testcase to verify COPP for bgp\")\n copp_cir_bgp = int(retrun_group_dict(copp_data, 'bgp')['cir'])\n sent_rate_pps = copp_cir_bgp * 2\n deviation = copp_cir_bgp * deviation_percentage\n 
copp_queue = retrun_group_dict(copp_data, 'bgp')['queue']\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending bgp packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps,copp_cir_bgp))\n tg_stream_handle = tg.tg_traffic_config(port_handle=tg_ph_1, mac_src=\"E4:F0:04:38:07:DA\", mac_dst=d1_p1_mac,\n mode='create', transmit_mode='continuous',\n data_pattern='FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 00 2D 01 04 00 C8 00 5A 05 05 '\n '05 05 10 02 0E 01 04 00 01 00 01 02 00 41 04 00 00 00 C8',\n rate_pps=sent_rate_pps, l3_protocol='ipv4', ip_protocol=6, ip_src_addr='1.1.1.1',\n l4_protocol='tcp', ip_precedence=5, frame_size=103,\n ip_dst_addr='1.1.1.2', tcp_dst_port=179, tcp_src_port=54821, tcp_window=115,\n tcp_seq_num=1115372998, tcp_ack_num=1532875182,tcp_ack_flag=1, tcp_psh_flag=1, ip_ttl=1)['stream_id']\n st.log(\"send bgp open packets and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1,queue=copp_queue,value=copp_cir_bgp,tol=deviation):\n st.error('CPU counter check for rate limiting bgp to {}pps is failed'.format(copp_cir_bgp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n\n if success:\n st.report_pass(\"test_case_passed\")\n else:\n st.report_fail(\"test_case_failed\")\n\n\n@pytest.fixture(scope=\"function\")\ndef copp_fixture_igmp(request,copp_module_hooks):\n # copp fixture for igmp\n ip_obj.delete_ip_interface(dut=vars.D1, interface_name=d1_p1, ip_address='1.1.1.2', subnet='24')\n vlan_obj.add_vlan_member(vars.D1, vlan_igmp, [vars.D1T1P1])\n yield\n st.log(\"### CLEANUP for copp igmp ###\")\n vlan_obj.delete_vlan_member(vars.D1, vlan_igmp, [vars.D1T1P1], \"access\")\n ip_obj.config_ip_addr_interface(dut=vars.D1, interface_name=d1_p1, ip_address='1.1.1.2', subnet='24')\n\n\n@pytest.mark.copp\ndef test_ft_copp_igmp(copp_fixture_igmp):\n \"\"\"\n scenario : Verify CoPP functionality for igmp\n Author : sreenivasula.reddy@broadcom.com\n :return:\n \"\"\"\n success = True\n copp_cir_igmp = int(retrun_group_dict(copp_data, 'igmp')['cir'])\n sent_rate_pps = copp_cir_igmp * 2\n deviation = copp_cir_igmp * deviation_percentage\n copp_queue = retrun_group_dict(copp_data, 'igmp')['queue']\n st.log(\"testcase to verify COPP for igmp\")\n\n if not vlan_obj.verify_vlan_config(vars.D1, vlan_igmp, untagged=[vars.D1T1P1]):\n st.report_fail('vlan_untagged_member_fail', vars.D1T1P1, vlan_igmp)\n\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending igmp packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps, copp_cir_igmp))\n\n tg_stream_handle = tg.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='continuous',\n length_mode='fixed', rate_pps=sent_rate_pps, mac_src='00:01:05:00:1A:00',\n mac_dst='01:00:5e:01:01:02', ethernet_value='8100',\n data_pattern_mode='fixed', l2_encap='ethernet_ii',\n data_pattern='0C 74 08 00 46 00 00 20 00 00 00 00 01 02 2D CA 15 01 01 '\n '0A E0 01 01 02 94 04 00 00 16 64 08 98 E0 01 01 02 00 01 '\n '02 03 04 05 06 07 08 09 0A 0B 0C 0D 99 2C 9E 39')['stream_id']\n st.log(\"send igmp query packet and verify cpu counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_igmp, tol=deviation):\n st.error('CPU counter check for rate limiting igmp to {}pps is failed'.format(copp_cir_igmp))\n 
success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n if success:\n st.report_pass(\"igmp_rate_limit_status\", copp_cir_igmp, \"passed\")\n else:\n st.report_fail(\"igmp_rate_limit_status\", copp_cir_igmp, \"failed\")\n\n\n@pytest.mark.copp\ndef test_ft_copp_sflow():\n \"\"\"\n scenario : Verify CoPP functionality for sflow\n Author : sreenivasula.reddy@broadcom.com\n :return:\n \"\"\"\n success = True\n copp_queue = retrun_group_dict(copp_data, 'sflow')['queue']\n string_copp = 'copp-scheduler-policy@' + copp_queue\n if \"COPP_TABLE:trap.group.sflow\" in copp_data.keys():\n copp_obj.set_copp_config(vars.D1, [\"COPP_TABLE:trap.group.sflow\", \"cbs\", \"600\"],[\"COPP_TABLE:trap.group.sflow\", \"cir\", \"600\"])\n else:\n copp_obj.set_copp_config(vars.D1, [\"COPP_TABLE:copp-system-sflow\", \"cbs\", \"600\"],\n [\"COPP_TABLE:copp-system-sflow\", \"cir\", \"600\"])\n if string_copp in copp_data_pir['SCHEDULER'].keys():\n if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '600':\n copp_obj.set_copp_pir_config(vars.D1,'apply', [string_copp, \"pir\", \"600\"])\n\n st.log(\"performing reboot\")\n st.reboot(vars.D1)\n try:\n enable_disable_config(vars.D1, interface=False, interface_name=None, action=\"enable\",\n cli_type=\"klish\")\n add_del_collector(vars.D1, collector_name=\"collector_1\", ip_address=\"1.1.1.1\",\n port_number=None, action=\"add\", cli_type=\"klish\")\n except Exception as e:\n st.log(e)\n st.report_fail(\"exception_observed\", e)\n copp_obj.get_copp_config(dut=vars.D1, table_name='all')\n copp_cir_sflow = hw_constants['COPP_CIR_SFLOW']\n sent_rate_pps = \"921828\"\n deviation = copp_cir_sflow * deviation_percentage\n st.log(\"testcase to verify COPP for sflow\")\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps, copp_cir_sflow))\n tg_stream_handle = tg.tg_traffic_config(mac_src='00.00.00.00.00.01', mac_dst='00.00.00.00.00.02', rate_pps=sent_rate_pps, mode='create', \\\n port_handle=tg_ph_1, transmit_mode='continuous', l2_encap='ethernet_ii_vlan', vlan_id='10')['stream_id']\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_sflow, tol=deviation):\n st.error('CPU counter check for rate limiting igmp to {}pps is failed'.format(copp_cir_sflow))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n if success:\n sflow_copp_config_undo()\n sflow_unconfig()\n st.report_pass(\"sflow_rate_limit_status\", copp_cir_sflow, \"passed\")\n else:\n sflow_copp_config_undo()\n sflow_unconfig()\n st.report_fail(\"sflow_rate_limit_status\", copp_cir_sflow, \"failed\")\n\n\n@pytest.mark.copp\ndef test_ft_copp_udp():\n \"\"\"\n scenario : Verify CoPP functionality for UDP\n # IP helper case: RtIpHeAdFn011\n Author :\n :return:\n \"\"\"\n\n copp_queue = '2'\n string_copp = 'copp-scheduler-policy@' + copp_queue\n\n if string_copp in copp_data_pir['SCHEDULER'].keys():\n if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '5000':\n copp_obj.set_copp_pir_config(vars.D1, 'apply', [string_copp, \"pir\", \"5000\"])\n st.log(\"performing reboot\")\n st.reboot(vars.D1)\n\n success = True\n config_rate_limit_value = 5000\n st.log(\"test case to verify COPP for UDP broadcast packets\")\n st.log(\"On DUT enable IP helper globally\")\n ip_helper_obj.config(vars.D1, helper_status='enable')\n st.log(\"Configuring rate 
limit value {} for UDP broadcast packets.\".format(config_rate_limit_value))\n ip_helper_obj.config(vars.D1, rate_limit_val=config_rate_limit_value)\n if not ip_helper_obj.verify(vars.D1, forward_protocol='', verify_list=[{'forwarding': 'Enabled',\n 'enable_ports': ['TFTP', 'NTP', 'DNS',\n 'TACACS',\n 'NetBios-Name-Server',\n 'NetBios-Datagram-Server',\n ], 'rate_limit': str(config_rate_limit_value)}]):\n st.report_fail(\"UDP_forwarding_status_verification_failed\")\n st.log(\"Configure IP helper address {} on interface {}\".format(\"2.2.2.2\", vars.D1T1P1))\n # noinspection PyInterpreter\n ip_helper_obj.config(vars.D1, action_str='add', intf_name=vars.D1T1P1, ip_address=\"2.2.2.2\")\n copp_cir_udp = config_rate_limit_value\n sent_rate_pps = copp_cir_udp * 2\n deviation = copp_cir_udp * deviation_percentage\n tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])\n st.log('sending UDP packets for {}pps and expecting rate limit to {}pps '.format(sent_rate_pps, copp_cir_udp))\n tg_stream_handle = tg.tg_traffic_config(port_handle=[tg_ph_1], mac_src=\"00:00:00:11:22:33\", mac_dst=\"FF:FF:FF:FF:FF:FF\",\n mode='create', transmit_mode='continuous',\n data_pattern='00 03 01 00 00 01 00 00 00 00 00 00 06 67 6f 6f 67 6c 65 03 63 6f 6d 00 00 01 00 01',\n rate_pps=sent_rate_pps, l3_protocol='ipv4', ip_protocol=17, ip_src_addr='1.1.1.1',\n l4_protocol='udp', ip_dst_addr='255.255.255.255', udp_dst_port='53', udp_src_port=54821)['stream_id']\n\n st.log(\"send UDP broadcast packets and verify CPU counter\")\n tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])\n st.wait(5)\n if not verify_counter_cpu_asic_bcm(dut=vars.D1, queue=copp_queue, value=copp_cir_udp, tol=deviation):\n st.error('CPU counter check for rate limiting udp to {} pps is failed'.format(copp_cir_udp))\n success = False\n tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])\n\n ip_helper_obj.config(vars.D1, rate_limit_val=600)\n ip_helper_obj.config(vars.D1, action_str='remove', intf_name=vars.D1T1P1, ip_address=\"2.2.2.2\")\n st.log(\"On DUT Disable IP helper globally\")\n ip_helper_obj.config(vars.D1, cli_type='click', helper_status='disable')\n\n if string_copp in copp_data_pir['SCHEDULER'].keys():\n if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '5000':\n copp_obj.set_copp_pir_config(vars.D1, 'apply', [string_copp, \"pir\", copp_data_pir['SCHEDULER'][string_copp]['pir']])\n st.log(\"performing reboot\")\n st.reboot(vars.D1)\n msg_str = \"UDP broadcast rate limit\"\n if success:\n st.report_pass(\"IP_helper_test_case_msg_status\", msg_str, \"passed\")\n else:\n st.report_fail(\"IP_helper_test_case_msg_status\", msg_str, \"failed\")\n\n","sub_path":"tests/qos/test_copp.py","file_name":"test_copp.py","file_ext":"py","file_size_in_byte":24174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"180109006","text":"def isPalindrome(string, start, end):\r\n if start >= end: #base case\r\n return True\r\n else:\r\n firstChar = string[start]\r\n lastChar = string[end]\r\n if firstChar != lastChar:\r\n return False\r\n else:\r\n return isPalindrome(string, start+1, end-1)\r\n\r\ns = input()\r\n# '\\' is an escape sequence\r\nif isPalindrome(s, 0, len(s) - 1):\r\n print(\"\\\"{}\\\" is a palindrome\".format(s))\r\nelse:\r\n print(\"\\\"{}\\\" is not a 
palindrome\".format(s))","sub_path":"basic/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"177745454","text":"import csv\nimport os\nimport os.path\nimport tarfile\nfrom urllib.parse import urlparse\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nimport pickle\nimport ast\n\ntags = ['airport', 'animal', 'beach', 'bear', 'birds', 'boats', 'book', 'bridge', 'buildings', 'cars', 'castle', 'cat',\n 'cityscape', 'clouds', 'computer', 'coral', 'cow', 'dancing', 'dog', 'earthquake', 'elk', 'fire', 'fish',\n 'flags', 'flowers', 'food', 'fox', 'frost', 'garden', 'glacier', 'grass', 'harbor', 'horses', 'house', 'lake',\n 'leaf', 'map', 'military', 'moon', 'mountain', 'nighttime', 'ocean', 'person', 'plane', 'plants', 'police',\n 'protest', 'railroad', 'rainbow', 'reflection', 'road', 'rocks', 'running', 'sand', 'sign', 'sky', 'snow',\n 'soccer', 'sports', 'statue', 'street', 'sun', 'sunset', 'surf', 'swimmers', 'tattoo', 'temple', 'tiger',\n 'tower', 'town', 'toy', 'train', 'tree', 'valley', 'vehicle', 'water', 'waterfall', 'wedding', 'whales',\n 'window', 'zebra']\n\n\ndef categoty_to_idx(category):\n cat2idx = {}\n for cat in category:\n cat2idx[cat] = len(cat2idx)\n return cat2idx\n\n\nclass NusWide(data.Dataset):\n def __init__(self, root, transform=None, phase='train', inp_name=None):\n self.root = root\n self.phase = phase\n self.transform = transform\n self.num_classes = 81\n self.get_anno()\n self.tags = tags\n if inp_name is not None:\n with open(inp_name, 'rb') as f:\n self.inp = pickle.load(f)\n self.inp_name = inp_name\n else:\n self.inp = None\n self.inp_name = None\n\n def get_anno(self):\n self.img_name_list = []\n self.tag_list = []\n img_list_path = os.path.join(self.root, 'nus_wide_data_{}.csv'.format(self.phase))\n with open(img_list_path, 'r') as f:\n reader = csv.reader(f)\n rownum = 0\n for row in reader:\n if rownum == 0:\n pass\n else:\n self.img_name_list.append(row[0].split('/')[1])\n tag_names = ast.literal_eval(row[1])\n tag = [-1 for i in range(self.num_classes)]\n for tag_name in tag_names:\n tag[tags.index(tag_name)] = 1\n self.tag_list.append(tag)\n rownum += 1\n def __len__(self):\n return len(self.img_name_list)\n\n def __getitem__(self, index):\n filename = self.img_name_list[index]\n tag = self.tag_list[index]\n img = Image.open(os.path.join(self.root, 'images', filename)).convert('RGB')\n if self.transform is not None:\n img = self.transform(img)\n target = np.asarray(tag)\n target[target==0] = -1\n\n if self.inp is None:\n return (img, filename), target\n else:\n return (img, filename, self.inp), target\n","sub_path":"data/data_nuswide.py","file_name":"data_nuswide.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"548647259","text":"import json\n\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\nfrom django.urls import reverse\nfrom django.http import HttpResponse\n\nfrom wagtail.utils.pagination import paginate\nfrom wagtail.wagtailadmin.forms import SearchForm\nfrom wagtail.wagtailadmin import messages\nfrom wagtail.wagtailadmin.utils import PermissionPolicyChecker\nfrom wagtail.wagtailadmin.utils import permission_denied\nfrom wagtail.wagtailadmin.utils 
import popular_tags_for_model\nfrom wagtail.wagtailcore.models import Collection\nfrom wagtail.wagtailsearch import index as search_index\n\nfrom wagtail_embed_videos import get_embed_video_model\nfrom wagtail_embed_videos.forms import get_embed_video_form\nfrom wagtail_embed_videos.permissions import permission_policy\n\npermission_checker = PermissionPolicyChecker(permission_policy)\n\n\n@permission_checker.require_any('add', 'change', 'delete')\n@vary_on_headers('X-Requested-With')\ndef index(request):\n EmbedVideo = get_embed_video_model()\n\n # Get embed videos\n embed_videos = permission_policy.instances_user_has_any_permission_for(\n request.user, ['change', 'delete']\n ).order_by('-created_at')\n\n # Search\n query_string = None\n if 'q' in request.GET:\n form = SearchForm(request.GET, placeholder=_(\"Search videos\"))\n if form.is_valid():\n query_string = form.cleaned_data['q']\n\n embed_videos = embed_videos.search(query_string)\n else:\n form = SearchForm(placeholder=_(\"Search videos\"))\n\n # Filter by collection\n current_collection = None\n collection_id = request.GET.get('collection_id')\n if collection_id:\n try:\n current_collection = Collection.objects.get(id=collection_id)\n embed_videos = embed_videos.filter(collection=current_collection)\n except (ValueError, Collection.DoesNotExist):\n pass\n\n # Pagination\n paginator, embed_videos = paginate(request, embed_videos)\n\n collections = permission_policy.collections_user_has_any_permission_for(\n request.user, ['add', 'change']\n )\n if len(collections) < 2:\n collections = None\n\n # Create response\n if request.is_ajax():\n return render(\n request,\n 'wagtail_embed_videos/embed_videos/results.html',\n {\n 'embed_videos': embed_videos,\n 'query_string': query_string,\n 'is_searching': bool(query_string),\n }\n )\n\n else:\n return render(\n request,\n 'wagtail_embed_videos/embed_videos/index.html',\n {\n 'embed_videos': embed_videos,\n 'query_string': query_string,\n 'is_searching': bool(query_string),\n\n 'search_form': form,\n 'popular_tags': popular_tags_for_model(EmbedVideo),\n 'collections': collections,\n 'current_collection': current_collection,\n 'user_can_add': permission_policy.user_has_permission(request.user, 'add'),\n }\n )\n\n\n@permission_checker.require('change')\ndef edit(request, embed_video_id):\n EmbedVideo = get_embed_video_model()\n EmbedVideoForm = get_embed_video_form(EmbedVideo)\n\n embed_video = get_object_or_404(EmbedVideo, id=embed_video_id)\n\n if not permission_policy.user_has_permission_for_instance(request.user, 'change', embed_video):\n return permission_denied(request)\n\n if request.method == 'POST':\n form = EmbedVideoForm(request.POST, request.FILES, instance=embed_video, user=request.user)\n if form.is_valid():\n form.save()\n\n # Reindex the embed video to make sure all tags are indexed\n search_index.insert_or_update_object(embed_video)\n\n messages.success(\n request,\n _(\"Video '{0}' updated.\").format(embed_video.title), buttons=[\n messages.button(\n reverse(\n 'wagtail_embed_videos:edit',\n args=(embed_video.id,)\n ),\n _('Edit again')\n )\n ]\n )\n return redirect('wagtail_embed_videos:index')\n else:\n messages.error(request, _(\"The video could not be saved due to errors.\"))\n else:\n form = EmbedVideoForm(instance=embed_video, user=request.user)\n\n return render(request, \"wagtail_embed_videos/embed_videos/edit.html\", {\n 'embed_video': embed_video,\n 'form': form,\n 'user_can_delete': permission_policy.user_has_permission_for_instance(\n request.user, 'delete', 
embed_video\n ),\n })\n\n\ndef json_response(document, status=200):\n return HttpResponse(json.dumps(document), content_type='application/json', status=status)\n\n\ndef preview(request, embed_video_id):\n embed_video = get_object_or_404(get_embed_video_model(), id=embed_video_id)\n\n return HttpResponse({'embed_video_preview': embed_video.url.thumbnail}, content_type='image/jpeg')\n\n\n@permission_checker.require('delete')\ndef delete(request, embed_video_id):\n embed_video = get_object_or_404(get_embed_video_model(), id=embed_video_id)\n\n if not permission_policy.user_has_permission_for_instance(request.user, 'delete', embed_video):\n return permission_denied(request)\n\n if request.method == 'POST':\n embed_video.delete()\n messages.success(request, _(\"Video '{0}' deleted.\").format(embed_video.title))\n return redirect('wagtail_embed_videos:index')\n\n return render(request, \"wagtail_embed_videos/embed_videos/confirm_delete.html\", {\n 'embed_video': embed_video,\n })\n\n\n@permission_checker.require('add')\ndef add(request):\n EmbedVideoModel = get_embed_video_model()\n EmbedVideoForm = get_embed_video_form(EmbedVideoModel)\n\n if request.method == 'POST':\n embed_video = EmbedVideoModel(uploaded_by_user=request.user)\n form = EmbedVideoForm(request.POST, request.FILES, instance=embed_video, user=request.user)\n if form.is_valid():\n form.save()\n\n # Reindex the embed video to make sure all tags are indexed\n search_index.insert_or_update_object(embed_video)\n\n messages.success(\n request,\n _(\"Video '{0}' added.\").format(embed_video.title),\n buttons=[\n messages.button(\n reverse(\n 'wagtail_embed_videos:edit',\n args=(embed_video.id,)\n ),\n _('Edit')\n )\n ]\n )\n return redirect('wagtail_embed_videos:index')\n else:\n messages.error(request, _(\"The video could not be created due to errors.\"))\n else:\n form = EmbedVideoForm(user=request.user)\n\n return render(request, \"wagtail_embed_videos/embed_videos/add.html\", {\n 'form': form,\n })\n\n\ndef usage(request, embed_video_id):\n embed_video = get_object_or_404(get_embed_video_model(), id=embed_video_id)\n\n # Pagination\n paginator, used_by = paginate(request, embed_video.get_usage())\n\n return render(request, \"wagtail_embed_videos/embed_videos/usage.html\", {\n 'embed_video': embed_video,\n 'used_by': used_by\n })\n","sub_path":"wagtail_embed_videos/views/embed_videos.py","file_name":"embed_videos.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"173287595","text":"import webapp2\nimport date_validator\nimport cgi\n\nform=\"\"\"\n
<form method=\"post\">\n What is your birthday?\n <br>\n <label>Month <input type=\"text\" name=\"month\" value=\"%(month)s\"></label>\n <label>Day <input type=\"text\" name=\"day\" value=\"%(day)s\"></label>\n <label>Year <input type=\"text\" name=\"year\" value=\"%(year)s\"></label>\n <div style=\"color: red\">%(error)s</div>\n <br>\n <input type=\"submit\">\n</form>
\n\"\"\"\n\n\n#escape html tag inputs\ndef escape_html(s):\n return cgi.escape(s, quote = True)\n\n\n# handler class for root url rendering birthday form\nclass HelloWebapp2(webapp2.RequestHandler):\n\n def write_form(self, error=\"\", month=\"\", day=\"\", year=\"\"):\n self.response.write(form % {'error': error, 'month': escape_html(month)\n ,'day': escape_html(day), 'year': escape_html(year)})\n\n def get(self):\n self.write_form()\n\n def post(self):\n user_month = self.request.get('month')\n user_day = self.request.get('day')\n user_year = self.request.get('year')\n\n month = date_validator.valid_month(user_month)\n day = date_validator.valid_day(user_day)\n year = date_validator.valid_year(user_year)\n\n if not(month and day and year):\n self.write_form(\"dude, is that even a date!?\",\n user_month, user_day, user_year)\n else:\n self.redirect('/thanks')\n\n#handler class after birthday data posted\nclass ThanksHandler(HelloWebapp2):\n def get(self):\n #birthday = month + \" \" + day + \",\" + \" \" + year + \"!\"\n self.response.write(\" that is a good day!\")\n\napp = webapp2.WSGIApplication([\n ('/', HelloWebapp2), ('/thanks', ThanksHandler)], debug=True)\n\ndef main():\n from paste import httpserver\n httpserver.serve(app, host='127.0.0.1', port='8080')\n\nif __name__ == '__main__':\n main()\n","sub_path":"Backend/hello_webapp2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"556256350","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nPlot Voltage data coming from Arduino\r\n\"\"\"\r\nimport serial\r\nimport matplotlib.animation as animation\r\nfrom matplotlib import pyplot as plt\r\nimport time\r\n\r\nser = serial.Serial('COM3', 9600)\r\ntime.sleep(2)\r\n# Create figure for plotting\r\nfig = plt.figure()\r\nax = fig.add_subplot(1, 1, 1)\r\nx_len = 200 # Number of points to display\r\ny_range = [0, 5] \r\nplt.style.use('ggplot')\r\n\r\nxs = []\r\nys = []\r\nax.set_ylim(y_range)\r\n\r\nline = ax.plot(xs, ys)\r\n\r\n# This function is called periodically from FuncAnimation\r\ndef animate(i, xs, ys):\r\n\r\n # Read temperature (Celsius) from TMP102\r\n data = ser.readline() # read data from serial\r\n print(data.decode())\r\n \r\n # Add x and y to lists\r\n # Add x and y to lists\r\n try:\r\n print(data.decode())\r\n ys.append(float(data.decode()))\r\n xs.append(i)\r\n\r\n except ValueError:\r\n print(\"error reading float\")\r\n\r\n # Limit x and y lists to 20 items\r\n xs = xs[-x_len:]\r\n ys = ys[-x_len:]\r\n\r\n # Draw x and y lists\r\n ax.clear()\r\n ax.plot(xs, ys)\r\n \r\n # Format plot\r\n plt.ylabel('Voltage')\r\n\r\n\r\n# Set up plot to call animate() function periodically\r\nanimate = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=10)\r\nplt.show()\r\n","sub_path":"python/graph_voltage.py","file_name":"graph_voltage.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"206985936","text":"import json\nfrom pathlib import Path\nfrom typing import Tuple, List, Union, Dict\nimport warnings\n\nfrom geopandas import GeoDataFrame\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport shapely\nimport rasterio\n\nfrom .utils import get_logger, folium_base_map, DrawFoliumOverride, _plot_images\n\ntry:\n from IPython.display import display\n from IPython import get_ipython\n\n 
get_ipython().run_line_magic(\"matplotlib\", \"inline\")\nexcept (ImportError, AttributeError):\n # No Ipython installed, Installed but run in shell\n pass\n\nlogger = get_logger(__name__)\n\n\n# pylint: disable=no-member, duplicate-code\nclass Tools:\n def __init__(self, auth=None):\n \"\"\"\n The tools class contains functionality that is not bound to a specific UP42 object,\n e.g. for aoi handling etc., UP42 block information, validatin a block manifest etc.\n They can be accessed from every object and also from the imported up42 package directly.\n\n Public methods:\n read_vector_file, get_example_aoi, draw_aoi, plot_coverage, plot_quicklooks\n \"\"\"\n if auth:\n self.auth = auth\n self.quicklooks = None\n self.results = None\n\n # pylint: disable=no-self-use\n def read_vector_file(\n self, filename: str = \"aoi.geojson\", as_dataframe: bool = False\n ) -> Union[Dict, GeoDataFrame]:\n \"\"\"\n Reads vector files (geojson, shapefile, kml, wkt) to a feature collection,\n for use as the aoi geometry in the workflow input parameters\n (see get_input_parameters).\n\n Example aoi fiels are provided, e.g. example/data/aoi_Berlin.geojson\n\n Args:\n filename: File path of the vector file.\n as_dataframe: Return type, default FeatureCollection, GeoDataFrame if True.\n\n Returns:\n Feature Collection\n \"\"\"\n suffix = Path(filename).suffix\n\n if suffix == \".kml\":\n gpd.io.file.fiona.drvsupport.supported_drivers[\"KML\"] = \"rw\"\n df = gpd.read_file(filename, driver=\"KML\")\n elif suffix == \".wkt\":\n with open(filename) as wkt_file:\n wkt = wkt_file.read()\n df = pd.DataFrame({\"geometry\": [wkt]})\n df[\"geometry\"] = df[\"geometry\"].apply(shapely.wkt.loads)\n df = GeoDataFrame(df, geometry=\"geometry\", crs=4326)\n else:\n df = gpd.read_file(filename)\n\n if df.crs.to_string() != \"EPSG:4326\":\n df = df.to_crs(epsg=4326)\n df.geometry = df.geometry.buffer(0)\n # TODO: Explode multipolygons (if neccessary as union in aoi anyway most often).\n\n # TODO: Have both bboxes for each feature and overall?\n\n if as_dataframe:\n return df\n else:\n return df.__geo_interface__\n\n def get_example_aoi(\n self, location: str = \"Berlin\", as_dataframe: bool = False\n ) -> Union[dict, GeoDataFrame]:\n \"\"\"\n Gets predefined, small, rectangular example aoi for the selected location.\n\n Args:\n location: Location, one of Berlin, Washington.\n as_dataframe: Returns a dataframe instead of dict FeatureColletions\n (default).\n\n Returns:\n Feature collection json with the selected aoi.\n \"\"\"\n logger.info(\"Getting small example aoi in %s.\", location)\n if location == \"Berlin\":\n example_aoi = self.read_vector_file(\n f\"{str(Path(__file__).resolve().parent)}/data/aoi_berlin.geojson\"\n )\n elif location == \"Washington\":\n example_aoi = self.read_vector_file(\n f\"{str(Path(__file__).resolve().parent)}/data/aoi_washington.geojson\"\n )\n else:\n raise ValueError(\n \"Please select one of 'Berlin' or 'Washington' as the \" \"location!\"\n )\n\n if as_dataframe:\n df = GeoDataFrame.from_features(example_aoi, crs=4326)\n return df\n else:\n return example_aoi\n\n # pylint: disable=no-self-use\n def draw_aoi(self):\n \"\"\"\n Displays an interactive map to draw an aoi by hand, returns the folium object if\n not run in a Jupyter notebook.\n\n Export the drawn aoi via the export button, then read the geometries via\n read_aoi_file().\n \"\"\"\n m = folium_base_map(layer_control=True)\n DrawFoliumOverride(\n export=True,\n filename=\"aoi.geojson\",\n position=\"topleft\",\n draw_options={\n 
\"rectangle\": {\"repeatMode\": False, \"showArea\": True},\n \"polygon\": {\"showArea\": True, \"allowIntersection\": False},\n \"polyline\": False,\n \"circle\": False,\n \"marker\": False,\n \"circlemarker\": False,\n },\n edit_options={\"polygon\": {\"allowIntersection\": False}},\n ).add_to(m)\n\n try:\n assert get_ipython() is not None\n display(m)\n except (AssertionError, NameError):\n logger.info(\n \"Returning folium map object. To display it directly run in a \"\n \"Jupyter notebook!\"\n )\n return m\n\n @staticmethod\n def plot_coverage(\n scenes: GeoDataFrame,\n aoi: GeoDataFrame = None,\n legend_column: str = \"scene_id\",\n figsize=(12, 16),\n ) -> None:\n \"\"\"\n Plots a coverage map of a dataframe with geometries e.g. the results of catalog.search())\n Args:\n scenes: GeoDataFrame of scenes, results of catalog.search()\n aoi: GeoDataFrame of aoi.\n legend_column: Dataframe column set to legend, default is \"scene_id\".\n Legend entries are sorted and this determines plotting order.\n figsize: Matplotlib figure size.\n \"\"\"\n if legend_column not in scenes.columns:\n legend_column = None # type: ignore\n logger.info(\n \"Given legend_column name not in scene dataframe, \"\n \"plotting without legend.\"\n )\n\n ax = scenes.plot(\n legend_column,\n categorical=True,\n figsize=figsize,\n cmap=\"Set3\",\n legend=True,\n alpha=0.7,\n legend_kwds=dict(loc=\"upper left\", bbox_to_anchor=(1, 1)),\n )\n\n if aoi is not None:\n aoi.plot(color=\"r\", ax=ax, fc=\"None\", edgecolor=\"r\", lw=1)\n # TODO: Add aoi to legend.\n # from matplotlib.patches import Patch\n # patch = Patch(label=\"aoi\", facecolor='None', edgecolor='r')\n # ax.legend(handles=handles, labels=labels)\n # TODO: Overlay quicklooks on geometry.\n ax.set_axis_off()\n plt.show()\n\n def plot_quicklooks(\n self,\n figsize: Tuple[int, int] = (8, 8),\n filepaths: List = None,\n titles: List[str] = None,\n ) -> None:\n \"\"\"\n Plots the downloaded quicklooks (filepaths saved to self.quicklooks of the\n respective object, e.g. job, catalog).\n\n Args:\n figsize: matplotlib figure size.\n filepaths: Paths to images to plot. Optional, by default picks up the last\n downloaded results.\n titles: List of titles for the subplots, optional.\n\n \"\"\"\n if filepaths is None:\n if self.quicklooks is None:\n raise ValueError(\"You first need to download the quicklooks!\")\n filepaths = self.quicklooks\n\n plot_file_format = [\".jpg\", \".jpeg\", \".png\"]\n warnings.filterwarnings(\n \"ignore\", category=rasterio.errors.NotGeoreferencedWarning\n )\n _plot_images(\n plot_file_format=plot_file_format,\n figsize=figsize,\n filepaths=filepaths,\n titles=titles,\n )\n\n def plot_results(\n self,\n figsize: Tuple[int, int] = (8, 8),\n filepaths: List[Union[str, Path]] = None,\n titles: List[str] = None,\n ) -> None:\n \"\"\"\n Plots the downloaded results data.\n\n Args:\n figsize: matplotlib figure size.\n filepaths: Paths to images to plot. 
Optional, by default picks up the last\n downloaded results.\n titles: Optional list of titles for the subplots.\n \"\"\"\n if filepaths is None:\n if self.results is None:\n raise ValueError(\"You first need to download the results!\")\n filepaths = self.results\n\n plot_file_format = [\".tif\"] # TODO: Add other fileformats.\n _plot_images(\n plot_file_format=plot_file_format,\n figsize=figsize,\n filepaths=filepaths,\n titles=titles,\n )\n\n def get_blocks(\n self, block_type=None, basic: bool = True, as_dataframe=False,\n ) -> Union[List[Dict], Dict]:\n \"\"\"\n Gets a list of all public blocks on the marketplace.\n\n Args:\n block_type: Optionally filters to \"data\" or \"processing\" blocks, default None.\n basic: Optionally returns simple version {block_id : block_name}\n as_dataframe: Returns a dataframe instead of json (default).\n\n Returns:\n A list of the public blocks and their metadata. Optional a simpler version\n dict.\n \"\"\"\n try:\n block_type = block_type.lower()\n except AttributeError:\n pass\n if not hasattr(self, \"auth\"):\n raise Exception(\n \"Requires authentication with UP42, use up42.authenticate()!\"\n )\n url = f\"{self.auth._endpoint()}/blocks\"\n response_json = self.auth._request(request_type=\"GET\", url=url)\n public_blocks_json = response_json[\"data\"]\n\n if block_type == \"data\":\n logger.info(\"Getting only data blocks.\")\n blocks_json = [\n block for block in public_blocks_json if block[\"type\"] == \"DATA\"\n ]\n elif block_type == \"processing\":\n logger.info(\"Getting only processing blocks.\")\n blocks_json = [\n block for block in public_blocks_json if block[\"type\"] == \"PROCESSING\"\n ]\n else:\n blocks_json = public_blocks_json\n\n if basic:\n logger.info(\n \"Getting blocks name and id, use basic=False for all block details.\"\n )\n blocks_basic = {block[\"name\"]: block[\"id\"] for block in blocks_json}\n if as_dataframe:\n return pd.DataFrame.from_dict(blocks_basic, orient=\"index\")\n else:\n return blocks_basic\n\n else:\n if as_dataframe:\n return pd.DataFrame(blocks_json)\n else:\n return blocks_json\n\n def get_block_details(self, block_id: str, as_dataframe=False) -> Dict:\n \"\"\"\n Gets the detailed information about a specific public block from\n the server, includes all manifest.json and marketplace.json contents.\n\n Args:\n block_id: The block id.\n as_dataframe: Returns a dataframe instead of json (default).\n\n Returns:\n A dict of the block details metadata for the specific block.\n \"\"\"\n if not hasattr(self, \"auth\"):\n raise Exception(\n \"Requires authentication with UP42, use up42.authenticate()!\"\n )\n url = f\"{self.auth._endpoint()}/blocks/{block_id}\" # public blocks\n response_json = self.auth._request(request_type=\"GET\", url=url)\n details_json = response_json[\"data\"]\n\n if as_dataframe:\n return pd.DataFrame.from_dict(details_json, orient=\"index\").transpose()\n else:\n return details_json\n\n def validate_manifest(self, path_or_json: Union[str, Path, Dict]) -> Dict:\n \"\"\"\n Validates the block manifest, input either manifest json string or filepath.\n\n Args:\n path_or_json: The input manifest, either filepath or json string, see example.\n\n Returns:\n A dictionary with the validation results and potential validation errors.\n\n Example:\n ```json\n {\n \"_up42_specification_version\": 2,\n \"name\": \"sharpening\",\n \"type\": \"processing\",\n \"tags\": [\n \"imagery\",\n \"processing\"\n ],\n \"display_name\": \"Sharpening Filter\",\n \"description\": \"This block enhances the sharpness 
of a raster\n image by applying an unsharp mask filter algorithm.\",\n \"parameters\": {\n \"strength\": {\"type\": \"string\", \"default\": \"medium\"}\n },\n \"machine\": {\n \"type\": \"large\"\n },\n \"input_capabilities\": {\n \"raster\": {\n \"up42_standard\": {\n \"format\": \"GTiff\"\n }\n }\n },\n \"output_capabilities\": {\n \"raster\": {\n \"up42_standard\": {\n \"format\": \"GTiff\",\n \"bands\": \">\",\n \"sensor\": \">\",\n \"resolution\": \">\",\n \"dtype\": \">\",\n \"processing_level\": \">\"\n }\n }\n }\n }\n ```\n \"\"\"\n if isinstance(path_or_json, (str, Path)):\n with open(path_or_json) as src:\n manifest_json = json.load(src)\n else:\n manifest_json = path_or_json\n if not hasattr(self, \"auth\"):\n raise Exception(\n \"Requires authentication with UP42, use up42.authenticate()!\"\n )\n url = f\"{self.auth._endpoint()}/validate-schema/block\"\n response_json = self.auth._request(\n request_type=\"POST\", url=url, data=manifest_json\n )\n logger.info(\"The manifest is valid.\")\n return response_json[\"data\"]\n","sub_path":"up42/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":14424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"326369132","text":"import numpy as np\nimport time\nimport argparse\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='Calculate accuracies on overrepresented genera')\nparser.add_argument(\"data_dir\", help=\"Directory which contains numpy array data\", type=str)\nargs = parser.parse_args()\n\ntrue_cat = np.load(args.data_dir + 'overrep_y.npy')\nnames = np.load(args.data_dir + 'overrep_names.npy')\n\ngenus = []\n\nfor i in names:\n\tif ' ' in i:\n\t\tgenus.append(i.split(' ')[3])\n\telse:\n\t\tgenus.append(i.split('_')[0][1:])\n\ngenus = np.asarray(genus)\nbgds_cat = np.argmax(np.load('DNN_overrep_softmax.npy'), axis=1)\nplasclass_chr_cat = np.genfromtxt(args.data_dir + 'overrep_chr.plasclass', delimiter='\\t', dtype=str)\nplasclass_pl_cat = np.genfromtxt(args.data_dir + 'overrep_pl.plasclass', delimiter='\\t', dtype=str)\nplasclass_cat = np.vstack((plasclass_chr_cat, plasclass_pl_cat))\nplasclass_cat = np.asarray(plasclass_cat[:,1], dtype=float) >= 0.5\nplasflow_chr_cat = np.genfromtxt(args.data_dir + 'overrep_chr.plasflow_pred.tsv', skip_header=1, delimiter='\\t', dtype=str)\nplasflow_pl_cat = np.genfromtxt(args.data_dir + 'overrep_pl.plasflow_pred.tsv', skip_header=1, delimiter='\\t', dtype=str)\nplasflow_cat = np.vstack((plasflow_chr_cat, plasflow_pl_cat))\nplasflow_cat = np.asarray([x.startswith('plasmid.') for x in plasflow_cat[:,5]])\nprint(np.unique(genus, return_counts=True))\ngen_acc_data = []\n\nprint('\\t'.join(['', 'PlasFlow', 'PlasFlow', 'PlasClass', 'PlasClass', 'BGDS', 'BGDS']))\nprint('\\t'.join(['', 'Chrom.', 'Plasmid', 'Chrom.', 'Plasmid', 'Chrom.', 'Plasmid']))\nprint('\\t'.join(['Genus', 'Acc. (%)', 'Acc. (%)', 'Acc. (%)', 'Acc. (%)', 'Acc. (%)', 'Acc. 
(%)']))\n\nfor gen in np.unique(genus):\n\tbgds_chr_acc = 1 - np.mean(bgds_cat[(true_cat==0) & (genus==gen)])\n\tbgds_pl_acc = np.mean(bgds_cat[(true_cat==1) & (genus==gen)])\n\tplasflow_chr_acc = 1 - np.mean(plasflow_cat[(true_cat==0) & (genus==gen)])\n\tplasflow_pl_acc = np.mean(plasflow_cat[(true_cat==1) & (genus==gen)])\n\tplasclass_chr_acc = 1 - np.mean(plasclass_cat[(true_cat==0) & (genus==gen)])\n\tplasclass_pl_acc = np.mean(plasclass_cat[(true_cat==1) & (genus==gen)])\n\tgen_acc_data.append([plasflow_chr_acc, plasflow_pl_acc, plasclass_chr_acc, plasclass_pl_acc, bgds_chr_acc, bgds_pl_acc])\n\tprint('\\t'.join([gen] + [str(np.round(x*100,2)) for x in gen_acc_data[-1]] ))\n\ngen_acc_data = np.asarray(gen_acc_data)\nsort_ind = np.asarray([1,9,0,6,8,2,5,3,4,7])\ngen_acc_data = gen_acc_data[sort_ind]\nPF_acc = np.mean(np.asarray(gen_acc_data[:,1:3], dtype=float), axis=1)\nPC_acc = np.mean(np.asarray(gen_acc_data[:,3:5], dtype=float), axis=1)\nmy_acc = np.mean(np.asarray(gen_acc_data[:,5:], dtype=float), axis=1)\n\nind = np.arange(10) \nwidth = 0.2 \nplt.figure(figsize=(10, 7))\nplt.bar(ind, PF_acc, width, label='PlasFlow')\nplt.bar(ind + width, PC_acc, width, label='PlasClass')\nplt.bar(ind + 2*width, my_acc, width, label='DNN with GBDS')\nplt.ylabel('Accuracy (%)')\nplt.xlim(-0.7,10)\nplt.ylim(50,100)\nplt.xticks(ind + width, gen_acc_data[:,0], rotation='vertical')\nplt.legend(loc=2, prop={'size': 10})\nplt.gcf().subplots_adjust(bottom=0.25)\nplt.savefig(\"overrep_accuracy_comparison.pdf\")\n","sub_path":"create_figures.py","file_name":"create_figures.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"82048887","text":"#!/usr/bin/python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom duckietown.msgs.msg import Twist2DStamped\nfrom sensor_msgs.msg import Joy\n\nclass Control_car(object):\n\tdef __init__(self):\n\t\tself.joy = None\n\t\t\n\t\t# Setup parameter\n\t\tself.v_gain = 0.41\n\t\tself.omega_gain = 0.83\n\t\t\n\t\t# Subscribers\n\t\tself.sub_joy = rospy.Subscriber(\"joy\", Joy, self.cbJoy, queue_size=1)\n\t\t\n\t\t# Pubishers\n\t\tself.pub_car_cmd = rospy.Publisher(\"~car_cmd\", Twist2DStamped, queue_size=1)\n\n\t##### Joy Control #####\n\tdef cbJoy(self, joy_msg):\n\t\tself.joy = joy_msg\n\t\t\n\t\tcar_cmd_msg = Twist2DStamped()\n\t\tcar_cmd_msg.v = self.joy.axes[1] * self.v_gain\n\t\tcar_cmd_msg.omega = self.joy.axes[3] * self.omega_gain\n\t\tself.pub_car_cmd.publish(car_cmd_msg)\n\nif __name__ == \"__main__\":\n\trospy.init_node(\"control_car\", anonymous=False)\n\tcontrol_car = Control_car()\n\trospy.spin()\n","sub_path":"control_car/src/control_car_node.py","file_name":"control_car_node.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"432223933","text":"import time\nimport webbrowser\n \n#initialize variables\nbreak_count=0\nplay_list =[]\n\n#take all input\ntotal_break= int(input('Number of breaks: '))\nwait_time = int(input('Time between break (seconds): '))\nfor i in range(total_break):\n play_list.append(input('Your video number %i: ' %(i+1)))\n\nprint('Here is your playlist: ')\nprint(play_list)\nprint(\"This program started on \"+time.ctime())\n\nwhile (break_count AgentType:\n if args.agent == 'continuous':\n agent = CarRacingAgent(\n action_space=args.method,\n n_stacked_frames=args.stackframes,\n epsilon=args.epsilon,\n learning_rate=args.learning_rate,\n 
discount_factor=args.discount_factor\n )\n elif args.agent == 'discrete':\n agent = DiscreteAgent(\n method=args.method,\n discount_factor=args.discount_factor,\n epsilon=args.epsilon,\n learning_rate=args.learning_rate,\n )\n\n if args.pretrained:\n agent.load(args.pretrained)\n\n return agent\n\n\ndef start():\n try:\n args = prepare_options()\n logfile = datetime.today().strftime('%Y-%m-%dT%Hh%Mm%Ss.log')\n configure_logger(args.loglevel, logfile=Settings.LOG_DIR.joinpath(logfile))\n\n agent = get_agent(args)\n if args.command == 'train':\n logger.info('Starting Training Process')\n agent.train(episodes=args.episodes)\n elif args.command == 'play':\n if args.pretrained is None:\n logger.warning('No Pretrained Model specified. Play will use Random Weights')\n reward, steps = agent.play(render=args.render)\n logger.info(f'Reward {reward}, Steps: {steps}')\n\n except Exception as error:\n logger.error(error)\n raise\n\n\nif __name__ == '__main__':\n start()\n","sub_path":"reinforcement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"113261392","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 9 19:05:16 2017\n\n@author: prasanna\n\"\"\"\n\nimport numpy\nimport matplotlib\nburnrate=numpy.zeros(1001)\nt=numpy.linspace(0,100,1001)\nburnrate[:501]=20\ndt=0.1\ng=9.81\nve=325\nrho=1.091\na=numpy.pi*0.5**2\nC_d=0.15\nms=50\n\n#matplotlib.pyplot.plot(t,burnrate)\nmp= 100 - burnrate * t\nmp[51:]=0\nv=numpy.zeros(1001)\nh=numpy.zeros(1001)\nh[0]=0\nv[0]=0\nfor i in range(0,999):\n v[i+1]=v[i]+dt*(-g+(burnrate[i]*ve-0.5*rho*numpy.square(v[i])*a*C_d)/(ms+mp[i]))\n h[i+1]=h[i]+v[i]*dt\n\n \n ","sub_path":"working/rocket2.py","file_name":"rocket2.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"632422309","text":"#coding=utf-8\n\nimport enum\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import (\n Migrate,\n MigrateCommand,\n)\nfrom sqlalchemy import (\n Column,\n Integer,\n TIMESTAMP,\n func,\n)\n\n\ndb = SQLAlchemy()\nmigrate = Migrate(db=db)\n\n\nclass TimestampModel:\n updated_at = Column(TIMESTAMP, onupdate=func.now())\n created_at = Column(TIMESTAMP, default=func.now())\n\n\nclass IdentityModel:\n id = Column(Integer, primary_key=True, autoincrement=True)\n\n\nclass EnumBase(enum.Enum):\n @classmethod\n def values(cls):\n return list(map(lambda c: c.value, cls))\n","sub_path":"app/extends/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"73915779","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('restaurants', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Establishment',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n ('address', models.CharField(max_length=50)),\n ('city', models.ForeignKey(to='restaurants.City')),\n ('restaurant', models.ForeignKey(to='restaurants.Restaurant')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='stablishment',\n name='city',\n ),\n migrations.RemoveField(\n model_name='stablishment',\n 
name='restaurant',\n        ),\n        migrations.DeleteModel(\n            name='Stablishment',\n        ),\n    ]\n","sub_path":"restaurant/apps/restaurants/migrations/0002_auto_20150615_0213.py","file_name":"0002_auto_20150615_0213.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"76669774","text":"from kivy.app import App\nfrom kivy.factory import Factory\nfrom kivy.properties import ObjectProperty\nfrom kivy.lang import Builder\n\nfrom electrum.gui.kivy.i18n import _\n\nBuilder.load_string('''\n<FeeDialog@Popup>\n    id: popup\n    title: _('Transaction Fees')\n    size_hint: 0.8, 0.8\n    pos_hint: {'top':0.9}\n    method: 0\n    BoxLayout:\n        orientation: 'vertical'\n        BoxLayout:\n            orientation: 'horizontal'\n            size_hint: 1, 0.5\n            Label:\n                text: _('Method') + ':'\n            Button:\n                text: _('Mempool') if root.method == 2 else _('ETA') if root.method == 1 else _('Static')\n                background_color: (0,0,0,0)\n                bold: True\n                on_release:\n                    root.method  = (root.method + 1) % 3\n                    root.update_slider()\n                    root.update_text()\n        BoxLayout:\n            orientation: 'horizontal'\n            size_hint: 1, 0.5\n            Label:\n                text: (_('Target') if root.method > 0 else _('Fee')) + ':'\n            Label:\n                id: fee_target\n                text: ''\n        Slider:\n            id: slider\n            range: 0, 4\n            step: 1\n            on_value: root.on_slider(self.value)\n        Widget:\n            size_hint: 1, 0.5\n        BoxLayout:\n            orientation: 'horizontal'\n            size_hint: 1, 0.5\n            TopLabel:\n                id: fee_estimate\n                text: ''\n                font_size: '14dp'\n        Widget:\n            size_hint: 1, 0.5\n        BoxLayout:\n            orientation: 'horizontal'\n            size_hint: 1, 0.5\n            Button:\n                text: 'Cancel'\n                size_hint: 0.5, None\n                height: '48dp'\n                on_release: popup.dismiss()\n            Button:\n                text: 'OK'\n                size_hint: 0.5, None\n                height: '48dp'\n                on_release:\n                    root.on_ok()\n                    root.dismiss()\n''')\n\n\n\n\nclass FeeSliderDialog:\n\n    def __init__(self, config, slider):\n        self.config = config\n        self.slider = slider\n        self.read_config()\n        self.update_slider()\n\n    def get_method(self):\n        dynfees = self.method > 0\n        mempool = self.method == 2\n        return dynfees, mempool\n\n    def update_slider(self):\n        dynfees, mempool = self.get_method()\n        maxp, pos, fee_rate = self.config.get_fee_slider(dynfees, mempool)\n        self.slider.range = (0, maxp)\n        self.slider.step = 1\n        self.slider.value = pos\n\n    def read_config(self):\n        mempool = self.config.use_mempool_fees()\n        dynfees = self.config.is_dynfee()\n        self.method = (2 if mempool else 1) if dynfees else 0\n\n    def save_config(self):\n        value = int(self.slider.value)\n        dynfees, mempool = self.get_method()\n        self.config.FEE_EST_DYNAMIC = dynfees\n        self.config.FEE_EST_USE_MEMPOOL = mempool\n        if dynfees:\n            if mempool:\n                self.config.FEE_EST_DYNAMIC_MEMPOOL_SLIDERPOS = value\n            else:\n                self.config.FEE_EST_DYNAMIC_ETA_SLIDERPOS = value\n        else:\n            self.config.FEE_EST_STATIC_FEERATE_FALLBACK = self.config.static_fee(value)\n\n    def update_text(self):\n        pass\n\n\nclass FeeDialog(FeeSliderDialog, Factory.Popup):\n\n    def __init__(self, app, config, callback):\n        Factory.Popup.__init__(self)\n        FeeSliderDialog.__init__(self, config, self.ids.slider)\n        self.app = app\n        self.config = config\n        self.callback = callback\n        self.update_text()\n\n    def on_ok(self):\n        self.save_config()\n        self.callback()\n\n    def on_slider(self, value):\n        self.update_text()\n\n    def update_text(self):\n        pos = int(self.ids.slider.value)\n        dynfees, mempool = self.get_method()\n        if self.method == 2:\n            fee_rate = self.config.depth_to_fee(pos)\n            target, estimate = self.config.get_fee_text(pos, dynfees, mempool, fee_rate)\n            msg = 'In the current network conditions, in order to be 
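# The migration above drops the misspelled Stablishment model and recreates it,
# which loses existing rows. A sketch of the same schema change using Django's
# built-in rename operation instead; the app label and field definition mirror
# the migration above, but treat this as an illustrative alternative, not the
# project's actual migration.
from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('restaurants', '0001_initial'),
    ]

    operations = [
        # renames the table in place, so data in existing columns survives
        migrations.RenameModel(old_name='Stablishment', new_name='Establishment'),
        migrations.AddField(
            model_name='establishment',
            name='address',
            field=models.CharField(max_length=50, default=''),
        ),
    ]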
positioned %s, a transaction will require a fee of %s.' % (target, estimate)\n elif self.method == 1:\n fee_rate = self.config.eta_to_fee(pos)\n target, estimate = self.config.get_fee_text(pos, dynfees, mempool, fee_rate)\n msg = 'In the last few days, transactions that confirmed %s usually paid a fee of at least %s.' % (target.lower(), estimate)\n else:\n fee_rate = self.config.static_fee(pos)\n target, estimate = self.config.get_fee_text(pos, dynfees, True, fee_rate)\n msg = 'In the current network conditions, a transaction paying %s would be positioned %s.' % (target, estimate)\n self.ids.fee_target.text = target\n self.ids.fee_estimate.text = msg\n","sub_path":"electrum/gui/kivy/uix/dialogs/fee_dialog.py","file_name":"fee_dialog.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"336868641","text":"# Program Menghapus Item\n\n# I.S. Menerima input ID barang yang akan dibuang (diperiksa apakah barang itu ada)\n# F.S. Item dibuang dari database / pesan error jika input ID tidak ada / tidak ada yang terjadi bila proses di cancel\n\n\n# KAMUS\n# type type gadget : \n\n# type dbG : array of gadget\n\n# type type consumable : \n\n# type dbC : array of consumable\n\n# databaseG : dbG \n# databaseC : dbC \n\n# ID : string\n\n\n# FUNGSI DAN PROSEDUR\n\ndef searchDatabase(ID, database):\n# Fungsi yang memeriksa apakah sebuah Id ada di dalam database\n\n# KAMUS LOKAL\n# found : boolean\n\n# ALGORITMA\n found = False # Variable yang menyimpan apakah ID ada di database\n\n for i in database:\n\n if i[0] == ID:\n found = True\n break\n\n return found\n\n\ndef getIndex(ID, database):\n# Fungsi mendapatkan index suatu item di database berdasarkan ID (asumsi ID memang ada di database)\n\n# KAMUS LOKAL\n\n# ALGORITMA\n for i in range(len(database)):\n if database[i][0] == ID:\n return i\n\n\ndef removeDatabase(ID, database):\n# Fungsi yang dijalankan program setelah validasi (asumsi ID memang ada di database)\n\n# KAMUS LOKAL\n# index : integer\n\n# confirmation : string\n# temp: (dbG/dbC)\n\n# ALGORITMA\n index = getIndex(ID, database) # Index dari item\n\n while True: # Proses konfirmasi penghapusan\n confirmation = input(\"Apakah anda yakin ingin menghapus \" + database[index][1] + \" (Y/N)? \")\n \n if (confirmation == \"N\"): # Jika user tidak jadi menghapus\n print(\"\\nPenghapusan dibatalkan.\")\n break\n\n elif (confirmation == \"Y\"): # Jika user jadi menghapus\n\n # Kode penghapusan dengan index \n temp = database[:index] \n for i in database[(index + 1):]:\n temp.append(i)\n database = temp\n\n print(\"\\nItem telah berhasil dihapus dari database\")\n break\n \n else: # Inpput tidak valid. Pesan error dan input diulang\n print(\"\\nInput tidak valid. 
Masukkan lagi!\")\n \n return database\n\n\n# PROGRAM UTAMA\ndef hapusItem(databaseG, databaseC):\n\n # Input ID\n ID = input(\"Masukan ID: \")\n \n # Pengecekan ID ada di database\n if not(searchDatabase(ID, databaseG)) and not(searchDatabase(ID, databaseC)): # Bila ID tidak ada di kedua database\n print(\"\\nTidak ada item dengan ID tersebut\")\n\n else:\n if ID[0] == \"G\": # Jika item gadget\n databaseG = removeDatabase(ID, databaseG)\n\n else: # Jika item consumable\n databaseC = removeDatabase(ID, databaseC)\n\n return (databaseG, databaseC) \n\n\n\n ","sub_path":"ModulFungsi/F06.py","file_name":"F06.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"202358697","text":"from ftw.testbrowser import browsing\nfrom ftw.testing import freeze\nfrom opengever.officeconnector.testing import FREEZE_DATE\nfrom opengever.officeconnector.testing import JWT_SIGNING_SECRET_PLONE\nfrom opengever.officeconnector.testing import OCSolrIntegrationTestCase\nimport jwt\n\n\nclass TestOfficeconnectorForwardingAPIWithAttach(OCSolrIntegrationTestCase):\n features = (\n 'officeconnector-attach',\n )\n\n @browsing\n def test_attach_to_email_open_without_file(self, browser):\n self.login(self.secretariat_user, browser)\n self.inbox_forwarding_document.file = None\n\n with browser.expect_http_error(404):\n oc_url = self.fetch_document_attach_oc_url(\n browser,\n self.inbox_forwarding_document,\n )\n\n self.assertIsNone(oc_url)\n\n @browsing\n def test_attach_to_email_open_with_file(self, browser):\n self.login(self.secretariat_user, browser)\n\n with freeze(FREEZE_DATE):\n oc_url = self.fetch_document_attach_oc_url(browser, self.inbox_forwarding_document)\n\n self.assertIsNotNone(oc_url)\n self.assertEquals(200, browser.status_code)\n\n expected_token = {\n u'action': u'attach',\n u'documents': [u'createinboxfa0000000000000000004'],\n u'exp': 4121033100,\n u'sub': u'jurgen.konig',\n u'url': u'http://nohost/plone/oc_attach',\n }\n raw_token = oc_url.split(':')[-1]\n token = jwt.decode(raw_token, JWT_SIGNING_SECRET_PLONE, algorithms=('HS256',))\n self.assertEqual(expected_token, token)\n\n expected_payloads = [{\n u'content-type': u'text/plain',\n u'csrf-token': u'86ecf9b4135514f8c94c61ce336a4b98b4aaed8a',\n u'document-url': u'http://nohost/plone/eingangskorb/eingangskorb_fa/forwarding-1/document-13',\n u'download': u'download',\n u'filename': u'Dokument im Eingangskoerbliweiterleitung.txt',\n u'title': u'Dokument im Eingangsk\\xf6rbliweiterleitung',\n u'uuid': u'createinboxfa0000000000000000004',\n u'version': None,\n }]\n payloads = self.fetch_document_attach_payloads(browser, raw_token, token)\n self.assertEquals(200, browser.status_code)\n self.assertEqual(payloads, expected_payloads)\n\n file_contents = self.download_document(browser, raw_token, payloads[0])\n self.assertEquals(file_contents, self.inbox_forwarding_document.file.data)\n","sub_path":"opengever/officeconnector/tests/test_api_forwarding_attach.py","file_name":"test_api_forwarding_attach.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"395144853","text":"from __future__ import print_function, division\n\nfrom matplotlib import pyplot as plt\nimport jigsaws\nimport numpy as np\nimport quaternion\n\n\ndt = 1 / 30\n\n\ndef jigplot(rec, dev):\n if dev == 'psm1':\n positions = rec.kinematics.psm1.position.values\n linear_velocities = 
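# A small sketch of the token round-trip the attach test above depends on:
# PyJWT signs the claims with HS256 and decode() verifies both signature and
# expiry. The secret and claim values here are illustrative, not the suite's
# real fixtures.
import jwt

secret = 'not-the-real-signing-secret'
claims = {'action': 'attach', 'sub': 'jurgen.konig', 'exp': 4121033100}
raw_token = jwt.encode(claims, secret, algorithm='HS256')
decoded = jwt.decode(raw_token, secret, algorithms=('HS256',))
assert decoded['action'] == 'attach'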
rec.kinematics.psm1.trans_vel.values\n gripper_angles = rec.kinematics.psm1.gripper_angle.values\n rotation_matrices = rec.kinematics.psm1.rotation.values.reshape((-1, 3, 3))\n angular_velocities = rec.kinematics.psm1.rot_vel.values\n elif dev == 'psm2':\n positions = rec.kinematics.psm2.position.values\n linear_velocities = rec.kinematics.psm2.trans_vel.values\n gripper_angles = rec.kinematics.psm2.gripper_angle.values\n rotation_matrices = rec.kinematics.psm2.rotation.values.reshape((-1, 3, 3))\n angular_velocities = rec.kinematics.psm2.rot_vel.values\n else:\n raise ValueError\n\n quats = quaternion.from_rotation_matrix(rotation_matrices, nonorthogonal=False)\n\n n_samples = len(positions)\n ts = np.linspace(0, n_samples * dt, n_samples)\n\n t_events_start = [i * dt for i in rec.transcription.frame_start]\n t_events_end = [i * dt for i in rec.transcription.frame_end]\n event_labels = rec.transcription.gesture_id\n\n fig, ax = plt.subplots(6, sharex=True)\n fig.subplots_adjust(hspace=0)\n ax[-1].set_xlabel('time (s)')\n\n # Event timeline\n tmp = np.concatenate((t_events_start, t_events_end))\n ax[0].plot((tmp.min(), tmp.max()), (0, 0), 'k', alpha=.5)\n levels = np.array([-1, 1])\n\n for i, (t, label) in enumerate(zip(t_events_start, event_labels)):\n h = levels[i % len(levels)]\n vert = 'top' if h < 0 else 'bottom'\n ax[0].scatter(t, 0, s=30, facecolor='w', edgecolor='k', zorder=99)\n ax[0].plot((t, t), (0, h), 'r', alpha=.5)\n ax[0].text(t, h, label, horizontalalignment='center', verticalalignment=vert, fontsize=9, zorder=99)\n\n ax[0].set_ylim((levels.min()-2, levels.max()+2))\n plt.setp((ax[0].get_yticklabels() + ax[0].get_yticklines() + list(ax[0].spines.values())), visible=False)\n\n # Positions\n ax[1].plot(ts, positions[:,0], 'C0', label=r'$x$')\n ax[1].plot(ts, positions[:,1], 'C2', label=r'$y$')\n ax[1].plot(ts, positions[:,2], 'C1', label=r'$z$')\n ax[1].set_ylabel('position (m)')\n ax[1].legend(loc='upper right')\n\n # Velocities\n # linear_speed = np.linalg.norm(linear_velocities, axis=1)\n # ax[2].plot(ts, linear_speed, 'C0', label=r'$v$')\n # ax[2].set_ylabel('speed (m/s)')\n ax[2].plot(ts, linear_velocities[:,0], 'C0', label=r'$\\omega_{x}$')\n ax[2].plot(ts, linear_velocities[:,1], 'C2', label=r'$\\omega_{y}$')\n ax[2].plot(ts, linear_velocities[:,2], 'C1', label=r'$\\omega_{z}$')\n ax[2].set_ylabel('velocity (m/s)')\n ax[2].legend(loc='upper right')\n\n # Gripper angles\n ax[3].plot((0, ts[-1]), (0, 0), 'k', alpha=.5) # line at y=0\n ax[3].plot(ts, np.degrees(gripper_angles))\n ax[3].set_ylabel('gripper angle (deg)')\n\n # Quaternion\n ax[4].plot(ts, quaternion.as_float_array(quats))\n ax[4].set_ylabel('quaternion')\n ax[4].legend([r'$q_{}$'.format(i) for i in 'wxyz'], loc='upper right')\n\n # Angular velocities\n # angular_speed = np.linalg.norm(angular_velocities, axis=1)\n # ax[5].plot(ts, angular_speed, 'C0', label=r'$\\omega$')\n # ax[5].set_ylabel('ang. speed (m/s)')\n ax[5].plot(ts, angular_velocities[:,0], 'C0', label=r'$\\omega_{x}$')\n ax[5].plot(ts, angular_velocities[:,1], 'C2', label=r'$\\omega_{y}$')\n ax[5].plot(ts, angular_velocities[:,2], 'C1', label=r'$\\omega_{z}$')\n ax[5].set_ylabel('ang. vel. 
(m/s)')\n ax[5].legend(loc='upper right')\n\n # Mark events on each axis\n for axx in ax[1:]:\n yl = axx.get_ylim()\n\n for t, label in zip(t_events_start, event_labels):\n axx.plot((t, t), yl, 'r--', alpha=.5)\n\n fig.suptitle('{} ({})'.format(rec.meta.record_id[0], dev))\n\n return fig\n\n\nif __name__ == '__main__':\n loader = jigsaws.Loader('/home/kls/data/JIGSAWS')\n rec = loader.load_record('Suturing_D002')\n fig = jigplot(rec, 'psm2')\n plt.show()\n","sub_path":"python/samples/plot_jigsaws.py","file_name":"plot_jigsaws.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"621577781","text":"from flask import Flask, request, session, send_file, redirect, send_from_directory\nfrom twilio.twiml.messaging_response import MessagingResponse\nimport mysql.connector\nimport random\nimport pdfkit\n# table de user chama-se user_info\n# Por enqnt são 10 questões e só existe teste de matemática\n# telefone guardado por +55XXXXXXXX (testar com romero se dá problema)\ndb = mysql.connector.connect(\n host='database-chatbot.ctcyc2sm3y3o.us-west-2.rds.amazonaws.com',\n user='pedro',\n password='SENHA_DB',\n auth_plugin='mysql_native_password',\n database='chatbotuser',\n port=3306\n)\ncursor = db.cursor()\napplication = Flask(__name__, static_url_path='', static_folder='static')\napplication.secret_key = 'secretpassword'\n\n\n@application.route(\"/\")\ndef root():\n return application.send_static_file(\"out.pdf\")\n\n\n@application.route('/bot', methods=['POST'])\ndef bot():\n print(request.values)\n from_tel = request.values.get('From', '').replace(\"whatsapp:\", \"\")\n to_tel = request.values.get('To', '').replace('whatsapp:', \"\")\n if from_tel == '+14155238886':\n chatbot_tel = from_tel\n user_tel = to_tel\n else:\n chatbot_tel = to_tel\n user_tel = from_tel\n print(\"Numero do user:\" + user_tel)\n print(\"Esse é o numero do bot: \" + chatbot_tel)\n query = \"SELECT nome FROM user_info WHERE telefone = %s\"\n cursor.execute(query, (user_tel,))\n query_results = cursor.fetchall()\n if cursor.rowcount != 0:\n for nome in query_results:\n full_name = nome\n first_name = str(full_name).split()[0]\n session['nome'] = first_name\n session['user'] = 1\n incoming_msg = request.values.get('Body', '')\n if incoming_msg == 'RESET':\n session.clear()\n responded = True\n else:\n responded = False\n resp = MessagingResponse()\n msg = resp.message()\n try:\n if session['user'] == 1:\n responded = True\n if session['path'] == 'receive':\n if session['purpose'] == 'name':\n # REGISTRO NOME\n full_name = incoming_msg\n msg.body(\"Obrigado por escolher contar com nossa ajuda,\" + full_name + '''.\nNosso objetivo é conseguir te diagnosticar nas matérias para fornecer o material adequado à você! \nEssas são as opções de testes até então:\nDigite 1 se você quer um teste de matemática!(*por favor, digite somente o número*)''')\n query_name = \"INSERT INTO user_info(telefone, nome) values(%s,%s)\"\n values_name = [user_tel, full_name]\n cursor.execute(query_name, values_name)\n db.commit()\n session['path'] = 1\n session['purpose'] = 'Nada'\n elif session['purpose'] == 'answers':\n # ENVIO QUESTÕES\n respostas = incoming_msg\n respostas = respostas.upper()\n # Aqui vem análise de respostas\n # Calculos doidos\n msg.body('''Em algum momento teremos uma nota pra você, e uma lista de assuntos em ordem crescente de % de acerto\n1 - Geometria - %\n2 - Algebra Básica - %\n3 - Mat. 
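# A compact sketch of the rotation handling in plot_jigsaws.py above: the
# numpy-quaternion package converts a stack of 3x3 rotation matrices into unit
# quaternions, whose four components plot as smooth curves. The matrices here
# are toy inputs (identity and a 90-degree rotation about z).
import numpy as np
import quaternion

Rz = np.array([[0., -1., 0.],
               [1., 0., 0.],
               [0., 0., 1.]])
mats = np.stack([np.eye(3), Rz])                 # shape (2, 3, 3)
quats = quaternion.from_rotation_matrix(mats, nonorthogonal=False)
print(quaternion.as_float_array(quats))          # shape (2, 4): w, x, y, z per sample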
Financeria - %\n4 - Alglin - %\n5 - Grupos e Aneís - %\nSua proficiência em Matemática vale:\nDigite o número que deseja focar essa semana.''')\n session['path'] = 2\n session['purpose'] = 'Nada'\n elif session['path'] == 1:\n if incoming_msg == '1':\n # TESTE MATEMÁTICA\n query_simulado_faceis = \"SELECT nome_drive FROM math_questions WHERE dificuldade<5\"\n query_simulado_medias = \"SELECT nome_drive FROM math_questions WHERE dificuldade<5\"\n query_simulado_dificeis = \"SELECT nome_drive FROM math_questions WHERE dificuldade<5\"\n escolha_facil = random.sample(range(1, len(query_simulado_faceis)), 5)\n nome_faceis = []\n escolha_media = random.sample(range(1, len(query_simulado_medias)), 3)\n nome_medias = []\n escolha_dificil = random.sample(range(1, len(query_simulado_dificeis)), 2)\n nome_dificeis = []\n cursor.execute(query_simulado_faceis)\n questoes_simu = cursor.fetchall()\n print(cursor.rowcount)\n i = 1\n for nome_drive in questoes_simu:\n if i in escolha_facil:\n nome_faceis.append(nome_drive)\n i = i + 1\n cursor.execute(query_simulado_medias)\n questoes_simu = cursor.fetchall()\n i = 1\n for nome_drive in questoes_simu:\n if i in escolha_media:\n nome_medias.append(nome_drive)\n i = i + 1\n cursor.execute(query_simulado_dificeis)\n questoes_simu = cursor.fetchall()\n i = 1\n for nome_drive in questoes_simu:\n if i in escolha_dificil:\n nome_dificeis.append(nome_drive)\n i = i + 1\n # AQUI ENTRA XANDE CRIANDO HTML\n # retornou um arquivo html no meu diretorio.\n file_path = 'file.html'\n pdfkit.from_file(input='/home/pedro/PycharmProjects/hackathon/file.html',\n output_path='static/out.pdf')\n msg.media('/')\n session['path'] = 'receive'\n session['purpose'] = 'answers'\n # FINAL ENVIO SIMULADO\n elif session['path'] == 2:\n if incoming_msg is not None:\n msg.body('''Aqui terá uma recomendação massa de vídeos\nApós estudo do material, venha novamente testar seus conhecimentos!''')\n session.clear()\n except KeyError:\n if not responded:\n msg.body('''Seja bem-vindo ao seu ajudante virtual de estudo!\nNosso objetivo é criar uma curadoria personalizada com base no seu conhecimento em cada matéria, facilitando\nseu estudo à distância de forma inteligente, buscando sempre auxiliar na construção gradativa de conteúdo para que você não se sinta nem pouco desafiado nem desestimulado!\nFazemos isso com um teste diagnóstico de 10 questões de uma matérias específica. Com base nos seus acertos, vamos começar a predizer seu nível de conhecimento por meio da TRI!\nSim, o mesmo método usado pelo *ENEM*. 
Dessa forma conseguirmos medir sua proficiência em certa matéria, garantindo que você tenha conteúdo de acordo com seu nível!\nPercebemos que você *não possui cadastro conosco*, por favor, nos diga na sua próxima mensagem qual seu nome.''')\n            session['user'] = 1\n            session['path'] = 'receive'\n            session['purpose'] = 'name'\n        else:\n            msg.body('''Tivemos um problema em nossa base de dados, contudo, ainda temos seu cadastro!\n\nEssas são as opções de testes até então:\nDigite 1 se você quer um teste de matemática!(*por favor, digite somente o número*)''')\n            session['user'] = 1\n            session['path'] = 1\n    return str(resp)\n\n\nif __name__ == '__main__':\n    application.run()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":7495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"373599744","text":"from django.urls import path\nfrom myapp.views import hello, goodbye\n\nurlpatterns = [\n    # myapp/\n    path('', hello, name='hello'),\n    # myapp/<name>\n    path('<name>/', hello, name='hello_name'),\n    # myapp/goodbye/\n    path('goodbye/', goodbye, name='goodbye_name')\n]","sub_path":"gregg/todoproject/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"190423016","text":"EMP_FILE = \"ex34_employees\"\n\"\"\"\nemployees = [\"John Smith\",\n             \"Jackie Jackson\",\n             \"Chris Jones\",\n             \"Amanda Cullen\",\n             \"Jeremy Goodwin\"\n             ]\n\"\"\"\nf = open(EMP_FILE,encoding='utf-8',mode=\"r\")\nemployees = f.readlines()\nf.close()\nemployees = [x.strip(' \"\\n,') for x in employees]\nprint(\"There are {} employees:\".format(len(employees)))\nprint(*employees,sep=\"\\n\")\nprint(\"Enter an employee name to remove: \",end=\" \")\ntry:\n    remove_target = input()\n    if remove_target not in employees:\n        raise ValueError(\"There's no employee named \" + remove_target)\nexcept ValueError as error:\n    print(error)\nelse:\n    employees.remove(remove_target)\nprint(*employees,sep=\"\\n\")\nf = open(EMP_FILE,'w',encoding=\"utf-8\")\nfor name in employees:\n    f.write(name+\"\\n\")\nf.close()\n","sub_path":"ch7/ex34.py","file_name":"ex34.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"74360006","text":"\"\"\"006_alter_hhold_quota_address\n\nRevision ID: 2c783fa6d485\nRevises: 59794070f759\nCreate Date: 2019-09-09 13:15:09.315587\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c783fa6d485'\ndown_revision = '59794070f759'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.drop_column('sample', 'quota')\n    op.drop_column('sample', 'addressno')\n    op.add_column('case', sa.Column('quota', sa.INTEGER()))\n    op.add_column('case', sa.Column('address', sa.INTEGER()))\n    op.alter_column('case', 'household', existing_type=sa.INTEGER(), new_column_name='hhold')\n\n\ndef downgrade():\n    op.alter_column('case', 'hhold', existing_type=sa.INTEGER(), new_column_name='household')\n    op.drop_column('case', 'quota')\n    op.drop_column('case', 'address')\n    op.add_column('sample', sa.Column('quota', sa.VARCHAR(length=10)))\n    op.add_column('sample', sa.Column('addressno', 
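# A pared-down sketch of the Flask + Twilio webhook pattern that application.py
# above is built on: Twilio POSTs the inbound message, and the view returns
# TwiML. Route and reply text are illustrative.
from flask import Flask, request
from twilio.twiml.messaging_response import MessagingResponse

app = Flask(__name__)

@app.route('/bot', methods=['POST'])
def bot():
    incoming = request.values.get('Body', '')
    resp = MessagingResponse()
    resp.message('You said: ' + incoming)   # rendered as TwiML XML by str(resp)
    return str(resp)

if __name__ == '__main__':
    app.run()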
sa.VARCHAR(length=10)))\n","sub_path":"alembic/versions/2c783fa6d485_006_alter_hhold_quota_address.py","file_name":"2c783fa6d485_006_alter_hhold_quota_address.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"108599426","text":"from django.shortcuts import render, Http404\nfrom django.http import JsonResponse\nfrom ..tasks import *\nfrom django.db.models import Q, Avg, Max, Min, Sum\nfrom django.views.decorators.cache import cache_page\nfrom django.conf import settings\n\n\n@cache_page(settings.VIEW_DONATIONS_CACHE)\ndef v_donations(request):\n    orderByVar = request.GET.get('orderBy', 'id')\n    filterByVar = request.GET.get('filterBy', 'none')\n    recordCountVar = request.GET.get('recordCount', '0')\n    recordCountInt = int(recordCountVar)\n    update_donations_if_needed.delay()\n    listedDonos = DonationModel.objects.order_by(orderByVar)\n    # filter before slicing: Django forbids filtering a queryset once a slice has been taken\n    if filterByVar != 'none':\n        listedDonos = listedDonos.filter(participant_id=filterByVar)\n    if recordCountInt > 0 and recordCountInt <= settings.MAX_API_ROWS:\n        listedDonos = listedDonos[:recordCountInt]\n    else:\n        listedDonos = listedDonos[:settings.MAX_API_ROWS]\n    return JsonResponse(\n        [d for d in listedDonos.values()],\n        safe=False,\n    )\n\n\n@cache_page(settings.VIEW_DONATIONS_CACHE)\ndef v_tracked_donations(request):\n    orderByVar = request.GET.get('orderBy', 'id')\n    update_donations_if_needed.delay()\n    return JsonResponse(\n        [d for d in\n         DonationModel.objects.filter(DonationModel.tracked_q()).order_by(orderByVar)[:settings.MAX_API_ROWS].values()],\n        safe=False,\n    )\n","sub_path":"ffdonations/views/donations.py","file_name":"donations.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"314671656","text":"import pandas as pd\nimport numpy as np\nfrom pypfopt.efficient_frontier import EfficientFrontier\nfrom pypfopt import risk_models\nfrom pypfopt import expected_returns\nfrom pypfopt import discrete_allocation\n\n# Reading in the data; preparing expected returns and a risk model\ndf = pd.read_csv(\"tests/stock_prices.csv\", parse_dates=True, index_col=\"date\")\nassets_classes = {\n    \"GOOG\":\"Tech\",\n    \"AAPL\":\"Tech\",\n    \"FB\":\"Tech\",\n    \"BABA\":\"Tech\",\n    \"AMZN\":\"Tech\",\n    \"GE\":\"Consumer\",\n    \"AMD\":\"Tech\",\n    \"WMT\":\"Consumer\",\n    \"BAC\":\"Financial\",\n    \"GM\":\"Consumer\",\n    \"T\":\"Consumer\",\n    \"UAA\":\"Consumer\",\n    \"SHLD\":\"Consumer\",\n    \"XOM\":\"Energy\",\n    \"RRC\":\"Energy\",\n    \"BBY\":\"Consumer\",\n    \"MA\":\"Financial\",\n    \"PFE\":\"Healthcare\",\n    \"JPM\":\"Financial\",\n    \"SBUX\":\"Consumer\"\n}\nassets_allocation = {\n    \"Tech\": (.10, .30),\n    \"Consumer\": (.10, .50),\n    \"Financial\": (.10, .20),\n    \"Energy\": (0.05, .10),\n    \"Healthcare\": (0.05, .10)\n}\nassert(all([tick in df.columns for tick in assets_classes]))\n\nreturns = df.pct_change().dropna(how=\"all\")\nmu = 
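# A compact sketch of the rename idiom the Alembic revision above relies on:
# alter_column(..., new_column_name=...) renames in place, so existing data
# survives, unlike a drop_column/add_column pair. Table and column names here
# mirror the revision above; the revision identifiers are omitted.
from alembic import op
import sqlalchemy as sa


def upgrade():
    op.alter_column('case', 'household', existing_type=sa.INTEGER(),
                    new_column_name='hhold')


def downgrade():
    op.alter_column('case', 'hhold', existing_type=sa.INTEGER(),
                    new_column_name='household')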
expected_returns.mean_historical_return(df)\nS = risk_models.sample_cov(df)\nef = EfficientFrontier(mu, S, asset_classes=assets_classes, asset_allocation=assets_allocation)\nweights = ef.max_sharpe()\nef.portfolio_performance(verbose=True)\nprint(weights)\n","sub_path":"example_ext.py","file_name":"example_ext.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"510143185","text":"import airflow_utils\n\nfrom airflow.models import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.docker_operator import DockerOperator\n\n\ndag = airflow_utils.generate_dag(id='speculative_options')\nkwargs = airflow_utils.get_dag_kwargs(dag=dag)\ndbt_kwargs = airflow_utils.get_dag_kwargs(dag=dag, type='dbt')\nprediction_kwargs = airflow_utils.get_dag_kwargs(dag=dag, type='prediction')\n\nstart_time = BashOperator(\n task_id='start_pipeline',\n bash_command='date',\n dag=dag,\n)\n\ndbt_train = DockerOperator(\n task_id='update_dbt_training_tables',\n command='dbt run -m speculation --profiles-dir .',\n **dbt_kwargs,\n)\n\nhigh_price_predictor = DockerOperator(\n task_id='high_price_predictor',\n command='python -m science.executor s3 --archive-files',\n **prediction_kwargs,\n)\n\nlow_price_predictor = DockerOperator(\n task_id='low_price_predictor',\n command='python -m science.executor s4 --archive-files',\n **prediction_kwargs,\n)\n\nload_stock_predictions = DockerOperator(\n task_id='stock_prediction_loader',\n command='python data/science/predictions/loader.py',\n **kwargs,\n)\n\ndbt_trade = DockerOperator(\n task_id='update_dbt_trading_tables',\n command='dbt run -m speculative_options --profiles-dir .',\n **dbt_kwargs,\n)\n\nreport_speculative_options = DockerOperator(\n task_id='speculative_options_report',\n command='python data/science/reports/speculative_options.py',\n **kwargs,\n)\n\nend_time = BashOperator(\n task_id='end_pipeline',\n bash_command='date',\n dag=dag,\n)\n\ndbt_train.set_upstream(start_time)\nhigh_price_predictor.set_upstream(dbt_train)\nlow_price_predictor.set_upstream(high_price_predictor)\nload_stock_predictions.set_upstream(low_price_predictor)\ndbt_trade.set_upstream(load_stock_predictions)\nreport_speculative_options.set_upstream(dbt_trade)\nend_time.set_upstream(report_speculative_options)\n","sub_path":"dags/speculative_options.py","file_name":"speculative_options.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"456031639","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'n_kovganko@wargaming.net'\n\nimport win32event\nimport pywintypes\nfrom threading import Thread\nfrom time import time, sleep\nfrom os import path\n\nimport win32file\nimport win32con\n\n\nACTIONS = {\n 1: \"created\",\n 2: \"deleted\",\n 3: \"modified\",\n 4: \"renamed from\",\n 5: \"renamed to\"\n}\n\nFILE_LIST_DIRECTORY = 0x0001\nFILE_NOTIFY_CHANGE = win32con.FILE_NOTIFY_CHANGE_FILE_NAME | \\\n win32con.FILE_NOTIFY_CHANGE_DIR_NAME | \\\n win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES | \\\n win32con.FILE_NOTIFY_CHANGE_SIZE | \\\n win32con.FILE_NOTIFY_CHANGE_LAST_WRITE | \\\n win32con.FILE_NOTIFY_CHANGE_SECURITY\n\n\nclass Event(object):\n def __init__(self, event_type, event_path):\n \"\"\"\n Initialize Event object.\n\n :param str event_type: system event type e.g. 
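# The DAG above wires its tasks with repeated set_upstream() calls; Airflow's
# bitshift operators express the same linear chain in one line. A sketch with
# placeholder BashOperators (dag id, task ids and commands are illustrative):
from datetime import datetime
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator

dag = DAG('linear_chain_example', start_date=datetime(2021, 1, 1),
          schedule_interval=None)

start = BashOperator(task_id='start', bash_command='date', dag=dag)
work = BashOperator(task_id='work', bash_command='echo working', dag=dag)
end = BashOperator(task_id='end', bash_command='date', dag=dag)

start >> work >> end   # same graph as end.set_upstream(work); work.set_upstream(start)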
\"modified\", \"created\".\n :param str event_path: source path.\n \"\"\"\n self.event_type = event_type\n self.event_path = event_path\n self.event_timestamp = time()\n\n def __str__(self):\n return '%s: %s at %s' % (self.event_type,\n self.event_path, self.event_timestamp)\n\n def __repr__(self):\n return '%s: %s at %s' % (self.event_type,\n self.event_path, self.event_timestamp)\n\n def __cmp__(self, other):\n if self.event_path == other.event_path and \\\n round(self.event_timestamp, 1) == round(\n other.event_timestamp, 1) and \\\n self.event_type == other.event_type:\n return 0\n elif round(self.event_timestamp, 1) > round(other.event_timestamp, 1):\n return 1\n else:\n return -1\n\n def __eq__(self, other):\n if self.event_path == other.event_path and \\\n round(self.event_timestamp, 1) == round(\n other.event_timestamp, 1) and \\\n self.event_type == other.event_type:\n return True\n else:\n return False\n\n def __hash__(self):\n return hash(self.event_path) ^ hash(self.event_type) ^ \\\n hash(round(self.event_timestamp, 1))\n\n\nclass EventHelper(object):\n _stop_handler = False\n _thread = None\n events = []\n\n @classmethod\n def _dispatch(cls, path_to_watch=None, flags=None):\n handle_dir = win32file.CreateFile(\n path_to_watch,\n FILE_LIST_DIRECTORY,\n win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE |\n win32con.FILE_SHARE_DELETE,\n None,\n win32con.OPEN_EXISTING,\n win32con.FILE_FLAG_BACKUP_SEMANTICS | win32con.FILE_FLAG_OVERLAPPED,\n None\n )\n overlapped = pywintypes.OVERLAPPED()\n overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)\n _buffer = win32file.AllocateReadBuffer(8192)\n while not cls._stop_handler:\n win32file.ReadDirectoryChangesW(\n handle_dir, _buffer, True, flags, overlapped)\n rc = win32event.WaitForSingleObject(overlapped.hEvent, 1000)\n if rc == win32event.WAIT_OBJECT_0:\n n_bytes = win32file.GetOverlappedResult(\n handle_dir, overlapped, True)\n if n_bytes:\n results = win32file.FILE_NOTIFY_INFORMATION(\n _buffer, n_bytes)\n for action, file_ in results:\n full_filename = path.join(path_to_watch, file_)\n cls.events.append(\n Event(ACTIONS.get(action, \"unknown\"),\n full_filename))\n\n @classmethod\n def enable(cls, path_to_watch=None, flags=None):\n flags = flags or FILE_NOTIFY_CHANGE\n if not path.exists(path_to_watch):\n raise WGCException('You can not subscribe to folder \"%s\" '\n 'because it does not exists.' 
% path_to_watch)\n cls._stop_handler = False\n cls._thread = Thread(\n target=cls._dispatch, args=(path_to_watch, flags, ))\n cls._thread.daemon = True\n cls._thread.start()\n\n @classmethod\n def disable(cls):\n cls._stop_handler = True\n\n @classmethod\n def clear_events(cls):\n \"\"\"\n Clears events.\n \"\"\"\n cls.events[:] = []\n\n @classmethod\n def log_to_file(cls, file_dir):\n \"\"\"\n Log all events to file.\n \"\"\"\n events = list(set(cls.events))\n with open(path.join(file_dir, 'events.log'), 'w') as event_file:\n for event in events:\n event_file.write(str(event) + '\\r\\n')\n\n\nif __name__ == '__main__':\n\t# Set test folder on next line\n path_ = 'g:\\\\Games\\\\World_of_Tanks_launcherstest\\\\'\n EventHelper.enable(path_)\n sleep(10)\n EventHelper.log_to_file(path_)\n","sub_path":"event_helper.py","file_name":"event_helper.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"34383178","text":"#!/usr/bin/env python\n# !/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2022/2/4 4:53 下午\n# @Author : jhyfugug\n# @File : for2.py\n\n\"\"\"\n用for循环实现1~100之间的偶数求和\n\"\"\"\n\nSum = 0\n\nfor x in range(2, 101, 2):\n print(x)\n if x % 2 == 0:\n Sum += x\n\nprint(Sum)\n","sub_path":"Day01-15/Day04_01/for2.py","file_name":"for2.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"392700196","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport sys\nimport os\nfrom utils import Utilities\nimport re\nimport matplotlib\nimport csv\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom scipy.interpolate import spline\nimport json\n\nreload(sys) \nsys.setdefaultencoding('utf-8')\n\ndef get_contributor(repo_name):\n url = 'https://api.github.com/repos/'+repo_name+'/stats/contributors'\n r = requests.get(url)\n author_dict = {}\n for person in r.json():\n author_dict[person[\"author\"][\"id\"]] = person[\"author\"][\"login\"]\n return author_dict \n\nclass Analyzer(object):\n def __init__(self):\n self.display = True\n # Load the positive and negative words\n self.words = {}\n self.negs = [\"isn't\",\"not\",\"aren't\",\"don't\",\"doesn't\",\"didn't\",\"can't\",\"hardly\",\"no\"]\n with open(\"words/positive.txt\") as file:\n for line in file:\n self.words[line.rstrip()] = 1\n with open(\"words/negative.txt\") as file:\n for line in file:\n self.words[line.rstrip()] = -1\n\n def analyze(self, message):\n score = 0\n found = 0\n disp = \"\"\n\n i = 0\n # try:\n parts = Utilities.split(message)\n\n # except AttributeError as e:\n # print message #None\n neg = False\n j = 3\n for w in parts:\n j += 1\n if w in self.negs:\n j = 0\n neg = True\n if w in self.words:\n if j > 2 or neg == False:\n score += self.words[w]\n found += 1\n if self.display:\n i = message.lower().find(w, i)\n # if self.words[w] == -1:\n # neg[message[i:i+len(w)]] = neg.get(message[i:i+len(w)], 0) + 1\n # if self.words[w] == 1:\n # pos[message[i:i+len(w)]] = pos.get(message[i:i+len(w)], 0) + 1\n d = Utilities.get_colored_text(self.words[w], message[i:i+len(w)])\n message = message[:i] + d + message[i+len(w):]\n i = i + len(d)\n disp += d + \" \"\n else:\n # print w\n neg = False\n score += -self.words[w]\n found += 1\n if self.display:\n i = message.lower().find(w, i)\n # if self.words[w] == -1:\n # neg[message[i:i+len(w)]] = neg.get(message[i:i+len(w)], 0) + 1\n # if 
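# event_helper.py above drives ReadDirectoryChangesW by hand on Windows. For
# comparison, a cross-platform sketch of the same subscribe/collect pattern
# using the watchdog package -- an assumption, since the module above does not
# use it:
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler


class Collector(FileSystemEventHandler):
    def __init__(self):
        self.events = []

    def on_any_event(self, event):
        self.events.append((event.event_type, event.src_path, time.time()))


handler = Collector()
observer = Observer()
observer.schedule(handler, '.', recursive=True)   # watch the current directory
observer.start()
time.sleep(5)
observer.stop()
observer.join()
print(handler.events)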
self.words[w] == 1:\n                        #     pos[message[i:i+len(w)]] = pos.get(message[i:i+len(w)], 0) + 1\n                        d = Utilities.get_colored_text(-self.words[w], message[i:i+len(w)])\n                        message = message[:i] + d + message[i+len(w):]\n                        i = i + len(d)\n                        disp += d + \" \" \n\n        label = score / float(found) if found != 0 else 0.0\n        return (label, disp, message)\n\n    def output(self, message, label, disp, time):\n        text = \"\"\n\n        text = \"\\t{}| {}\".format(disp, message)\n        message = \"\\n\\n%s\\n\\n\"%(text)\n\n        print(\"{:.2f}{}\\t{}\".format(label, text, time))\n\ndef read_json(file):\n    for jsonObject in file:\n        try:\n            # Allow control characters which are sometimes in the strings.\n            data = json.loads(jsonObject, strict=False)\n        except ValueError as e:\n            raise(ValueError(\"Incorrect JSON string: '{}' with error '{}'\".format(jsonObject, e)))\n\n        # skip records without a usable body; otherwise normalise line endings\n        if \"body\" not in data or data['body'] is None:\n            continue\n        data[\"body\"] = data[\"body\"].replace('\\r\\n', '\\n')\n\n        fields = {}\n        fields['body'] = data['body']\n        if 'commit_id' in data.keys():\n            fields['commit_id'] = data['commit_id'] \n        if 'issue_id' in data.keys():\n            fields['issue_id'] = data['issue_id']\n        fields['time'] = data['created_at']\n        yield fields\n\ndef get_name(repo_id):\n    with open('repos_index2017.json','r') as load_f:\n        data = json.load(load_f)\n    for k,v in data.items():\n        if v == int(repo_id):return k;\n    return '#'\n\ndef analyze_an_actor(actor_history):\n    text = \"\"\n    tmp = \"\"\n    positive = {}\n    negative = {}\n    neutral = {}\n    for data in actor_history:\n        analyzer = Analyzer()\n        (label, disp, message) = analyzer.analyze(data['body'])\n        if label > 0:\n            positive[message] = label\n        if label < 0:\n            negative[message] = label \n        if label == 0.0:\n            neutral[message] = label\n\n    pos = positive.values()\n    neg = negative.values()\n    if len(neg)!=0:\n        text += \"\\n\\n%s\\n\\n\"%str(float(len(pos))/len(neg))\n    text += \"\\n\\npositive (%d): %s\"%(len(pos), str((round(np.mean(pos)if pos!=[]else 0.0,2))))\n    text += \"\\t\\tnegative (%d): %s\\n\\n\"%(len(neg), str((round(np.mean(neg)if neg!=[]else 0.0,2))))\n    for m in positive.keys():\n        text += \"\\n\\n%s\\n\\n\"%(m)\n    for m in negative.keys():\n        text += \"\\n\\n%s\\n\\n\"%(m)\n    for m in neutral.keys():\n        text += \"\\n\\n%s\\n\\n\"%(m)\n\n    return text\n\ndef get_issue_time(open_time, close_time):\n    opendate = re.findall(r'(.+?)T',open_time)[0]\n    closedate = re.findall(r'(.+?)T',close_time)[0]\n    oy = re.findall(r'^(.+?)-',opendate)[0]\n    cy = re.findall(r'^(.+?)-',closedate)[0]\n    if oy != cy:\n        return False\n\n    if opendate == closedate:\n        oh = int(re.findall(r'T(.+?):',open_time)[0])\n        ch = int(re.findall(r'T(.+?):',close_time)[0])\n        if oh == ch:\n            return False\n        else:\n            om = int(re.findall(r':(.+?):',open_time)[0])\n            cm = int(re.findall(r':(.+?):',close_time)[0])\n            return str((ch-oh-1)*60+60-om+cm)+'m'\n    else:\n        open_month = int(re.findall(r'-(.+?)-',opendate)[0])\n        open_date = int(re.findall(r'-(.+?)$',opendate)[0][-2:])\n\n        close_month = int(re.findall(r'-(.+?)-',closedate)[0])\n        close_date = int(re.findall(r'-(.+?)$',closedate)[0][-2:])\n\n        oh = int(re.findall(r'T(.+?):',open_time)[0])\n        ch = int(re.findall(r'T(.+?):',close_time)[0]) \n        om = int(re.findall(r':(.+?):',open_time)[0])\n        cm = int(re.findall(r':(.+?):',close_time)[0])\n        if close_month == open_month:\n            return str(close_date-open_date-1)+'d'+str(24-oh+ch)+'h'\n        else:\n            return str((close_month-open_month-1)*30+31-open_date+close_date-1)+'d'+str(24-oh+ch)+'h'\n\ndef GEN_html(repo_name, repo_id):\n    path = 'DataPro/'+str(repo_id)+'/'\n    output = \"plots/issues/\"+str(repo_id)+'/'\n    if not os.path.exists(output):\n        os.makedirs(output)\n    print ('Processing '+ repo_name)\n    filename = re.findall(r'/(.+?)$',repo_name)[0]+'_issues.json'\n    f = open(filename, 'r')\n    i = 1\n    for jsonObject in f:\n        if i%100==0: print (\"%d/%d\")%(i,1600)\n        i+=1\n        data = json.loads(jsonObject, strict = False)\n        if data['closed_at']==None: continue;\n        if data['created_at'][:4] != '2017' and data['created_at'][:4]!='2018':continue;\n\n        open_time = data['created_at']\n        close_time = data['closed_at']\n        last_time = get_issue_time(open_time,close_time)\n        if last_time == False:\n            continue\n        issue = data['id']\n        title = data['title']\n\n        tmp = \"\"\n        num = 0\n        times = []\n        average = []\n        for file in os.listdir(path):\n            if file[-5:]!='.json':continue;\n            f = open(path+file,'r')\n            for jsonObject in f:\n                data = json.loads(jsonObject, strict = False)\n                if 'issue_id' not in data.keys():continue;\n                if data['issue_id'] == issue:\n                    num += 1\n                    analyzer = Analyzer()\n                    (label, disp, message) = analyzer.analyze(data['body'])\n                    # time = re.findall(r'\\d-(.+?)T',open_time)[0]\n                    time = data['created_at']\n                    year = re.findall(r'^(.+?)-',time)[0][-2:]\n                    month = re.findall(r'-(.+?)-',time)[0]\n                    day = re.findall(r'-.+?-(.+?)T',time)[0]\n                    hour = re.findall(r'T(.+?):',time)[0]\n\n                    if int(hour) <= 12:\n                        hour = str(int(hour))+'a.m'\n                    else:\n                        hour = str(int(hour))+'p.m'\n\n                    time = year + '/' + month + '/' + day + ' ' + hour\n                    times.append(time)\n                    average.append(round(label,2))\n\n        \n        if num < 10: continue; #the total comment number is fewer than 10\n        \n        X=range(len(times))\n        x =np.arange(0,len(times),1)\n        xnew = np.linspace(x.min(),x.max(),300) #300 represents number of points to make between T.min and T.max\n        Y_smooth = spline(X,average,xnew)\n\n        plt.figure(figsize=(20,8))\n        plt.margins(0.08)\n        plt.title(\"ID: %d(%s) %s\"%(issue,title,last_time),fontweight = 'semibold')\n        plt.plot(xnew,Y_smooth,label='Rate')\n        plt.scatter(X,average)\n        for a,b in zip(X,average):\n            plt.text(a,b,'%.2f'%b,fontsize=10)\n\n        plt.ylim(-2.0,2.0)\n\n        plt.axhline(0, color='black')\n        plt.xticks(X,times,rotation=25)\n        plt.grid()\n        plt.legend(loc=\"upper right\")\n\n        plt.savefig(output+str(i)+'.png')\n        plt.draw() \n        plt.pause(2)\n        plt.close() \n\n\n
def main():\n    # repo_id = 11730342\n    # repo_id = 10270250\n    # repo_id = 11159552\n    # repo_id = 41881900\n    repo_id = 460078\n    repo_name = get_name(repo_id)\n\n    GEN_html(repo_name,repo_id) \n    # GEN_html(repo_name,repo_id,'2018-03',True,up = True) \n    # GEN_html(repo_name,repo_id,'2017-12',True,up = True) \n    # GEN_html(repo_name,repo_id,'2018-05',True,up = True) \n\nmain()","sub_path":"Analyze_issue.py","file_name":"Analyze_issue.py","file_ext":"py","file_size_in_byte":10053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"609168705","text":"\"\"\" Manage the event api's message make \"\"\"\n# -*-coding: utf-8 -*-\nimport time\nimport pythoncom\nimport datetime\nimport math\nimport win32com.client as winAPI\nimport config.server_config as server_config\nimport config.login_config as login_config\nfrom config.log_config import date_file_logger\nfrom metaclass.metaclass import Singleton\n\nlogger_api_event = date_file_logger(\"API_event\")\n\nSTAND_BY = 0\nRECEIVED = 1\n\n\nclass XASessionEvents:\n    login_state = STAND_BY\n\n    def OnLogin(self, code, msg):\n        XASessionEvents.login_state = RECEIVED\n        logger_api_event.info(msg)\n\n    def OnDisconnect(self, code, msg):\n        pass\n\n\nclass XAQueryEvents:\n    query_state = STAND_BY\n\n    def OnReceiveData(self, code):\n        XAQueryEvents.query_state = RECEIVED\n\n    def OnReceiveMessage(self, error, nMessageCode, szMessage):\n        logger_api_event.info(szMessage)\n\n\nSERVER_PORT = 20001\nSHOW_CERTIFICATE_ERROR_DIALOG = False\nREPEATED_DATA_QUERY = 1\nTRANSACTION_REQUEST_EXCESS = -21\nTODAY = datetime.datetime.now().strftime('%Y%m%d')\n\n\nclass TRQuery(object, metaclass=Singleton):\n    \"\"\"\n    The third argument of SetDataFIeld means the starting index of data of repeated query ( API said this is OCCUR)\n    Setting this argument 0 will receive the whole repeated data starting from index 0\n    \"\"\"\n    def __init__(self):\n        self._load_config()\n        self._load_login_info()\n        self._api_login()\n        print(self)\n\n    def _load_config(self):\n        server = server_config.HTS\n        self._server_type = server['server_type']\n        self._server_addr = server['server_addr']\n\n    def _load_login_info(self):\n        login = login_config.KYKK0010\n        self._id = login['id']\n        self._password = login['password']\n        self._certificate_password = login['certificate_password']\n        self._account_password = login['account_password']\n\n    def _request(self, xa_query, sequence_request=False):\n        while True:\n            ret = xa_query.Request(sequence_request)\n            \"\"\" Receiving error message, keep requesting until accepted \"\"\"\n            if ret == TRANSACTION_REQUEST_EXCESS:\n                time.sleep(0.8)\n            else:\n                break\n        \"\"\" Wait window's event message \"\"\"\n        while XAQueryEvents.query_state == STAND_BY:\n            pythoncom.PumpWaitingMessages()\n        XAQueryEvents.query_state = STAND_BY\n\n    def _api_login(self):\n        pythoncom.CoInitialize()\n        xa_session = winAPI.DispatchWithEvents(\"XA_Session.XASession\", XASessionEvents)\n        if xa_session.IsConnected():\n            xa_session.DisconnectServer()\n\n        xa_session.ConnectServer(self._server_addr, SERVER_PORT)\n        xa_session.Login(self._id, self._password, self._certificate_password, SERVER_PORT,\n                         SHOW_CERTIFICATE_ERROR_DIALOG)\n        \"\"\" Wait window's event message \"\"\"\n        while XASessionEvents.login_state == STAND_BY:\n            pythoncom.PumpWaitingMessages()\n        XASessionEvents.login_state = STAND_BY\n\n        return xa_session.IsConnected()\n\n    def query(self, TR, *request_arg):\n        xa_query = winAPI.DispatchWithEvents(\"XA_DataSet.XAQuery\", XAQueryEvents)\n        xa_query.ResFileName = 
\"C:\\\\eBEST\\\\xingAPI\\\\Res\\\\\" + TR + \".res\"\n\n if TR is \"t0424\":\n \"\"\"\n Get the information of accounts \n 0 -> my account number\n \"\"\"\n xa_query.SetFieldData(\"t0424InBlock\", \"accno\", 0, request_arg[0])\n if self._server_type is 0:\n xa_query.SetFieldData(\"t0424InBlock\", \"passwd\", 0, \"0000\")\n else:\n xa_query.SetFieldData(\"t0424InBlock\", \"passwd\", 0, self._account_pwd)\n xa_query.SetFieldData(\"t0424InBlock\", \"prcgb\", 0, \"1\")\n xa_query.SetFieldData(\"t0424InBlock\", \"chegb\", 0, \"0\")\n xa_query.SetFieldData(\"t0424InBlock\", \"dangb\", 0, \"0\")\n xa_query.SetFieldData(\"t0424InBlock\", \"charge\", 0, \"1\")\n xa_query.SetFieldData(\"t0424InBlock\", \"cts_expcode\", 0, \"\")\n self._request(xa_query)\n\n deposits = []\n for idx in range(xa_query.GetBlockCount(\"t0424OutBlock\")):\n deposits.append(xa_query.GetFieldData('t0424OutBlock', 'sunamt', idx))\n return deposits\n\n elif TR is \"t1511\":\n \"\"\"\n Get today's index data\n 0 -> stock_code\n \"\"\"\n xa_query.SetFieldData(\"t1511InBlock\", \"upcode\", 0, request_arg[0])\n self._request(xa_query)\n stock_name = \"종합(KOSPI)\" if request_arg[0] is \"001\" else \"코스닥(KOSDAQ)\"\n return [(stock_name,\n xa_query.GetFieldData('t1511OutBlock', 'pricejisu', 0),\n xa_query.GetFieldData('t1511OutBlock', 'openjisu', 0),\n xa_query.GetFieldData('t1511OutBlock', 'value', 0),\n xa_query.GetFieldData('t1511OutBlock', 'volume', 0),\n xa_query.GetFieldData('t1511OutBlock', 'highjisu', 0),\n xa_query.GetFieldData('t1511OutBlock', 'lowjisu', 0),\n TODAY)]\n\n elif TR is \"t2101\":\n \"\"\" Get today's option data \"\"\"\n option_codes = self.query(\"t9944\")\n\n option_data = []\n for option_name, option_code in option_codes:\n xa_query.SetFieldData(\"t2101InBlock\", \"focode\", 0, option_code)\n self._request(xa_query)\n for idx in range(xa_query.GetBlockCount(\"t2101OutBlock\")):\n option_data.append((\"코스피200 \" + option_name,\n xa_query.GetFieldData(\"t2101OutBlock\", \"price\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"diff\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"volume\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"value\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"open\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"high\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"low\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"basis\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"recprice\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"theoryprice\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"glyl\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"lastmonth\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"jandatecnt\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"pricejisu\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"kospijisu\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"delt\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"gama\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"ceta\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"vega\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"rhox\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"histimpv\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"impv\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"actprice\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"sbasis\", idx),\n xa_query.GetFieldData(\"t2101OutBlock\", \"ibasis\", idx),\n TODAY))\n return option_data\n\n elif TR is \"t8407\":\n \"\"\"\n Get today's stock 
data\n 0 -> the list of stock\n \"\"\"\n \"\"\" Remove kospi index code, t8407 doesn't accept index code \"\"\"\n del request_arg[0][0]\n updated_stock_data = []\n for idx in range(math.ceil(len(request_arg[0]) / 50)):\n requested_data = request_arg[0][50 * (idx):50 * (idx + 1)]\n request_50_stocks_data = ''.join(str(code) for code in list(zip(*requested_data))[0])\n xa_query.SetFieldData(\"t8407InBlock\", \"nrec\", 0, len(request_arg[0][50 * (idx):50 * (idx + 1)]))\n xa_query.SetFieldData(\"t8407InBlock\", \"shcode\", 0, request_50_stocks_data)\n self._request(xa_query)\n for idy in range(xa_query.GetBlockCount(\"t8407OutBlock1\")):\n updated_stock_data.append((xa_query.GetFieldData('t8407OutBlock1', 'hname', idy),\n xa_query.GetFieldData('t8407OutBlock1', 'price', idy),\n xa_query.GetFieldData('t8407OutBlock1', 'open', idy),\n xa_query.GetFieldData('t8407OutBlock1', 'value', idy),\n xa_query.GetFieldData('t8407OutBlock1', 'volume', idy),\n xa_query.GetFieldData('t8407OutBlock1', 'high', idy),\n xa_query.GetFieldData('t8407OutBlock1', 'low', idy),\n TODAY))\n return updated_stock_data\n\n elif TR is \"t8413\":\n \"\"\"\n Get time series stock data\n 0 -> stock code, 1 -> stock name\n \"\"\"\n xa_query.SetFieldData(\"t8413InBlock\", \"shcode\", 0, request_arg[0])\n xa_query.SetFieldData(\"t8413InBlock\", \"gubun\", 0, '2')\n xa_query.SetFieldData(\"t8413InBlock\", \"sdate\", 0, '20140101')\n xa_query.SetFieldData(\"t8413InBlock\", \"edate\", 0, TODAY)\n xa_query.SetFieldData(\"t8413InBlock\", \"comp_yn\", 0, 'N')\n self._request(xa_query)\n\n time_series_data = []\n for idx in range(xa_query.GetBlockCount(\"t8413OutBlock1\")):\n time_series_data.append((request_arg[1],\n xa_query.GetFieldData('t8413OutBlock1', 'close', idx),\n xa_query.GetFieldData('t8413OutBlock1', 'open', idx),\n xa_query.GetFieldData('t8413OutBlock1', 'value', idx),\n xa_query.GetFieldData('t8413OutBlock1', 'jdiff_vol', idx),\n xa_query.GetFieldData('t8413OutBlock1', 'high', idx),\n xa_query.GetFieldData('t8413OutBlock1', 'low', idx),\n xa_query.GetFieldData('t8413OutBlock1', 'date', idx)))\n return time_series_data\n\n elif TR is \"t8419\":\n \"\"\"\n Get time series index data\n 0 -> stock code, 1 -> stock name\n \"\"\"\n xa_query.SetFieldData(\"t8419InBlock\", \"shcode\", 0, request_arg[0])\n xa_query.SetFieldData(\"t8419InBlock\", \"gubun\", 0, '2')\n xa_query.SetFieldData(\"t8419InBlock\", \"sdate\", 0, '20140101')\n xa_query.SetFieldData(\"t8419InBlock\", \"edate\", 0, TODAY)\n xa_query.SetFieldData(\"t8419InBlock\", \"comp_yn\", 0, 'N')\n self._request(xa_query)\n\n time_series_data = []\n for idx in range(xa_query.GetBlockCount(\"t8419OutBlock1\")):\n time_series_data.append((request_arg[1],\n xa_query.GetFieldData('t8419OutBlock1', 'close', idx),\n xa_query.GetFieldData('t8419OutBlock1', 'open', idx),\n xa_query.GetFieldData('t8419OutBlock1', 'value', idx),\n xa_query.GetFieldData('t8419OutBlock1', 'jdiff_vol', idx),\n xa_query.GetFieldData('t8419OutBlock1', 'high', idx),\n xa_query.GetFieldData('t8419OutBlock1', 'low', idx),\n xa_query.GetFieldData('t8419OutBlock1', 'date', idx)))\n return time_series_data\n\n elif TR is \"t8430\":\n \"\"\" Return kospi stock codes, kosdaq stock codes \"\"\"\n xa_query.SetFieldData(\"t8430InBlock\", \"gubun\", 0, 1)\n self._request(xa_query)\n\n kospi_codes = [('001', '종합(KOSPI)')]\n for idx in range(xa_query.GetBlockCount(\"t8430OutBlock\")):\n kospi_codes.append((xa_query.GetFieldData('t8430OutBlock', 'shcode', idx),\n xa_query.GetFieldData('t8430OutBlock', 'hname', 
idx)))\n\n xa_query.SetFieldData(\"t8430InBlock\", \"gubun\", 0, 2)\n self._request(xa_query)\n\n kosdaq_codes = [('301', '코스닥(KOSDAQ)')]\n for idx in range(xa_query.GetBlockCount(\"t8430OutBlock\")):\n kosdaq_codes.append((xa_query.GetFieldData('t8430OutBlock', 'shcode', idx),\n xa_query.GetFieldData('t8430OutBlock', 'hname', idx)))\n return kospi_codes, kosdaq_codes\n\n elif TR is \"t9944\":\n \"\"\" Return option codes \"\"\"\n xa_query.SetFieldData(\"t9944InBlock\", \"dummy\", 0, \"dum\")\n self._request(xa_query)\n\n option_codes = []\n for idx in range(xa_query.GetBlockCount(\"t9944OutBlock\")):\n option_codes.append([xa_query.GetFieldData(\"t9944OutBlock\", \"hname\", idx),\n xa_query.GetFieldData(\"t9944OutBlock\", \"shcode\", idx)])\n return option_codes\n\n\napi_request = TRQuery()\nKOSPI_LIST, KOSDAQ_LIST = api_request.query(\"t8430\")","sub_path":"src/api/ebest_api.py","file_name":"ebest_api.py","file_ext":"py","file_size_in_byte":14063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"259997783","text":"import os\nimport itertools\nimport tensorflow as tf\nimport early_stopping\nfrom keras import backend as K\nfrom sklearn.metrics import f1_score\nfrom sklearn.utils import shuffle\nimport tools_with_GMP as tools\nimport plot_confusion_matrix_Copy1 as plot_confusion_matrix\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nnp.set_printoptions(suppress=True)\n\ntraining_data, training_label, validation_data, validation_label, validation_cate_label = tools.get_data()\n\nfrom keras_radam.optimizer_v2 import RAdam\nks = 3\nnum_layer = 5\nbs = 30\nlr = 0.0001\nepochs = 100\n\ndef create_model(learning_rate, bs, ks, num_layer):\n num_filter = 32\n \n in_data = tf.keras.Input(shape=(None, 1), dtype=\"float64\")\n x = tf.keras.layers.Conv1D(filters = num_filter, kernel_size = ks, activation=\"relu\")(in_data)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.MaxPool1D(2)(x)\n\n for i in range(2,num_layer+1):\n try:\n if i==num_layer:\n x = tf.keras.layers.Conv1D(filters = num_filter, kernel_size = ks, activation=\"relu\")(x)\n break\n if i%2 != 0:\n num_filter = num_filter *2\n x = tf.keras.layers.Conv1D(filters = num_filter, kernel_size = ks, activation=\"relu\")(x)\n x = tf.keras.layers.MaxPool1D(2)(x)\n if i in [6, 8, 9]:\n x = tf.keras.layers.Dropout(0.5)(x)\n except ValueError:\n print(\"model overflow[lr, bs, ks, #layer]: \",[learning_rate, bs, ks, i+1])\n return False\n \n x = tf.keras.layers.GlobalMaxPool1D()(x)\n x = tf.keras.layers.Dense(32, activation = 'relu')(x)\n outputs = tf.keras.layers.Dense(4, activation = 'softmax')(x)\n \n model = tf.keras.Model(inputs=in_data, outputs=outputs)\n print(model.summary())\n return model\n\ndef run(bs, lr, ks, num_layer):\n scores = []\n fold=1\n for index, (X_train, Y_train, X_val, Y_val, val_cat) in enumerate(zip(training_data,\n training_label,\n validation_data,\n validation_label,\n validation_cate_label)):\n\n X_val, Y_val, val_cat = shuffle(X_val, Y_val, val_cat, random_state=50)\n model = create_model(lr, bs, ks, num_layer)\n optimizer = tf.keras.optimizers.Adam(lr = lr)\n ES = early_stopping.EarlyStopping(patience=6)\n losses = []\n \n for epoch in range(1, epochs+1, 1):\n print(\"Epoch: {}\".format(epoch))\n epoch_losses = []\n \n prog_bar = tf.keras.utils.Progbar(X_train.shape[0])\n prog_bar_val = tf.keras.utils.Progbar(X_val.shape[0])\n train_acc_metric = tf.keras.metrics.Accuracy()\n val_acc_metric = tf.keras.metrics.Accuracy()\n \n 
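# ebest_api.py above alternates COM requests with a Windows message pump; the
# core pattern, reduced to a sketch. The ProgID and callback name come from the
# module above, but running this assumes the vendor's COM server is installed.
import pythoncom
import win32com.client as winAPI


class SessionEvents:
    done = False

    def OnLogin(self, code, msg):
        SessionEvents.done = True


session = winAPI.DispatchWithEvents('XA_Session.XASession', SessionEvents)
# ... issue session.ConnectServer(...) / session.Login(...) here, then pump
# the thread's message queue until the OnLogin callback flips the flag:
while not SessionEvents.done:
    pythoncom.PumpWaitingMessages()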
X_train, Y_train = shuffle(X_train, Y_train, random_state=0)\n \n for ind, (input_data, input_label) in enumerate(zip(X_train, Y_train)):\n \n with tf.GradientTape() as tape:\n input_data = input_data.reshape((1, input_data.shape[0], 1))\n input_label = input_label.reshape((1, input_label.shape[0]))\n logits = model(input_data)\n loss_value = tf.keras.losses.categorical_crossentropy(input_label, logits)\n epoch_losses.append(float(loss_value))\n \n train_acc_metric.update_state(np.argmax(input_label), np.argmax(logits))\n prog_bar.add(1, values=[(\"train loss\", float(loss_value)), (\"train accuracy\", float(train_acc_metric.result()))])\n \n # update weights using mini-batch mechanism\n if (ind+1)%bs == 0:\n grads = tape.gradient(loss_value, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n avg_epoch_loss = sum(epoch_losses) / (1.0 * len(epoch_losses))\n \n print(\"{}: {}\".format(epoch, avg_epoch_loss))\n losses.append(avg_epoch_loss)\n \n for ind, (input_data, input_label) in enumerate(zip(X_val, Y_val)):\n input_data = input_data.reshape((1, input_data.shape[0], 1))\n input_label = input_label.reshape((1, input_label.shape[0]))\n val_logits = model(input_data)\n val_loss_val = tf.keras.losses.categorical_crossentropy(input_label, val_logits)\n val_acc_metric.update_state(np.argmax(input_label), np.argmax(logits))\n prog_bar_val.add(1, values=[(\"val loss\", float(val_loss_val)), (\"val accuracy\", float(val_acc_metric.result()))])\n \n print(\"\\n\")\n \n ES(float(val_loss_val), model, fold)\n if ES.early_stop:\n print(\"Early Stopping!\")\n break\n \n val_pred_cat = []\n for ind, (input_data, input_label) in enumerate(zip(X_val, Y_val)):\n input_data = input_data.reshape((1, input_data.shape[0], 1))\n input_label = input_label.reshape((1, input_label.shape[0]))\n val_pred = model.predict(input_data)\n val_pred_cat.append(np.argmax(val_pred))\n \n score = f1_score(val_cat, val_pred_cat, average=None)\n scores.append(score)\n print(score)\n \n fold = fold + 1\n \n# cnf_matrix = confusion_matrix(val_cat, val_pred_cat)\n# plot_confusion_matrix.plot_confusion_matrix(cnf_matrix, classes=['AF','Noise','Normal','Other'], save_png=True)\n \n print(\"\\n\")\n return scores\n\nscores = run(bs, lr, ks, num_layer)","sub_path":"experimental/structure_with_GMP.py","file_name":"structure_with_GMP.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"123194060","text":"# -*- coding: utf-8 -*-\n# Python 3.6.1\n\nimport community\n# http://perso.crans.org/aynaud/communities/api.html\nimport networkx as nx\n# https://networkx.readthedocs.io/en/networkx-1.11/install.html\nimport matplotlib.pyplot as plt\n\nfrom limiao.uic.common.file_util import get_file_list\nfrom limiao.uic.common.time_util import get_time\nfrom limiao.uic.common.dir_util import root_dir\n\n#better with karate_graph() as defined in networkx example.\n#erdos renyi don't have true community structure\n\n\ndef load_adjacent_matrix(file_map, similarity_min, ineterval_max, dis_od_max):\n \"\"\"\n load the adjacency matrix\n :param file_map: 构建的图文件\n :param similarity_min: 最小相似度\n :return:\n \"\"\"\n print(get_time())\n print(\"load graph\")\n edge_list = []\n for line in open(file_map, \"r\", encoding=\"UTF-8\"):\n edge = line.strip(\"\\n\").split(\"\\t\")\n if float(edge[2]) >= similarity_min and int(edge[3]) <= ineterval_max and float(edge[4]) <= dis_od_max and float(edge[5]) <= dis_od_max:\n item = 
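# In the training loop above, tape.gradient() is only evaluated on the last
# sample's tape when (ind+1) % bs == 0, so every weight update effectively sees
# a batch of one. A sketch of per-sample gradient accumulation that averages
# over the whole mini-batch (model, optimizer and loss_fn are placeholders
# supplied by the caller, and every weight is assumed to receive a gradient):
import tensorflow as tf


def accumulate_and_apply(model, optimizer, batch_x, batch_y, loss_fn):
    accum = [tf.zeros_like(v) for v in model.trainable_weights]
    for x, y in zip(batch_x, batch_y):
        with tf.GradientTape() as tape:
            loss = loss_fn(y[None, ...], model(x[None, ...]))
        grads = tape.gradient(loss, model.trainable_weights)
        accum = [a + g for a, g in zip(accum, grads)]
    n = float(len(batch_x))
    optimizer.apply_gradients(
        [(a / n, v) for a, v in zip(accum, model.trainable_weights)])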
(int(edge[0]), int(edge[1]), float(edge[2]))\n edge_list.append(item)\n print(len(edge_list))\n print(get_time())\n return edge_list\n\n\ndef lou(file_odt, edge_list, coarse_cluster_index):\n \"\"\"\n Louvain\n :param file_odt: 被聚类的ODT文件, 必须与edge_list对应的ODT相同\n :param edge_list: 图的边集\n :param coarse_cluster_index: 对应的coarse_cluster的编号\n :return:\n \"\"\"\n G = nx.Graph()\n G.add_weighted_edges_from(edge_list)\n\n print(get_time())\n #first compute the best partition\n partition = community.best_partition(G)\n dendrogram = community.generate_dendrogram(G)\n print(\"cluster level count: \"+str(len(dendrogram)))\n print(\"edge count: \"+str(len(G.edges())))\n # partition = community.partition_at_level(dendrogram, 0)\n\n print(\"all cluster count: \"+str(len(set(partition.values()))))\n modularity = community.modularity(partition, G)\n print(\"modularity: \"+str(modularity))\n # print str(len(set(partition.values())))\n print(get_time())\n\n # 必须与generate_similarity_graph中相同\n trip_list = []\n for line in open(file_odt, \"r\", encoding=\"UTF-8\"):\n trip_list.append(line)\n count_clusters = 0\n for com in set(partition.values()):\n cluster_trips = []\n cluster_trips_show = []\n cluster_dic = {}\n for node in partition.keys():\n if partition[node] == com:\n cluster_trips.append(str(node) + \"\\t\" + trip_list[node])\n shows = trip_list[node].split(\"\\t\")\n cluster_trips_show.append(shows[4] + \"\\t\" + shows[5] + \"\\t\" + shows[8] + \"\\t\" + shows[9] + \"\\n\")\n cluster_dic[shows[3] + \"\\t\" + shows[7]] = cluster_dic.get(shows[3] + \"\\t\" + shows[7], 0) + 1\n if len(cluster_trips) >= 20 and len(cluster_dic.keys()) >= 3:\n file = open(root_dir + \"\\\\uic_all\\\\fine_clusters\\\\\"\n + str(coarse_cluster_index) + \"_\" + str(count_clusters)\n + \".txt\", \"w\", encoding=\"UTF-8\")\n file.writelines(cluster_trips)\n file.close()\n\n file_show_o = open(root_dir + \"\\\\uic_all\\plot\\o\\\\\"\n + str(coarse_cluster_index) + \"_\" + str(count_clusters)\n + \".txt\", \"w\", encoding=\"UTF-8\")\n file_show_o.writelines(cluster_trips_show)\n file_show_o.close()\n\n file_show_d = open(root_dir + \"\\\\uic_all\\plot\\d\\\\\"\n + str(coarse_cluster_index) + \"_\" + str(count_clusters)\n + \".txt\", \"w\", encoding=\"UTF-8\")\n file_show_d.writelines(cluster_trips_show)\n file_show_d.close()\n\n count_clusters += 1\n print(\"cluster count: \"+str(count_clusters))\n\n\n # #drawing\n # size = float(len(set(partition.values())))\n # pos = nx.spring_layout(G)\n # count = 0.\n # for com in set(partition.values()):\n # count = count + 1\n # list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com]\n # nx.draw_networkx_nodes(G, pos, list_nodes, node_size=20, node_color=str(count / size))\n # nx.draw_networkx_edges(G, pos, alpha=0.5)\n # plt.show()\n\n\nif __name__ == \"__main__\":\n f_list = get_file_list(root_dir + \"\\\\uic_all\\coarse_clusters\", [])\n for fi in f_list:\n if \"_12\" in fi:\n coarse_index = int(fi.split(\"\\\\\")[-1].split(\"_\")[0])\n if coarse_index == 16:\n print(fi)\n file_graph = root_dir + \"\\\\uic_all\\similarity_graph\\\\\" + str(coarse_index) + \".txt\"\n lou(fi, load_adjacent_matrix(file_graph, 0, 30, 2.5), coarse_index)\n","sub_path":"limiao/uic/odtc_all/louvain.py","file_name":"louvain.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"217013568","text":"\"\"\"Convert GeoJSON to CSV with WK.\"\"\"\n\nimport json\nimport geopandas as gpd\nimport 
click\n\n\n@click.command()\n@click.argument(\"infile\")\n@click.argument(\"outfile\")\ndef main(infile, outfile):\n    with open(infile) as f:\n        gpd.GeoDataFrame(json.load(f)).to_csv(outfile)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"data/json_to_csv.py","file_name":"json_to_csv.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"562538675","text":"#!/usr/bin/env python3\n\nimport argparse\nimport difflib\n\n\ndef process_hunk(diff, hunk, raw_text1, raw_text2):\n    result = hunk\n    hunk = hunk.split()[1:3]\n    before_idx = before_start = abs(int(hunk[0].split(',')[0])) - 1\n    before_count = abs(int(hunk[0].split(',')[1]))\n    after_idx = after_start = abs(int(hunk[1].split(',')[0])) - 1\n    after_count = abs(int(hunk[1].split(',')[1]))\n    while (before_idx < (before_start + before_count)) or \\\n          (after_idx < (after_start + after_count)):\n        line = diff.__next__().rstrip()\n        if line[0] == '-':\n            result = '\\n'.join([result, '-' + raw_text1[before_idx]])\n            before_idx += 1\n        elif line[0] == '+':\n            result = '\\n'.join([result, '+' + raw_text2[after_idx]])\n            after_idx += 1\n        else:\n            try:\n                result = '\\n'.join([result, ' ' + raw_text1[before_idx]])\n            except IndexError:\n                result = '\\n'.join([result, ' ' + raw_text2[before_idx]])\n            before_idx += 1\n            after_idx += 1\n\n    return result\n\n\ndef process_diff(raw_text1, raw_text2, args):\n    text1 = [args.delimiter.join(l.split()[args.field:]) for l in raw_text1]\n    text2 = [args.delimiter.join(l.split()[args.field:]) for l in raw_text2]\n    diff = difflib.unified_diff(text1, text2, args.file1, args.file2)\n\n    # The first 2 lines are just the 2 input files. Just print them and\n    # continue.\n    for _ in range(2):\n        print(diff.__next__().rstrip())\n\n    while 1:\n        try:\n            hunk = diff.__next__().rstrip()\n            output = process_hunk(diff, hunk, raw_text1, raw_text2)\n            print(output)\n        except StopIteration:\n            break\n\n\ndef dump_diff(raw_text1, raw_text2, args):\n    text1 = [args.delimiter.join(l.split()[args.field:]) for l in raw_text1]\n    text2 = [args.delimiter.join(l.split()[args.field:]) for l in raw_text2]\n    diff = difflib.unified_diff(text1, text2, args.file1, args.file2)\n    print('*' * 40)\n    for _ in range(3):\n        print(diff.__next__().rstrip())\n    idx = 0\n    for i in diff:\n        print('{}'.format(i.rstrip()))\n        idx += 1\n    print('*' * 40)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Diff input files while ignoring initial fields.')\n    parser.add_argument('file1', type=str)\n    parser.add_argument('file2', type=str)\n    parser.add_argument('-d', '--delimiter', type=str, default=' ',\n                        help='Use DELIMITER as the field delimiter character instead of space')\n    parser.add_argument('-f', '--field', type=int, default=0,\n                        help='Specify the number of fields to skip, separated in the input by the delimiter character.')\n    parser.add_argument('--debug', action='store_true',\n                        help=argparse.SUPPRESS)\n    return parser.parse_args()\n\n\ndef main(args):\n    with open(args.file1, 'r') as f:\n        raw_text1 = [l.rstrip() for l in f.readlines()]\n\n    with open(args.file2, 'r') as f:\n        raw_text2 = [l.rstrip() for l in f.readlines()]\n\n    if args.debug:\n        dump_diff(raw_text1, raw_text2, args)\n\n    process_diff(raw_text1, raw_text2, args)\n\n\nif __name__ == '__main__':\n    main(parse_args())\n","sub_path":"logdiff.py","file_name":"logdiff.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"78107084","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom imgaug import augmenters as iaa\nimport os\n\ndef affine_transform_x(b_x,dx):\n try:\n sz1, sz2 = b_x.shape\n except:\n sz1=1; sz2=784\n images = b_x.reshape([sz1, 28, 28, 1])\n seq = iaa.Sequential([iaa.Affine( translate_percent={\"x\": dx/56})])\n images_aug = seq.augment_images(images)\n b_x = np.reshape(images_aug, [sz1, sz2])\n return b_x\ndef affine_transform_s(b_x,s):\n sz1, sz2 = b_x.shape\n images = b_x.reshape([sz1, 28, 28, 1])\n seq = iaa.Sequential(\n [iaa.Affine( scale={\"x\": s, \"y\": s})])\n images_aug = seq.augment_images(images)\n b_x = np.reshape(images_aug, [sz1, sz2])\n return b_x\n\nXX = 0.2\nSX1 = .5\nSX2 = 0.7\nRX = 0.2\ndef affine_transform1(b_x):\n xx = np.random.uniform(low=-XX, high=XX, size=1) # (-0.5,0.5)\n yy = np.random.uniform(low=-XX, high=XX, size=1)\n sx = np.random.uniform(low=SX1, high=SX2, size=1) # (.5,1.2)\n sy = np.random.uniform(low=SX1, high=SX2, size=1)\n rr = np.random.uniform(low=-RX, high=RX, size=1) # (-0.5,0.5)\n sz1, sz2 = b_x.shape\n images = b_x.reshape([sz1, 28, 28, 1])\n seq = iaa.Sequential(\n [iaa.Affine(translate_percent={\"x\": xx, \"y\": yy}, scale={\"x\": sx, \"y\": sy}, rotate=rr * 180, )])\n images_aug = seq.augment_images(images)\n b_x = np.reshape(images_aug, [sz1, sz2])\n return b_x, np.concatenate((xx, yy,sx,sy, rr), axis=0)\n\ndef affine_transform_r(b_x,rr):\n sz1, sz2 = b_x.shape\n images = b_x.reshape([sz1, 28, 28, 1])\n seq = iaa.Sequential(\n [iaa.Affine(rotate=rr * 180, )])\n images_aug = seq.augment_images(images)\n b_x = np.reshape(images_aug, [sz1, sz2])\n return b_x\n\n\ndef affine_transform60(b_x):\n sz1, sz2 = b_x.shape\n images = b_x.reshape([sz1, 28, 28, 1])\n seq = iaa.Sequential(\n [iaa.Affine(scale={\"x\": 0.6, \"y\": 0.6} )])\n images_aug = seq.augment_images(images)\n b_x = np.reshape(images_aug, [sz1, sz2])\n return b_x\n\n\ndef char_to_bi(x):\n l=' 0123456789abcdefghijklmnopqrstuvwxyz.+-*~!@$%^&()'\n for i5 in range(2):\n for i4 in range(2):\n for i3 in range(2):\n for i2 in range(2):\n for i1 in range(2):\n for i0 in range(2):\n ind=i0+i1*2+i2*4+i3*8+i4*16+i5*32\n if l[ind]==x:\n return np.transpose([i5,i4,i3,i2,i1,i0])\ndef aud_code_to_txt(code):\n l = ' 0123456789abcdefghijklmnopqrstuvwxyz.+-*~!@$%^&()'\n ind = np.int(code[5] + code[4]* 2 + code[3] * 4 + code[2] * 8 + code[1] * 16 + code[0] * 32)\n if ind<0 or ind>50:\n ind=0\n return l[ind]\ndef sentence_to_vec(X):\n sz=len(X)\n y=np.zeros([sz,6])\n for ii in range(sz):\n y[ii,:]=char_to_bi(X[ii])\n return y\nFull_len=30\nAud_sz=6\ndef Gen_lng_move():\n d1 = np.random.randint(28)\n d3=d1\n LR=0\n #train move left/right 28\n if np.random.random([1]) > 0.5:\n d2 = 'left '\n d3 = d3 * -1\n LR=-1\n else:\n d2 = 'right '\n LR=1\n str_d1 = str(d1)\n cmd = 'move ' + d2 + str_d1\n cmd_sz1 = len(cmd)\n for ii in range(Full_len-cmd_sz1):\n cmd=cmd+' '\n return cmd,d3,LR,cmd_sz1\n\ndef Gen_lng_rott():\n a=[-30,-15,15,30]\n d1 = np.random.randint(4)\n d2 = a[d1]\n str_d = str(d2)\n cmd = 'rotate ' + str_d\n cmd_sz1 = len(cmd)\n for ii in range(Full_len-cmd_sz1):\n cmd=cmd+' '\n return cmd,d2/180,cmd_sz1\n\ndef Gen_lng_scale():\n #train move left/right 28\n if np.random.random([1]) > 0.5:\n d2 = 'shrink'\n d3 = 0.75\n\n else:\n d2 = 'enlarge'\n d3=1.5\n cmd = d2\n cmd_sz1 = len(cmd)\n for ii in range(Full_len-cmd_sz1):\n cmd=cmd+' '\n return cmd,d3,cmd_sz1\n\n\ndef Gen_lng_thisis(num):\n 
d3=num\n cmd='this is '+str(num)\n cmd_sz1 = len(cmd)\n for ii in range(Full_len - cmd_sz1):\n cmd = cmd + ' '\n return cmd,d3,cmd_sz1\n\ndef Gen_lng_size():\n sz=np.random.random([1])\n if sz > 0.5:\n a0=1.2;a1=1.5\n scale=np.random.random(1)*(a1-a0)+a0\n d2 = 'big '\n else:\n a0 = 0.7;a1 = 0.85\n scale = np.random.random(1) * (a1 - a0) + a0\n d2 = 'small '\n cmd='the size is '+d2\n cmd_sz1 = len(cmd)\n for ii in range(Full_len - cmd_sz1):\n cmd = cmd + ' '\n return cmd,scale,cmd_sz1\n\ndef Gen_lng_size_not():\n sz=np.random.random([1])\n if sz > 0.5:\n a0=1.2;a1=1.5\n scale=np.random.random(1)*(a1-a0)+a0\n d2 = 'small '\n else:\n a0 = 0.7;a1 = 0.85\n scale = np.random.random(1) * (a1 - a0) + a0\n d2 = 'big '\n cmd='the size is not '+d2\n cmd_sz1 = len(cmd)\n for ii in range(Full_len - cmd_sz1):\n cmd = cmd + ' '\n return cmd,scale,cmd_sz1\ndef Gen_lng_giveme(num):\n d3=num\n cmd='give me a '+str(num)\n cmd_sz1 = len(cmd)\n for ii in range(Full_len - cmd_sz1):\n cmd = cmd + ' '\n return cmd,d3,cmd_sz1\n\n\nTIME_STEP=30;\nBATCH_SIZE=128;\ntot_episodes=100000\n\n\n\nwe0=np.load('/Users/fengqi/Pycharm_py36/QF/we0.npy')\nwe1=np.load('/Users/fengqi/Pycharm_py36/QF/we1.npy')\nwe2=np.load('/Users/fengqi/Pycharm_py36/QF/we2.npy')\nwe3=np.load('/Users/fengqi/Pycharm_py36/QF/we3.npy')\nwem=np.load('/Users/fengqi/Pycharm_py36/QF/wem.npy')\nwd0=np.load('/Users/fengqi/Pycharm_py36/QF/wd0.npy')\nwd1=np.load('/Users/fengqi/Pycharm_py36/QF/wd1.npy')\nwd2=np.load('/Users/fengqi/Pycharm_py36/QF/wd2.npy')\nwd3=np.load('/Users/fengqi/Pycharm_py36/QF/wd3.npy')\nwdd=np.load('/Users/fengqi/Pycharm_py36/QF/wdd.npy')\n\nbe0=np.load('/Users/fengqi/Pycharm_py36/QF/be0.npy')\nbe1=np.load('/Users/fengqi/Pycharm_py36/QF/be1.npy')\nbe2=np.load('/Users/fengqi/Pycharm_py36/QF/be2.npy')\nbe3=np.load('/Users/fengqi/Pycharm_py36/QF/be3.npy')\nbem=np.load('/Users/fengqi/Pycharm_py36/QF/bem.npy')\nbd0=np.load('/Users/fengqi/Pycharm_py36/QF/bd0.npy')\nbd1=np.load('/Users/fengqi/Pycharm_py36/QF/bd1.npy')\nbd2=np.load('/Users/fengqi/Pycharm_py36/QF/bd2.npy')\nbd3=np.load('/Users/fengqi/Pycharm_py36/QF/bd3.npy')\nbdd=np.load('/Users/fengqi/Pycharm_py36/QF/bdd.npy')\nIPS_lstm_kernel=np.load('/Users/fengqi/Pycharm_py36/QF/IPS_lstm_kernel.npy')\nIPS_lstm_bias=np.load('/Users/fengqi/Pycharm_py36/QF/IPS_lstm_bias.npy')\nIPS_dense_kernel=np.load('/Users/fengqi/Pycharm_py36/QF/IPS_dense_kernel.npy')\nIPS_dense_bias=np.load('/Users/fengqi/Pycharm_py36/QF/IPS_dense_bias.npy')\n\n\nscale1 = 4\nn_l0 = 64 * scale1;\nn_l1 = 32 * scale1;\nn_l2 = 16 * scale1;\nn_l3 = 8 * scale1;\nn_encoded = 32 # 4*scale1#pow(4,ii)\nn_d0 = 8 * scale1;\nn_d1 = 16 * scale1;\nn_d2 = 32 * scale1;\nn_d3 = 64 * scale1;\nn_decoded = 784\n\n# tf placeholder\ntf_x = tf.placeholder(tf.float32, [None, 28 * 28]) # value in the range of (0, 1)\nph_encoded = tf.placeholder(tf.float32, [None, n_encoded])\nph_switch = tf.placeholder(tf.float32, [1])\nph_lr = tf.placeholder(tf.float32, [])\nph_dis_e = tf.placeholder(tf.float32, [None, n_encoded])\n# encoder\n\n\nen0 = tf.layers.dense(tf_x, n_l0, tf.nn.sigmoid, kernel_initializer=tf.constant_initializer(we0),bias_initializer=tf.constant_initializer(be0),trainable=False)\nen1 = tf.layers.dense(en0, n_l1, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(we1),bias_initializer=tf.constant_initializer(be1),trainable=False)\nen2 = tf.layers.dense(en1, n_l2, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(we2),bias_initializer=tf.constant_initializer(be2),trainable=False)\nen3 = tf.layers.dense(en2, n_l3, 
tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(we3),bias_initializer=tf.constant_initializer(be3),trainable=False)\nff1 = tf.layers.dense(en3, n_encoded, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(wem),bias_initializer=tf.constant_initializer(bem),trainable=False)\n\ntf.set_random_seed(1)\n\n\n# IPS speech->number\n\nIPS_INPUT_SIZE=6;IPS_CELL_SIZE=10;IPS_OUT_SIZE=1;\n\nIPS_x = tf.placeholder(tf.float32, [None, TIME_STEP,IPS_INPUT_SIZE])\n\nIPS_cell = tf.contrib.rnn.BasicLSTMCell(num_units=IPS_CELL_SIZE)\ninit_s = IPS_cell.zero_state(batch_size=BATCH_SIZE, dtype=tf.float32) # very first hidden state\nIPS_outputs, IPS_final_s = tf.nn.dynamic_rnn(\n IPS_cell, # cell you have chosen\n IPS_x, # input\n initial_state=init_s, # the initial hidden state\n time_major=False, # False: (batch, time step, input); True: (time step, batch, input)\n )\nIPS_outs2D = tf.reshape(IPS_outputs, [-1, IPS_CELL_SIZE]) # reshape 3D output to 2D for fully connected layer\nIPS_net_outs2D = tf.layers.dense(IPS_outs2D, IPS_OUT_SIZE,kernel_initializer=tf.constant_initializer(IPS_dense_kernel),\n bias_initializer=tf.constant_initializer(IPS_dense_bias),trainable=False)\nIPS_outs = tf.reshape(IPS_net_outs2D, [-1, TIME_STEP, IPS_OUT_SIZE]) # reshape back to 3D\n\n\n\n# dlPFC\ndlPFC_INPUT_SIZE = 71;\ndlPFC_CELL_SIZE = 128;\nPFC_LearningRate = 0.001;\ndlPFC_OUT_SIZE = 32 + 6;\n\ndlPFC_x = tf.placeholder(tf.float32, [None, TIME_STEP - 1, dlPFC_INPUT_SIZE])\ndlPFC_y = tf.placeholder(tf.float32, [None, TIME_STEP - 1, dlPFC_OUT_SIZE])\n\nwith tf.variable_scope('dlPFC'):\n dlPFC_cell = tf.contrib.rnn.BasicLSTMCell(num_units=dlPFC_CELL_SIZE)\n dlPFC_init_s = dlPFC_cell.zero_state(batch_size=BATCH_SIZE, dtype=tf.float32) # very first hidden state\n dlPFC_outputs, dlPFC_final_s = tf.nn.dynamic_rnn(dlPFC_cell, dlPFC_x, initial_state=dlPFC_init_s, # the initial hidden state\n time_major=False, )# False: (batch, time step, input); True: (time step, batch, input)\n dlPFC_outs2D = tf.reshape(dlPFC_outputs, [-1, dlPFC_CELL_SIZE]) # reshape 3D output to 2D for fully connected layer\n dlPFC_net_outs2D = tf.layers.dense(dlPFC_outs2D, dlPFC_OUT_SIZE)\n dlPFC_outs = tf.reshape(dlPFC_net_outs2D, [-1, TIME_STEP-1, dlPFC_OUT_SIZE]) # reshape back to 3D\n\n dlPFC_loss = tf.losses.mean_squared_error(labels=dlPFC_y, predictions=dlPFC_outs)\n #tf.losses.mean_squared_error(labels=dlPFC_y[:,25,:], predictions=dlPFC_outs[:,25,:]) # tf.losses.mean_squared_error(labels=dlPFC_y, predictions=dlPFC_outs)+\n dlPFC_train = tf.train.AdamOptimizer(learning_rate=PFC_LearningRate).minimize(dlPFC_loss)\n\n\n# Imagination network\ndcd_in=tf.placeholder(tf.float32,[None,32])\n# decoder\nff2 = tf.layers.dense(dcd_in, n_encoded, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(wem),bias_initializer=tf.constant_initializer(bem),trainable=False)\nde0 = tf.layers.dense(ff2, n_d0, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(wd0),bias_initializer=tf.constant_initializer(bd0),trainable=False)\nde1 = tf.layers.dense(de0, n_d1, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(wd1),bias_initializer=tf.constant_initializer(bd1),trainable=False)\nde2 = tf.layers.dense(de1, n_d2, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(wd2),bias_initializer=tf.constant_initializer(bd2),trainable=False)\nde3 = tf.layers.dense(de2, n_d3, tf.nn.sigmoid,kernel_initializer=tf.constant_initializer(wd3),bias_initializer=tf.constant_initializer(bd3),trainable=False)\ndecoded = tf.layers.dense(de3, n_decoded, tf.nn.sigmoid, 
kernel_initializer=tf.constant_initializer(wdd),bias_initializer=tf.constant_initializer(bdd),trainable=False)\n\n\n\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nweights_lstm_w = tf.get_default_graph().get_tensor_by_name('rnn/basic_lstm_cell/kernel:0')\nweights_lstm_b = tf.get_default_graph().get_tensor_by_name('rnn/basic_lstm_cell/bias:0')\nsess.run(tf.assign(weights_lstm_w, IPS_lstm_kernel))\nsess.run(tf.assign(weights_lstm_b, IPS_lstm_bias))\n# print(sess.run(weights_lstm_w))\n# print(sess.run(weights_lstm_b))\nchk_num=2000\nsaver = tf.train.Saver()\npath = '/Users/fengqi/Pycharm_py36/QF/ThisMoveSizeNotGiveScale' + str(chk_num) + '/'\nfile_='CKPT001'\nsaver.restore(sess, path+file_)\n\nspc_bi=char_to_bi(' ')\n# f,a=plt.subplots(2,TIME_STEP-1)\nb_x_1=np.zeros([BATCH_SIZE,784])\nb_x_2=np.zeros([BATCH_SIZE,784])\ncmd_bi_=np.zeros([BATCH_SIZE,TIME_STEP,6])\ndel_x=np.zeros([BATCH_SIZE])\nen3_1=np.zeros([BATCH_SIZE,n_l3])\nff1_1 = np.zeros([BATCH_SIZE,n_encoded])\nen3_2=np.zeros([BATCH_SIZE,n_l3])\nff1_2=np.zeros([BATCH_SIZE,n_encoded])\ncmd_sz=np.zeros([BATCH_SIZE])\n\ncost_his=np.zeros([tot_episodes])\nph_encoded_ = np.zeros(shape=[BATCH_SIZE, n_encoded])\nph_switch_ = np.ones(shape=[1])\nph_dis_e_ = np.ones(shape=[BATCH_SIZE, n_encoded])\nph_encoded_1 = np.zeros(shape=[1, n_encoded])\nph_switch_1 = np.ones(shape=[1])\nph_dis_e_1 = np.ones(shape=[1, n_encoded])\nimg_orig=np.ones([BATCH_SIZE,TIME_STEP-1,784])\nimg_pred=np.ones([BATCH_SIZE,TIME_STEP-1,784])\naud_=np.zeros([BATCH_SIZE,TIME_STEP,IPS_INPUT_SIZE])\nen3_=np.zeros([BATCH_SIZE,TIME_STEP,n_l3])\nff1_=np.zeros([BATCH_SIZE,TIME_STEP,n_encoded])\nwhole_=np.zeros([BATCH_SIZE,TIME_STEP,n_encoded+n_l3+1+IPS_INPUT_SIZE])\nLearningRate = 0.0001;ph_lr_ = np.ones(shape=[]) * LearningRate\nmnist = input_data.read_data_sets('./mnist', one_hot=False)\nLR=np.zeros([BATCH_SIZE])\nfor ep in range (tot_episodes):#episode\n    # prep inp [en3_32,ATL32,aud6,num1] total 71\n    rand_0 = np.random.randint(6)\n    rand_1=np.random.random(1)\n    if rand_1>0.5:\n        rand_0=5\n\n\n    b_x, b_y = mnist.train.next_batch(BATCH_SIZE)\n    # Prepare SOUND\n    for ii in range(BATCH_SIZE):\n        if rand_0 == 0:\n            cmd, del_x[ii], LR[ii], cmd_sz[ii] = Gen_lng_move()\n        elif rand_0 == 1:\n            cmd, del_x[ii], cmd_sz[ii] = Gen_lng_thisis(b_y[ii])\n        elif rand_0 == 2:\n            cmd, del_x[ii], cmd_sz[ii] = Gen_lng_size()\n        elif rand_0==3:\n            cmd, del_x[ii], cmd_sz[ii] = Gen_lng_size_not()\n        elif rand_0==4:\n            cmd, del_x[ii], cmd_sz[ii] = Gen_lng_giveme(b_y[ii])\n        elif rand_0==5:\n            cmd, del_x[ii], cmd_sz[ii] = Gen_lng_scale()\n\n\n        aud_[ii, :, :] = sentence_to_vec(cmd)\n\n    # Prep Image\n    for ii in range(BATCH_SIZE):\n        b_x_1[ii], para = affine_transform1(np.reshape(b_x[ii], [1, 784]))\n        if rand_0 == 0:\n            b_x_2[ii] = affine_transform_x(b_x_1[ii], del_x[ii])\n        elif rand_0 == 1:\n            b_x_2[ii] = b_x_1[ii]\n        elif rand_0 == 2:\n            b_x_1[ii] = affine_transform_s(np.reshape(b_x_1[ii], [1, 784]), del_x[ii])\n            b_x_2[ii] = b_x_1[ii]\n        elif rand_0==3:\n            b_x_1[ii] = affine_transform_s(np.reshape(b_x_1[ii], [1, 784]), del_x[ii])\n            b_x_2[ii] = b_x_1[ii]\n        elif rand_0==4:\n            b_x_1[ii] = affine_transform60(np.reshape(b_x[ii], [1, 784]))\n            b_x_2[ii]=b_x_1[ii]\n            b_x_1[ii]=b_x_1[ii] *0\n        elif rand_0==5:\n            b_x_2[ii]=affine_transform_s(np.reshape(b_x_1[ii], [1, 784]), del_x[ii])\n\n\n\n        en3_1[ii], ff1_1[ii] = sess.run([en3, ff1], {tf_x: np.reshape(b_x_1[ii], [1, 784]), ph_encoded: ph_encoded_1,\n                                                     ph_switch: ph_switch_1, ph_dis_e: ph_dis_e_1})\n        en3_2[ii], ff1_2[ii] = sess.run([en3, ff1], {tf_x: 
np.reshape(b_x_2[ii], [1, 784]), ph_encoded: ph_encoded_1,\n                                                     ph_switch: ph_switch_1, ph_dis_e: ph_dis_e_1})\n\n    ips_out_ = sess.run(IPS_outs, {IPS_x: aud_})  # ips_out_ (128, 30, 1)\n    for jj in range(BATCH_SIZE):\n        for ii in range(TIME_STEP-1):\n            img_orig[jj, ii] = b_x_1[jj]\n            if rand_0 == 0 or rand_0==4 or rand_0==5:\n                if ii <= cmd_sz[jj]-1:  # cmd='move right 5', cmd_sz=12, cmd[11]=5\n                    whole_[jj, ii, 6:38] = en3_1[jj]\n                    whole_[jj, ii, 38:70] = ff1_1[jj]\n                else:\n                    whole_[jj, ii, 6:38] = en3_2[jj]\n                    whole_[jj, ii, 38:70] = ff1_2[jj]\n                    img_orig[jj, ii] = b_x_2[jj]\n            else:\n                whole_[jj, ii, 6:38] = en3_1[jj]\n                whole_[jj, ii, 38:70] = ff1_1[jj]\n\n    whole_[:, :, 0:6] = aud_\n    whole_[:, :, 70:71] = ips_out_\n\n    dlPFC_outs_ = sess.run(dlPFC_outs, {dlPFC_x: whole_[:, 0:29, :], })\n    f, a = plt.subplots(2, 29)\n    for ii in range(29):\n        img_pred[BATCH_SIZE - 1, ii] = sess.run(decoded,{dcd_in: np.reshape(dlPFC_outs_[BATCH_SIZE - 1, ii, 6:38], [1, 32])})\n        aud_pred = aud_code_to_txt(np.round(dlPFC_outs_[BATCH_SIZE - 1, ii, 0:6]))\n        a[0][ii].imshow(np.reshape(img_orig[BATCH_SIZE - 1, ii], [28, 28]))\n        a[1][ii].imshow(np.reshape(img_pred[BATCH_SIZE - 1, ii], [28, 28]))\n        a[0][ii].set_title(aud_code_to_txt(aud_[BATCH_SIZE - 1, ii]))\n        a[1][ii].set_title(aud_pred)\n        a[0][ii].set_xticks(());\n        a[0][ii].set_yticks(());\n        a[1][ii].set_xticks(());\n        a[1][ii].set_yticks(());\n    rand_1 = np.random.random(1)\n    file_nm = '/Users/fengqi/Pycharm_py36/QF/ThisMovieSizeNotGiveScale' + str(chk_num) + str(rand_1) + '.jpeg'\n    fig = plt.gcf()\n    fig.set_size_inches(18.5, 10.5)\n    fig.savefig(file_nm, dpi=100)\n    plt.close('all')\n\n\n\n\n\n","sub_path":"314_2test_ThisIs_Move_Size_Not_Giveme_Scale.py","file_name":"314_2test_ThisIs_Move_Size_Not_Giveme_Scale.py","file_ext":"py","file_size_in_byte":17185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"493650912","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom stream import views\n\nurlpatterns = patterns('',\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^$', views.index, name='index'),\n    url(r'^(?P<pk>\\d+)/$', views.DetailView.as_view(), name='detail'),\n    url(r'^(?P<pk>\\d+)/results/$', views.ResultsView.as_view(), name='results'),\n    url(r'^(?P<question_id>\\d+)/vote/$', views.vote, name='vote'),\n    url(r'poll/$', views.poll, name='poll'),\n)","sub_path":"stream/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"293271022","text":"# -*- coding:utf-8 -*-\nfrom django.conf.urls import url\nfrom django.views.static import serve\n\nfrom .config import APP_NAME\nfrom .views import activity\nfrom .views import auth\nfrom .views import carousel_figure\nfrom .views import common\nfrom .views import community_assess\nfrom .views import coupon\nfrom .views import coupon_term\nfrom .views import cus_region\nfrom .views import group\nfrom .views import manager\nfrom .views import manager_region\nfrom .views import micro_credit_contract\nfrom .views import param\nfrom .views import pre_credit_line\nfrom .views import reconciliation\nfrom .views import report_coupon\nfrom .views import report_cus_trade_detail\nfrom .views import report_reservation\nfrom .views import report_shop_trade_detail\nfrom .views import report_shop_trade_total\nfrom .views import shop\nfrom .views import signin_rule\nfrom .views import withdraw_application\nfrom .views import reservation\nfrom .views 
import branch\nfrom .views import lottery_award\nfrom .views import writeoff\nfrom .views import lottery_rule\n\napp_name = APP_NAME\n\nurlpatterns = [\n    url(r'^media/(?P<path>.*)$', serve, {'document_root': '/home/nanxun/weixin_mana/mana_app/media/'}),\n    url(r'^staticfile/(?P<page>[a-z_\\d]+)/$', common.static, name='staticfile'),\n\n    # 登录登出\n    url(r'^login/$', auth.login, name='login'),\n    url(r'^login/is_manager_exist/$', auth.is_manager_exist, name='is_manager_exist'),\n    url(r'^logout/$', auth.logout, name='logout'),\n    url(r'^password_change/$', auth.password_change, name='password_change'),\n\n    # 默认的后台界面\n    url(r'^index/$', auth.index, name='index'),\n\n    # 系统管理\n    # 用户组管理\n    url(r'^group/page/$', group.page, name='group_page'),\n    url(r'^group/add/$', group.add, name='group_add'),\n    url(r'^group/delete/$', group.delete, name='group_delete'),\n    url(r'^group/update/$', group.update, name='group_update'),\n    url(r'^group/query/$', group.query, name='group_query'),\n    url(r'^group/menu/$', group.menu, name='group_menu'),\n    url(r'^group/menu/update/$', group.menu_update, name='group_menu_update'),\n\n    # 用户管理\n    url(r'^manager/page/$', manager.page, name='manager_page'),\n    url(r'^manager/add/$', manager.add, name='manager_add'),\n    # url(r'^manager/delete/$', manager.delete, name='manager_delete'),\n    url(r'^manager/update/$', manager.update, name='manager_update'),\n    url(r'^manager/query/$', manager.query, name='manager_query'),\n    url(r'^manager/reset_password/$', manager.reset_password, name='manager_reset_password'),\n\n    # 系统参数\n    url(r'^param/page/$', param.page, name='param_page'),\n    url(r'^param/add/$', param.add, name='param_add'),\n    url(r'^param/delete/$', param.delete, name='param_delete'),\n    url(r'^param/update/$', param.update, name='param_update'),\n    url(r'^param/query/$', param.query, name='param_query'),\n\n    # 签到规则设置\n    url(r'^signin_rule/page/$', signin_rule.page, name='signin_rule_page'),\n    url(r'^signin_rule/add/$', signin_rule.add, name='signin_rule_add'),\n    url(r'^signin_rule/delete/$', signin_rule.delete, name='signin_rule_delete'),\n    url(r'^signin_rule/update/$', signin_rule.update, name='signin_rule_update'),\n\n    url(r'^coupon_reward/page/', lottery_award.coupon_reward_page, name='coupon_reward_page'),\n    url(r'^coupon_reward/query/', lottery_award.coupon_reward_query, name='coupon_reward_query'),\n    url(r'^coupon_send_recode/page/', coupon.coupon_send_recode_page, name='coupon_send_recode_page'),\n    url(r'^coupon_send_recode/table/', coupon.coupon_send_recode_query, name='coupon_send_recode_query'),\n    # 轮播图管理\n    url(r'^carousel_figure/page/$', carousel_figure.page, name='carousel_figure_page'),\n    url(r'^carousel_figure/update/$', carousel_figure.update, name='carousel_figure_update'),\n\n    # 抽奖消耗表\n    url(r'^lottery/award/use/page/$', lottery_award.use_page, name='lottery_award_use_page'),\n    url(r'^lottery/award/use/query/$', lottery_award.use_query, name='lottery_award_use_query'),\n    # 抽奖奖品设置\n    url(r'^lottery/award/set/page/$', lottery_award.set_page, name='lottery_award_set_page'),\n    url(r'^lottery/award/add/$', lottery_award.add, name='lottery_award_add'),\n    url(r'^lottery/award/update/$', lottery_award.update, name='lottery_award_update'),\n    url(r'^lottery/award/delete/$', lottery_award.delete, name='lottery_award_delete'),\n    url(r'^lottery/award/distribute/$', lottery_award.distribute, name='lottery_award_distribute'),\n    url(r'^lottery/award/distribute/query/$', lottery_award.distribute_query, name='lottery_award_distribute_query'),\n    # 抽奖规则设置\n    url(r'^lottery/award/rule/set/page/$', 
lottery_award.rule_set_page, name='lottery_award_rule_set_page'),\n url(r'^lottery/award/rule2/set/page/$', lottery_award.rule2_set_page, name='lottery_award_rule2_set_page'),\n url(r'^lottery/award/rule/add/$', lottery_award.rule_add, name='lottery_award_rule_add'),\n url(r'^lottery/award/rule/update/$', lottery_award.rule_update, name='lottery_award_rule_update'),\n url(r'^lottery/award/rule/delete/$', lottery_award.rule_delete, name='lottery_award_rule_delete'),\n url(r'^lottery/award/rule2/add/$', lottery_award.rule2_add, name='lottery_award_rule2_add'),\n url(r'^lottery/award/rule2/update/$', lottery_award.rule2_update, name='lottery_award_rule2_update'),\n url(r'^lottery/award/rule2/delete/$', lottery_award.rule2_delete, name='lottery_award_rule2_delete'),\n # 商户信息\n url(r'^shop/page/$', shop.page, name='shop_page'),\n # url(r'^shop/stick/$', shop.stick, name='shop_stick'),\n url(r'^shop/query/$', shop.query, name='shop_query'),\n url(r'^shop/update/$', shop.update, name='shop_update'),\n\n # 提现审核\n url(r'^withdraw_application/page/$', withdraw_application.page, name='withdraw_application_page'),\n url(r'^withdraw_application/query/$', withdraw_application.query, name='withdraw_application_query'),\n url(r'^withdraw_application/audit/$', withdraw_application.audit, name='withdraw_application_audit'),\n\n # 优惠券\n url(r'^coupon/page/$', coupon.page, name='coupon_page'),\n url(r'^coupon/add/$', coupon.add, name='coupon_add'),\n url(r'^coupon/delete/$', coupon.delete, name='coupon_delete'),\n url(r'^coupon/update/$', coupon.update, name='coupon_update'),\n url(r'^coupon/query/$', coupon.query, name='coupon_query'),\n url(r'^coupon/send/$', coupon.send, name='coupon_send'),\n url(r'^coupon/sendsome/page/$', coupon.coupon_send_some_page, name='coupon_send_some'),\n url(r'^coupon/sendsome/query/$', coupon.coupon_send_some_query, name='coupon_send_some_query'),\n url(r'^coupon/sendsome/$', coupon.coupon_upload_send_some, name='coupon_upload_send_some'),\n url(r'^coupon/sendsome/download/$', coupon.download_temp, name='coupon_sendsome_download'),\n # url(r'^coupon/detail/$', coupon.detail, name='coupon_detail'),\n url(r'^coupon/upload/$', coupon.upload, name='coupon_upload'),\n\n # 抽奖规则\n url(r'^lottery/rule/page/$', lottery_rule.page, name='lottery_rule_page'),\n url(r'^lottery/rule/query/$', lottery_rule.query, name='lottery_rule_query'),\n url(r'^lottery/rule/add/$', lottery_rule.add, name='lottery_rule_add'),\n url(r'^lottery/rule/delete/$', lottery_rule.delete, name='lottery_rule_delete'),\n url(r'^lottery/rule/update/$', lottery_rule.update, name='lottery_rule_update'),\n # 优惠券条件参数\n url(r'^coupon_term/page/$', coupon_term.page, name='coupon_term_page'),\n url(r'^coupon_term/update/$', coupon_term.update, name='coupon_term_update'),\n url(r'^coupon_term/query/$', coupon_term.query, name='coupon_term_query'),\n\n # 对账管理\n url(r'^reconciliation/log/page/$', reconciliation.log_page, name='reconciliation_log_page'),\n url(r'^reconciliation/log/query/$', reconciliation.log_query, name='reconciliation_log_query'),\n url(r'^reconciliation/amend/page/$', reconciliation.amend_page, name='reconciliation_amend_page'),\n url(r'^reconciliation/amend/query/$', reconciliation.amend_query, name='reconciliation_amend_query'),\n url(r'^reconciliation/amend_by_hand/$', reconciliation.amend_by_hand, name='reconciliation_amend_by_hand'),\n url(r'^reconciliation/auto_amend/$', reconciliation.auto_amend, name='reconciliation_auto_amend'),\n\n # 银行网点管理\n url(r'^branch/page/$', branch.page, 
name='branch_page'),\n url(r'^branch/add/$', branch.add, name='branch_add'),\n url(r'^branch/update/$', branch.update, name='branch_update'),\n url(r'^branch/query/$', branch.query, name='branch_query'),\n url(r'^branch/delete/$', branch.delete, name='branch_delete'),\n\n # 报表管理\n url(r'^report/coupon/page/$', report_coupon.page, name='report_coupon_page'),\n url(r'^report/coupon/query/$', report_coupon.query, name='report_coupon_query'),\n url(r'^report/cus_trade_detail/page/$', report_cus_trade_detail.page, name='report_cus_trade_detail_page'),\n url(r'^report/cus_trade_detail/query/$', report_cus_trade_detail.query, name='report_cus_trade_detail_query'),\n url(r'^report/reservation/page/$', report_reservation.page, name='report_reservation_page'),\n url(r'^report/reservation/query/$', report_reservation.query, name='report_reservation_query'),\n url(r'^report/shop_trade_detail/page/$', report_shop_trade_detail.page, name='report_shop_trade_detail_page'),\n url(r'^report/shop_trade_detail/query/$', report_shop_trade_detail.query, name='report_shop_trade_detail_query'),\n url(r'^report/shop_trade_total/page/$', report_shop_trade_total.page, name='report_shop_trade_total_page'),\n url(r'^report/shop_trade_total/query/$', report_shop_trade_total.query, name='report_shop_trade_total_query'),\n\n # 预约日期配置\n url(r'^reservation/date/page/$', reservation.page, name=\"reservation_date_page\"),\n url(r'^reservation/date/add/$', reservation.add, name=\"reservation_date_submit\"),\n url(r'^reservation/date/delete/$', reservation.delete, name='reservation_delete'),\n url(r'^reservation/date/update/$', reservation.update, name='reservation_update'),\n url(r'^reservation/date/query/$', reservation.query, name='reservation_query'),\n url(r'^reservation/second/page/$', reservation.setting_page, name='reservation_setting_page'),\n url(r'reservation/second/add/', reservation.setting_add, name='reservation_setting_add'),\n url(r'reservation/second/delete/$', reservation.setting_delete, name='reservation_delete'),\n url(r'reservation/second/update/$', reservation.setting_update, name='reservation_update'),\n url(r'reservation/second/query/$', reservation.setting_query, name='reservation_query'),\n\n # 客户地址关系\n url(r'^cus_region/page/$', cus_region.page, name='cus_region_page'),\n url(r'^cus_region/add/$', cus_region.add, name='cus_region_add'),\n url(r'^cus_region/delete/$', cus_region.delete, name='cus_region_delete'),\n url(r'^cus_region/update/$', cus_region.update, name='cus_region_update'),\n url(r'^cus_region/query/$', cus_region.query, name='cus_region_query'),\n url(r'^cus_region/upload/$', cus_region.upload, name='cus_region_upload'),\n\n # 客户经理地址关系\n url(r'^manager_region/page/$', manager_region.page, name='manager_region_page'),\n url(r'^manager_region/add/$', manager_region.add, name='manager_region_add'),\n url(r'^manager_region/delete/$', manager_region.delete, name='manager_region_delete'),\n url(r'^manager_region/update/$', manager_region.update, name='manager_region_update'),\n url(r'^manager_region/query/$', manager_region.query, name='manager_region_query'),\n url(r'^manager_region/upload/$', manager_region.upload, name='manager_region_upload'),\n\n # 客户预授信额度\n url(r'^pre_credit_line/page/$', pre_credit_line.page, name='pre_credit_line_page'),\n url(r'^pre_credit_line/add/$', pre_credit_line.add, name='pre_credit_line_add'),\n url(r'^pre_credit_line/delete/$', pre_credit_line.delete, name='pre_credit_line_delete'),\n url(r'^pre_credit_line/update/$', pre_credit_line.update, 
name='pre_credit_line_update'),\n url(r'^pre_credit_line/query/$', pre_credit_line.query, name='pre_credit_line_query'),\n url(r'^pre_credit_line/upload/$', pre_credit_line.upload, name='pre_credit_line_upload'),\n\n # 小额贷款合同统计\n url(r'^micro_credit_contract/page/$', micro_credit_contract.page, name='micro_credit_contract_page'),\n url(r'^micro_credit_contract/add/$', micro_credit_contract.add, name='micro_credit_contract_add'),\n url(r'^micro_credit_contract/delete/$', micro_credit_contract.delete, name='micro_credit_contract_delete'),\n url(r'^micro_credit_contract/update/$', micro_credit_contract.update, name='micro_credit_contract_update'),\n url(r'^micro_credit_contract/query/$', micro_credit_contract.query, name='micro_credit_contract_query'),\n url(r'^micro_credit_contract/upload/$', micro_credit_contract.upload, name='micro_credit_contract_upload'),\n\n # 客户地址关系\n url(r'^community_assess/page/$', community_assess.page, name='community_assess_page'),\n url(r'^community_assess/add/$', community_assess.add, name='community_assess_add'),\n url(r'^community_assess/delete/$', community_assess.delete, name='community_assess_delete'),\n url(r'^community_assess/update/$', community_assess.update, name='community_assess_update'),\n url(r'^community_assess/query/$', community_assess.query, name='community_assess_query'),\n url(r'^community_assess/upload/$', community_assess.upload, name='community_assess_upload'),\n # 核销类\n url(r'^seller/coupon/off/page/$', writeoff.page, name='seller_coupon_off'),\n url(r'^seller/coupon/off/query/$', writeoff.query, name='seller_coupon_off_query'),\n\n # 字段回复活动\n url(r'^activity/page$', activity.page, name='activity_page'),\n url(r'^activity/query', activity.query, name='activity_query'),\n url(r'^activity/add', activity.add, name='activity_add'),\n url(r'^activity/delete', activity.delete, name='activity_delete'),\n url(r'^activity/update', activity.update, name='activity_update'),\n url(r'^activity/upload', activity.upload, name='activity_upload'),\n url(r'^activity/ext/query', activity.ext_query, name='activity_ext_query'),\n url(r'^activity/ext/add', activity.ext_add, name='activity_ext_add'),\n url(r'^activity/ext/delete', activity.ext_delete, name='activity_ext_delete'),\n url(r'^activity/statistics/page$', activity.statistics_page, name='activity_statistics_page'),\n url(r'^activity/statistics/query', activity.statistics_query, name='activity_statistics_query'),\n\n # 开发中\n url(r'^todo/page/$', common.todo, name='todo'),\n]\n","sub_path":"weixin_mana/mana_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":14673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"481911696","text":"import dicom2nifti\nimport os\nfrom skimage.io import imread_collection, concatenate_images\nimport nibabel as nib\nimport numpy as np\n\nDATA_FOLDER = \"/home/fayd/Data/CHAOS_Train_Sets/Train_Sets/MR\"\nOUTPUT_FOLDER = '/home/fayd/Data/CHAOS'\nTYPE = 'T1DUAL'\n\ndef convert_ct():\n\n # Create output folder\n if not os.path.isdir(OUTPUT_FOLDER):\n os.mkdir(OUTPUT_FOLDER)\n\n for folder_name in os.listdir(DATA_FOLDER):\n\n output_folder = f'{OUTPUT_FOLDER}/{folder_name}'\n input_folder = f'{DATA_FOLDER}/{folder_name}/{TYPE}'\n\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n for phase in ['InPhase', 'OutPhase']:\n dicom_dir = input_folder + '/DICOM_anon/' + phase\n dicom2nifti.convert_directory(dicom_dir, output_folder, compression=False)\n\n for file in os.listdir(output_folder):\n 
if file.endswith(\".nii\") and not file.startswith(\"InPhase\"):\n os.rename(output_folder + '/' + file ,output_folder + '/' + phase + '.nii')\n\n data = list()\n for phase in ['InPhase', 'OutPhase']:\n data.append(nib.load(output_folder + '/' + phase + '.nii').get_fdata().astype(np.float32))\n\n # Combine the phases as channels\n data = np.stack(data, axis=-1)\n nii_data = nib.Nifti1Image(data, affine=np.eye(4))\n nib.save(nii_data, output_folder + '/Combined.nii')\n\n # Combine collection of 2D images\n images = input_folder + '/Ground/*.png'\n label = concatenate_images(imread_collection(images))\n label = np.moveaxis(label, 0, -1) # put depth last\n label = np.rot90(label, k=3) # 270 degree rotation\n\n # Save image 3D array as nii\n nii_label = nib.Nifti1Image(label, affine=np.eye(4))\n nii_label.to_filename(output_folder + '/ground.nii')\n\n\nif __name__ == '__main__':\n convert_ct()","sub_path":"old/data/convert_mri_data.py","file_name":"convert_mri_data.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"193943952","text":"import numpy as np\nimport scipy\nfrom clex11 import *\nfrom clex11h import *\nfrom doublej import *\n\n\n[nb,nh]=lambda_.shape\n[nd,nc]=phic.shape\n[nz,nw]=c2.shape\n[junk,ng]=phig.shape\n[nk,ni] =thetak.shape\n\n\n\n# SEARCH ROUTINE\n\nbal1 = 5\ni = range(1, 2)\nalpha = .5\ntol = 1e-5\n\nwhile abs(bal1) > tol:\n\n # STEP 2:\n # Creation of the composite state vector x(t)'=[h1(t-1)',h2(t-1)',k(t-1)',z(t)']\n # and its law of motion: x(t+1) = A*x(t) + B*u(t) + C*w(t+1).\n\n phiin = linalg.inv(hstack((phic, phig)))\n phiinc = dot(hstack((identity(nc),zeros((nc,ng)))), phiin)\n phiing = dot(hstack((zeros((ng, nc)), identity(ng))), phiin)\n a1 = hstack([deltah1, zeros((nh, nh + nk))])\n a2 = hstack([[zeros((nh, nh)), deltah2, dot(dot(thetah2, phiinc), gamma)]])\n a3 = hstack([zeros((nk, nh + nh)), deltak])\n a4 = dot(dot(thetah2, phiinc), ud)\n b5 = - dot(dot(thetah2, phiinc), phii)\n b6 = hstack([thetah1, zeros((nh, ni))])\n b7 = hstack([thetah2, b5])\n b8 = hstack((zeros((nk, nc)), thetak))\n\n # Creation of the A matrix:\n\n a11=vstack([[a1,a2,a3]])\n\n #a12 = hstack((zeros((nh, nz)), a4, zeros((nk, nz))))\n a12 = vstack([[zeros((nh, nz)), a4 ,zeros((nk,nz))]])\n\n # a = vstack([hstack([a11, a12]), hstack([zeros((nz, nk + nh + nh)), a22])])\n # a= vstack((hstack((a11,a12)), hstack((zeros((nz,nk+nh+nh)), a22))))\n\n # Creation of the B matrix:\n\n b1 = vstack([b6, b7, b8])\n b = vstack((b1, zeros((nz, nc + ni))))\n\n b1= np.vstack([[b1, thetak]])\n b=vstack((b1,zeros((nz, nc+ni))))\n\n # Creation of the C matrix:\n\n c = vstack((zeros((nk + nh + nh, nw)), c2))\n\n # Scaling of the law of motion by the square root of the discount factor to\n # account for the discounting in the optimization problem:\n\n # a = dot(a, math.sqrt(beta))\n\n a11 = dot(a11, math.sqrt(beta))\n\n b1 = dot(b1, math.sqrt(beta))\n\n b = dot(b, math.sqrt(beta))\n\n\n # STEP 3:\n # Creation of the matrices Q and R for the objective function of the optimal\n # linear regulator problem. The matrices Q, R, S are first created where S\n # allows for the presence of cross products between the control and the state\n # in the objective function. 
The control is then transformed in the last step\n    # to remove the cross product and to put the problem in the proper form of the\n    # optimal linear regulator problem.\n\n    # STEP 3a:\n    # Form matrices Q, R, S where S contains cross products:\n\n    nu = nc + ni\n    nx = nh + nh + nk + nz\n    n = nu + nx\n\n    h1 = hstack([pih1, zeros((nb, ni)), lambda1, zeros((nb, nh + nk)), - ub1])\n    h21 = hstack([-pih2, -pih2.dot(phiinc).dot(phii)])\n    h22 = hstack([zeros((nb, nh)), lambda2, pih2.dot(phiinc).dot(gamma), - ub2 + pih2.dot(phiinc).dot(ud)])\n    h2 = hstack([h21, h22])\n\n    g1 = hstack([zeros((ng, nc)), - phiing.dot(phii), zeros((ng, nh + nh)), phiing.dot(gamma), phiing.dot(ud)])\n\n    r = (alpha * dot(h1.transpose(), h1)) + ((1-alpha) * (dot(h2.transpose(), h2))) + (1-alpha) * (alpha * (dot(g1.transpose(), g1)))\n    q = r[0:nu, 0:nu]\n\n    s = r[0:nu, nu:n]\n\n    r = r[nu:n, nu:n]\n\n    # STEP 3b:\n    # Transform control to remove cross product:\n\n    # First check if q is non-singular:\n\n    if linalg.matrix_rank(q) < asarray(q.shape).max(0):\n        print('EXECUTION STOPPED. The 1-1 block of the R matrix is singular')\n\n    qi = linalg.inv(q)\n\n    a = a - dot(dot(b, qi), s)\n\n    r = r - dot(dot(s.transpose(), qi), s)\n\n    a11 = a[0:nh + nh + nk, 0:nh + nh + nk]\n\n    r1 = r[0:nh + nh + nk, 0:nh + nh + nk]\n\n\n    # STEP 4:\n    # Solve the Ricatti equation using two doubling algorithms.\n    # We exploit the fact that the solution to the Riccatti equation can be solved\n    # in parts (endogenous and exogenous).\n\n    print('Calculating, please wait')\n\n    # STEP 4a:\n    # Calculate the (1,1) block of the Riccatti matrix. The result is a matrix kg\n    # such that kg'*[h1(t), h2(t), k(t)] is the feedback part of the control law.\n    # The matrix a11o gives the (1,1) (all endogenous variables) part of ao.\n\n    [kg, v11] = doubleo(a11.transpose(), b1.transpose(), r1, q)\n\n    a11o = a11 - dot(b1, kg.transpose())\n\n    # STEP 4b:\n    # Calculate the (1,2) block of the Riccatti matrix. The result is the matrix\n    # kg2 such that kg2*z(t) is the feedforward part of the control law.\n\n    a22t = a[nk + nh + nh:nx, nk + nh + nh:nx]\n\n    r2 = r[0:nh + nh + nk, nh + nh + nk:nx]\n\n    a12 = a[0:nh + nh + nk, nh + nh + nk:nx]\n\n    qopt = linalg.inv(q + dot(dot(b1.transpose(), v11), b1))\n\n    r12 = r2 - a11.transpose().dot(v11).dot(b1).dot(qopt).dot(b1.transpose()).dot(v11).dot(a12) + a11.transpose().dot(\n        v11).dot(a12)\n\n    v12 = doublej2(a11o.transpose(), r12, a22t.transpose(), identity(nz))\n\n    kg2 = qopt.dot(b1.transpose()).dot(v12.dot(a22t) + v11.dot(a12))\n\n\n    # STEP 4c:\n    # Compute the matrix ao which gives the reduced form law of motion for x(t)\n    # of the form: x(t+1) = ao*x(t) + c*w(t+1). 
The matrix ao also has the\n    # square root of beta rescaling unwound.\n\n\n    ao = (a - b.dot(hstack([kg.transpose(), kg2]))) / math.sqrt(beta)\n    # Square root of beta unwound.\n\n    a11o = a11o / math.sqrt(beta)\n\n    # STEP 5:\n    # Compute endogenous and exogenous eigenvalues.\n\n    # Endogenous eigenvalues.\n\n    endo = linalg.eig(a11o)\n\n    # Exogenous eigenvalues.\n\n    exo = linalg.eig(a22)\n\n\n    # STEP 6:\n    # Calculation of the solution matrices.\n\n    sh1 = ao[0:nh, 0:nx]\n    sh2 = ao[nh:nh + nh, 0:nx]\n    sk = ao[nh + nh:nh + nh + nk, 0:nx]\n    skl = hstack([zeros((nk, nh + nh)), identity(nk), zeros((nk, nz))])\n    sd = hstack([zeros((nd, nh + nh + nk)), ud])\n    sb1 = hstack([zeros((nb, nh + nh + nk)), ub1])\n    sb2 = hstack([zeros((nb, nh + nh + nk)), ub2])\n    su = -hstack([kg.transpose(), kg2]) - qi.dot(s)\n    sc1 = su[0:nc, 0:nx]\n    si = su[nc:nu, 0:nx]\n    sc = phiinc.dot((gamma.dot(skl) + sd - phii.dot(si)))\n    sg = phiing.dot((gamma).dot(skl) + sd - phii.dot(si))\n    sc2 = sc - sc1\n    sg1 = (1 - alpha) * sg\n    sg2 = sg - sg1\n    ss1 = lambda1.dot(hstack([identity(nh), zeros((nh, nh + nk + nz))])) + pih1.dot(sc1)\n    ss2 = lambda2.dot(hstack((zeros((nh, nh + nk + nz)), identity(nh)))) + pih2.dot(sc2)\n\n\n    # STEP 7\n    # Calculation of the Lagrange multipliers.\n\n    mh1 = - beta * (hstack((identity(nh), zeros((nh, nh + nk))))).dot(hstack((v11, v12))).dot(ao)\n    mh2 = - beta * (hstack((zeros((nh, nh + nk)), identity(nh)))).dot(hstack((v11, v12))).dot(ao)\n    mk = - beta * (hstack((zeros((nk, nh + nh)), identity(nk)))).dot(hstack((v11, v12))).dot(ao)\n    ms1 = alpha * (sb1 - ss1)\n    ms2 = (1 - alpha) * (sb2 - ss2)\n    mc1 = thetah1.transpose().dot(mh1) + pih1.transpose().dot(ms1)\n    mc2 = thetah2.transpose().dot(mh2) + pih2.transpose().dot(ms2)\n    mc = mc1\n    mi = thetak.transpose().dot(mk)\n    mg = (1 - alpha) * sg2\n    md = phiin.transpose().dot(vstack((mc, -mg)))\n    mg1 = mg\n    mg2 = mg\n\n    # VALUES.M a script file (not a function)\n    #\tVALUES computes the values of the consumers' expenditure and wealth\n    #\tfor the sake of budget constraint evaluation, i.e.\n    #\n    #\t\tE Sum {beta^t*p0(t).ci(t)}| I0 = E Sum {beta^t*(w0(t).li(t)\n    #                                         + alph0(t).di(t)0}| I0 +v0ki(-1)\n    #\n    #\tThe program SOLVEHET.M must be run prior to running this program.\n    #\tThe values of udi, k0i, and x0 must also be in memory.\n\n    # Compute the generalized variance:\n\n    v = doublej2(beta * ao, c, ao, c)\n    v = (v * beta) * (1/ (1 - beta))\n\n    # Compute the value of the technology shock:\n\n    sd1 = hstack([zeros((nd, nh + nh + nk)), ud1])\n    sd2 = hstack([zeros((nd, nh + nh + nk)), ud2])\n\n    wd1 = doublej2(beta * ao.transpose(), md.transpose(), ao.transpose(), sd1.transpose())\n    wd1 = x0.transpose().dot(wd1).dot(x0) + trace(md.dot(v).dot(sd1.transpose()))\n\n    wd2 = doublej2(beta * ao.transpose(), md.transpose(), ao.transpose(), sd2.transpose())\n    wd2 = x0.transpose().dot(wd2).dot(x0) + trace(md.dot(v).dot(sd2.transpose()))\n\n\n    # Compute the value of the household input:\n\n    wg1 = doublej2(beta * ao.transpose(), mg.transpose(), ao.transpose(), sg1.transpose())\n    wg1 = (x0.transpose().dot(wg1).dot(x0)) + (trace(md.dot(v).dot(sg1.transpose())))\n\n    wg2 = doublej2(beta * ao.transpose(), md.transpose(), ao.transpose(), sg2.transpose())\n    wg2 = (x0.transpose().dot(wg2).dot(x0)) + trace(md.dot(v).dot(sg2.transpose()))\n\n    # Compute the value of physical capital:\n\n\n    wk1 = k01.transpose().dot((deltak.transpose().dot(mk)) + gamma.transpose().dot(md)).dot(x0)\n    wk2 = k02.transpose().dot((deltak.transpose().dot(mk)) + gamma.transpose().dot(md)).dot(x0)\n\n    # Compute the value 
of consumption goods:\n\n    wc1 = doublej2(beta * ao.transpose(), mc.transpose(), ao.transpose(), sc1.transpose())\n    wc1 = (x0.transpose().dot(wc1).dot(x0)) + (trace(mc.dot(v).dot(sc1.transpose())))\n\n    wc2 = doublej2(beta * ao.transpose(), mc.transpose(), ao.transpose(), sc2.transpose())\n    wc2 = (x0.transpose().dot(wc2).dot(x0)) + trace(mc.dot(v).dot(sc2.transpose()))\n\n    # Compute expenditure and wealth:\n\n    spend1 = wc1\n    spend2 = wc2\n\n    wealth1 = wd1 + wg1 + wk1\n    wealth2 = wd2 + wg2 + wk2\n\n    # Compute budget balance:\n\n    bal1 = wealth1 - spend1\n    bal2 = wealth2 - spend2\n\n    if bal1 > 0:\n        alpha = alpha + 2.0 ** (-i)\n\n    else:\n        alpha = alpha - 2.0 ** (-i)\n\n    i = i + 1\n\n    print(alpha)\n","sub_path":"Non_Gorman_Heterogeneity/Functional/solvehet.py","file_name":"solvehet.py","file_ext":"py","file_size_in_byte":9351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"134212300","text":"\"\"\"\nefetch.py\nUse Entrez's API to search for co-occurring compound-effect terms.\n\"\"\"\nfrom Bio import Entrez\nfrom neo4j.v1 import GraphDatabase, basic_auth\nimport time\nEntrez.email = \"couchd@musc.edu\"\n\nwith open('.env', 'r') as infile:\n    addr = infile.readline().strip().split('=')[1]\n    user = infile.readline().strip().split('=')[1]\n    pwd = infile.readline().strip().split('=')[1]\n\ndriver = GraphDatabase.driver(addr, auth=basic_auth(user, pwd))\nsession = driver.session()\n\nall_drugs = session.run(\"MATCH (n: Drug) RETURN n.name AS name, n.product_name AS product_name\")\nall_drugs_data = [(d['name'], d['product_name']) for d in all_drugs]\nall_effects = session.run(\"MATCH (n: Effect) RETURN n.name AS name\")\nall_effects_data = [d['name'] for d in all_effects]\nfor drug_data in all_drugs_data:\n    query = '\"%s\"[title/abstract] OR \"%s\"[title/abstract]' % drug_data\n    result = Entrez.esearch(db=\"pubmed\", term=query, retmax=1000000)\n    id_list = Entrez.read(result)[\"IdList\"]\n    time.sleep(3)\n","sub_path":"efetch.py","file_name":"efetch.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"476702509","text":"import time\nfrom datetime import datetime\nimport json\nimport csv\n\nall_filter_json = \"/Volumes/PEPPE_DT/WorkspacePyCharm/VirusTotalUmbrellaTop1M/py_prova_merge/prova_merge/all_filter.json\"\ntranco_csv = \"/Volumes/PEPPE_DT/WorkspacePyCharm/VirusTotalUmbrellaTop1M/py_prova_merge/tranco-top-1m.csv\"\n\nwith open(all_filter_json) as file:\n    json_data = json.load(file)\n\nprint(type(json_data))\n\n'''\nlist of domain name scanned (key values of dict 'json_data')\n'''\n\nindex = []\nfor a in json_data.items():\n    index.append(a[0])\n\n'''\nfor x in index:\n    print(x)\n'''\n\n\n'''\nfor x in index:\n    for p in json_data[x]:\n        print(\"domain: \" + str(x) + \", score: \" + str(p['score']))\n'''\n\n'''\nkeep second column of tranco csv\n'''\ndelimiter = ','\ntranco_domain = []\nwith open(tranco_csv, 'r') as tranco:\n    reader = csv.reader(tranco, delimiter=delimiter)\n    for row in reader:\n        tranco_domain.append(row[1])\n\n'''\n - check the presence of domain name in tranco csv with a score number > 0, and add them to a new dict.\n - calulate the time to analyse \n \n'''\nstart = datetime.now()\nfp_dict = {}\nscore_l = []\nfor x in index:\n    for p in json_data[x]:\n        if p['score'] > 0:\n            print(x)\n            for dom in tranco_domain:\n                if dom == x:\n                    fp_dict[x] = p['score']\n                    score_l.append(p['score'])\nend = datetime.now()\nfor x,y in fp_dict.items():\n    print(x, 
y)\n\nprint('Duration: {}'.format(end - start))\nprint(len(index))\nprint(len(fp_dict))\nperc = (len(fp_dict)/len(index))\nprint(\"% :\" + str(perc))\nmaximum = max(score_l)\nprint(\"Max score: \" + str(maximum))\n\n\nfor x in range(1,maximum+1):\n eq = sum(num == x for num in score_l)\n print(\"Num of score equal to \" + str(x) + \" is :\" + str(eq))\n\n","sub_path":"VirusTotalUmbrellaTop1M/tranco_prova.py","file_name":"tranco_prova.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"591567344","text":"from flask import Flask, render_template\nfrom data import db_session\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n param = {}\n param['username'] = \"Ученик Яндекс.Лицея\"\n param['title'] = 'Приветствие'\n return render_template('index.html', **param)\n\n\ndef main():\n db_session.global_init(\"db/blogs.sqlite\")\n app.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"314996306","text":"# Tai Sakuma \nimport os\nimport sys\nimport logging\nimport textwrap\n\nimport pytest\n\ntry:\n import unittest.mock as mock\nexcept ImportError:\n import mock\n\nfrom alphatwirl.concurrently import HTCondorJobSubmitter\n\n##__________________________________________________________________||\njob_desc_template_with_extra = \"\"\"\nExecutable = run.py\noutput = results/$(resultdir)/stdout.txt\nerror = results/$(resultdir)/stderr.txt\nlog = results/$(resultdir)/log.txt\nArguments = $(resultdir).p.gz\nshould_transfer_files = YES\nwhen_to_transfer_output = ON_EXIT\ntransfer_input_files = {input_files}\ntransfer_output_files = results\nUniverse = vanilla\nnotification = Error\ngetenv = True\nrequest_memory = 900\nqueue resultdir in {resultdirs}\n\"\"\"\njob_desc_template_with_extra = textwrap.dedent(job_desc_template_with_extra).strip()\n\n##__________________________________________________________________||\n@pytest.fixture()\ndef subprocess():\n proc_submit = mock.MagicMock(name='proc_condor_submit')\n proc_submit.communicate.return_value = (b'1 job(s) submitted to cluster 1012.', b'')\n\n proc_prio = mock.MagicMock(name='proc_condor_prio')\n proc_prio.communicate.return_value = ('', '')\n proc_prio.returncode = 0\n\n ret = mock.MagicMock(name='subprocess')\n ret.Popen.side_effect = [proc_submit, proc_prio]\n return ret\n\n@pytest.fixture()\ndef obj(monkeypatch, subprocess):\n module = sys.modules['alphatwirl.concurrently.HTCondorJobSubmitter']\n monkeypatch.setattr(module, 'subprocess', subprocess)\n module = sys.modules['alphatwirl.concurrently.exec_util']\n monkeypatch.setattr(module, 'subprocess', subprocess)\n return HTCondorJobSubmitter()\n\ndef test_repr(obj):\n repr(obj)\n\ndef test_init_job_desc_extra(obj):\n job_desc_extra = ['request_memory = 900']\n obj = HTCondorJobSubmitter(job_desc_extra=job_desc_extra)\n assert job_desc_template_with_extra == obj.job_desc_template\n\ndef test_run(obj, tmpdir_factory, caplog):\n workingarea = mock.MagicMock()\n workingarea.path = str(tmpdir_factory.mktemp(''))\n workingarea.package_path.return_value = 'aaa'\n with caplog.at_level(logging.WARNING, logger = 'alphatwirl'):\n assert '1012.0' == obj.run(workingArea=workingarea, 
package_index=0)\n\n##__________________________________________________________________||\n","sub_path":"tests/unit/concurrently/test_HTCondorJobSubmitter.py","file_name":"test_HTCondorJobSubmitter.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"584905570","text":"import glob\nimport os\nimport random\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils.visualize_util import plot\nfrom model import simplified_nvidia_model_with_dropout, nvidia_model_with_dropout\nfrom visualization_helpers import save_image_pair, save_steering_histogram, save_steering_signal, save_training_images\nfrom csv_helpers import load_data_from_csv, save_data_to_csv\nfrom process_camera_image import process_camera_image\nfrom camera_helpers import load_image, show_image_color, image_shift_horiz\nfrom camera_helpers import save_image, random_brightness, image_shear\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('data_dir', 'sample_data/', \"Directory where the sample data is\")\nflags.DEFINE_integer('epochs', 10, 'Number of epochs to train')\nflags.DEFINE_integer('batch', 32, 'Size of each training batch')\nflags.DEFINE_string('model_out', 'model.h5', 'File to save the model')\nflags.DEFINE_boolean('resume', False, 'If Training should resume')\nflags.DEFINE_string('model_in', 'model.h5', 'The model to use when resuming')\nflags.DEFINE_string('session', '0', 'The session name, else uses timestamp in sec')\n\n\ndef load_data_for_training(data_dir, validation_split=0.2, session='abc'):\n\n # Load data\n rows = load_rows_from_dir(data_dir)\n\n # Visualize the training data distribution\n title = 'Distribution of Steering Angles in Training Data'\n save_steering_histogram([r[3] for r in rows], title, 'static/histogram_steering_' + session + '.jpg')\n save_steering_signal([r[3] for r in rows], (0, 1000), 'static/signal_steering_0_' + session + '.jpg')\n save_steering_signal([r[3] for r in rows], (3200, 4200), 'static/signal_steering_1_' + session + '.jpg')\n\n # Split for validation\n train_rows, validation_rows = train_test_split(rows, test_size=validation_split)\n\n # process three images to see its shape, visualization\n for i in range(3):\n index = random.randint(0, len(train_rows) - 1)\n source_image = load_image(train_rows[index][np.random.randint(0, 3)])\n processed_image = process_camera_image(source_image)\n save_image_pair((source_image, \"Source Image\", None),\n (processed_image, \"Processed Image\", None),\n 'static/',\n 'processed_' + session + '_' + str(i) + '.jpg')\n\n return train_rows, validation_rows, processed_image.shape\n\n\ndef load_rows_from_dir(data_dir):\n # Load the data from both the sample and collected data\n csv_files = glob.glob(data_dir + '*.csv')\n rows = []\n for csv_file in csv_files:\n rows.extend(load_data_from_csv(csv_file))\n return rows\n\n\ndef train_model(model, train_rows, validation_rows, batch_size=32, epochs=5, save_checkpoints=False):\n train_generator = generator_train(train_rows, batch_size=batch_size)\n validation_generator = generator_validation(validation_rows, batch_size=batch_size)\n checkpoints = []\n if save_checkpoints:\n checkpoints.append(ModelCheckpoint(\"models/model_new-{epoch:02d}.h5\"))\n\n model.fit_generator(train_generator,\n samples_per_epoch=10240,\n validation_data=validation_generator,\n 
nb_val_samples=2048,\n nb_epoch=epochs,\n callbacks=checkpoints)\n\n\ndef generator_train(datarows, batch_size=32):\n while 1: # Loop forever so the generator never terminates\n x_train = []\n y_train = []\n for i in range(batch_size):\n row = datarows[np.random.randint(0, len(datarows))]\n steer_angle = float(row[3])\n\n # Pick images with greater steer angles to handle straight driving bias\n\n # Pick an image left, right or center, 3x for center cam\n i = np.random.randint(1, 6)\n if i <= 3:\n image = load_image(row[0])\n if i == 4:\n image = load_image(row[1])\n steer_angle += 0.15\n if i == 5:\n image = load_image(row[2])\n steer_angle -= 0.15\n\n # Flip image horizontally to balance sample data driving bias 50%\n if np.random.uniform() > 0.5:\n image = np.fliplr(image)\n steer_angle *= -1\n\n # Alter brightness 40%\n #if np.random.uniform() > 0.6:\n # image = random_brightness(image, (0.8, 1.4))\n\n # Shift horizontally 50%\n #if np.random.uniform() > 0.5:\n # shift_by = np.random.uniform(-0.06, 0.06) # max 25/360 as we are chopping 25 on each side\n # image = image_shift_horiz(image, shift_by, fill_mode='constant')\n # steer_angle += (-0.5)*shift_by\n\n # Shearing 20%\n #if np.random.uniform() > 0.8:\n # shear_by = np.random.uniform(-0.1, 0.1)\n # image = image_shear(image, shear_by, fill_mode='constant')\n # steer_angle += -0.5*shear_by\n\n x_train.append(process_camera_image(image))\n y_train.append(steer_angle)\n\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n yield x_train, y_train\n\n\ndef generator_validation(datarows, batch_size=32):\n while 1: # Loop forever so the generator never terminates\n x_train = []\n y_train = []\n for i in range(batch_size):\n row = datarows[np.random.randint(0, len(datarows))]\n x_train.append(process_camera_image(load_image(row[0])))\n y_train.append(float(row[3]))\n yield np.asarray(x_train), np.asarray(y_train)\n\n\ndef main(_):\n # Load data\n train_rows, validation_rows, input_shape = load_data_for_training(data_dir=FLAGS.data_dir,\n session=FLAGS.session)\n print(\"Model input shape is: \", input_shape)\n print(\"Train Samples: : \", len(train_rows))\n print(\"Validation Samples: \", len(validation_rows))\n\n # visualization\n i = 0\n imgs = []\n steer = []\n for res in generator_train(train_rows, batch_size=1):\n if i >= 16:\n break\n imgs.append(((res[0][0]+0.5)*255).astype(np.uint8))\n steer.append(res[1][0])\n i += 1\n save_training_images(imgs, steer, 'static/training_' + FLAGS.session + '.jpg')\n\n # Build / Load model\n if FLAGS.resume:\n if not os.path.isfile(FLAGS.model_in):\n print(\"ERROR: Input model file '{}' does not exist\", FLAGS.model_in)\n exit()\n print(\"Resuming training from model {}...\".format(FLAGS.model_in))\n print()\n model = load_model(FLAGS.model_in)\n plot(model, to_file='static/model.png', show_shapes=True)\n else:\n print(\"Starting training into model {}\".format(FLAGS.model_in))\n print()\n model = simplified_nvidia_model_with_dropout(input_shape)\n plot(model, to_file='static/model.png', show_shapes=True)\n\n # Train and save\n model.summary()\n train_model(model=model, train_rows=train_rows, validation_rows=validation_rows,\n batch_size=FLAGS.batch, epochs=FLAGS.epochs, save_checkpoints=True)\n model.save(FLAGS.model_out)\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
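The train.py record above hinges on two label-aware augmentations inside generator_train: a fixed +/-0.15 steering correction when a left or right camera frame is sampled, and a horizontal flip that negates the angle. Below is a minimal, self-contained sketch of just that logic so it can be sanity-checked on its own; the 0.15 offset is taken from the script itself, while the 160x320 frame size is only an illustrative stand-in (the script's real shape comes from process_camera_image).

import numpy as np

def augment(image, steer_angle, camera='center', flip=False):
    # Side cameras view the road from an offset, so the recorded center-cam
    # label is corrected toward the lane center (same +/-0.15 as the script).
    if camera == 'left':
        steer_angle += 0.15
    elif camera == 'right':
        steer_angle -= 0.15
    # Mirroring the frame left-right reverses the sign of the steering angle,
    # which is how the script balances the track's turning bias.
    if flip:
        image = np.fliplr(image)
        steer_angle = -steer_angle
    return image, steer_angle

frame = np.zeros((160, 320, 3), dtype=np.uint8)  # illustrative stand-in frame
img, angle = augment(frame, 0.1, camera='left', flip=True)
assert img.shape == frame.shape
assert abs(angle + 0.25) < 1e-9  # (0.1 + 0.15) negated by the flip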
+{"seq_id":"366441593","text":"#!/usr/bin/env python3\n\nimport os\nimport math\nimport pyautogui\nfrom PIL import Image\nimport winsound\nimport time\n\nfor number in range(5, 0, -1):\n print(\"wait \", number)\n time.sleep(1)\n\nchange_of_size = 1\ncrop_x = 1672\ncrop_y = 919\n# crop_x=1980\n# crop_y=1080\n\nss = pyautogui.screenshot()\nss.save(\"ss.png\")\n# ss = Image.open('ss.png')\nss = ss.crop((0, 0, crop_x, crop_y))\nss = ss.resize((int(crop_x / change_of_size), int(crop_y / change_of_size)), 0)\nss.save(\"ss_resize.png\")\npos = pyautogui.mouseinfo.position()\nwinsound.Beep(2500, 200)\n\n\ndef enoughred(p):\n return p[0] >= 96 and p[1] < 32 and p[2] < 32\n\n\ndef is_the_square_red(\n region, i, j, n\n): # as soon as we find a non red pixel we immediately exit the loop\n try:\n for x in range(1, n):\n for y in range(1, n):\n if not enoughred(region.getpixel((i + x, j + y))):\n return False\n except IndexError as e:\n return False\n return True\n\n\ni = 0\nj = 0\nfirst_square_found = dict()\n\n\ndef findnxn_new(region, n):\n # we remember the coordinates of the previously found square and continue searching for these coordinates\n global i, j, first_square_found\n # print('x=',i,' y=',j,' size=',n)\n while i < math.floor(region.width) - 1:\n while j < math.floor(region.height) - 1:\n p = region.getpixel((i, j))\n if enoughred(p):\n if is_the_square_red(region, i, j, n):\n first_square_found[n] = [i, j]\n # print('x=',i,' y=',j)\n return True\n j += 1\n i += 1\n j = 0\n # print('FALSE x=',i,' y=',j,' size=',n)\n return False\n\n\ndef findnxns(region, n):\n corners = []\n region = region.copy()\n # we continue the search with the coordinates that we have memorized in findnxn_new(region, n)\n i = first_square_found[n][0]\n j = first_square_found[n][1]\n while i < math.floor(region.width) - 1:\n while j < math.floor(region.height) - 1:\n p = region.getpixel((i, j))\n if enoughred(p):\n try:\n if is_the_square_red(region, i, j, n): # fill in the rectangle\n corners += [[int(i + n * 7 / 5 / 2), int(j + n / 2)]]\n for x in range(0, int(n * 7 / 5)):\n for y in range(0, n):\n region.putpixel((i + x, j + y), (0, 0, 0))\n except IndexError as e:\n pass\n j += 1\n i += 1\n j = 0\n return corners, region\n\n\nsize = 1\nwhile findnxn_new(ss, size):\n print(\"size = \", size)\n size += 1\n\n# print(first_square_found)\nprint(\"\")\nprint(\"target search\")\norgreds = []\nallreds = []\nsizes = []\nwhile size > 4: # need to pick up\n size -= 1\n print(\" size=\", size)\n corners, ss = findnxns(ss, math.ceil(size))\n sizes += [[size, len(corners)]]\n targets = [(x, y, size) for (x, y) in corners]\n orgreds += [(size, targets)]\n allreds += targets\n # filename = 'ss_%d.png' % size\n # ss.save( filename );\n\nfor s in sizes:\n print(\"size={:>4} targets={:>6}\".format(s[0], s[1]))\n\navgsize = sum([x[0] * x[1] for x in sizes]) / len(allreds)\nprint(\"avgsize = \", avgsize)\n\n\ndef dist(p1, p2):\n return math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))\n\n\ndef arcdist(p1, p2):\n return math.atan2(p1[0] - p2[0], p1[1] - p2[1])\n\n\ndef distpos(p):\n return dist(p, pos)\n\n\ndef arcdistpos(p):\n return arcdist(p, pos)\n\n\nreds = allreds\n# reds = sorted(reds, key=arcdistpos)\ntargets = [[False, x] for x in reds]\n\nprint(\"Targets: \", len(targets))\ncount = 1\nfor t in targets:\n if not t[0]:\n pyautogui.click(x=t[1][0], y=t[1][1])\n time.sleep(0.1)\n print(\"{:>3}-{:>4} {:>4} (size={:>4})\".format(count, t[1][0], t[1][1], t[1][2]))\n count += 1\n for x in targets:\n if dist(t[1], 
x[1]) < avgsize * 2:\n x[0] = True\n\nprint(\"done\")\nwinsound.Beep(2500, 200)\n","sub_path":"vaporize.py","file_name":"vaporize.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"52337949","text":"\nimport gensim\nimport os\n\n# get Report text\nclass MySentences(object):\n def __init__(self, dirname):\n self.dirname = dirname\n\n def __iter__(self):\n for fname in os.listdir(self.dirname):\n for line in open(os.path.join(self.dirname, fname,), encoding=\"utf-8\", errors=\"replace\"):\n yield line.split()\n\n\nsentence = MySentences(\"./data/rt-polaritydata/\")\nwordmodel = gensim.models.Word2Vec(sentence, workers=4)\n\nwordmodel.save(\"./word2vec/wordmodel\")\n","sub_path":"word2vecSentences.py","file_name":"word2vecSentences.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"93752208","text":"#Q1.\nli=['1','2','3','4','5']\nprint(li[::-1])\n\n#Q2\na='viShavDeeP SingH sanDHu'\nfor ch in a:\n if(ch >= 'A' and ch <= 'Z'):\n print(ch, end=\" \")\n\n'''#Q3\na=['1','2','3','4','5']\nb=[]\nfor i in a:\n b.append(int(i))\na=b\nprint(b)\n\n#Q4\na='vahav'\nb=(a[::-1])\nif a==b:\n print('THE STRING IS PALLINDROMIC')\nelse:\n print('THE STRING IS NOT PALLINDROMIC')\n\n#Q5\nimport copy\nx=[1,2, [3,4,5],6]\ny=copy.deepcopy(x)\ny[2][1]=7\nprint(x)\nprint(y)'''\n\n'''SHALLOW COPY: A Shallow copy does not creates a copy of nested objects, instead it just copies the reference of nested loop.\n DEEP COPY: A deep copy creates a new object and recursively adds the copies of nested objects present in the original elements'''\n","sub_path":"assignment4.py.py","file_name":"assignment4.py.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"23213488","text":"\"\"\"\n Tiền xử lý dữ liệu với những từ viết tắt thuộc Tuyển Sinh\n\"\"\"\nimport re\narr_old_words = [u'nv',u'NV', u'Nv1', u'nv1',u'NV1', u'nv2',u'NV2', u'nv3',u'NV3',u'đk', u'dk', u'dh', u'đh',u'ĐH',\n u'DH', u'ts', u'ntn', u'kv', u'KV', u'KV1', u'kv1', u'KV2',u'kv2', u'KV3', u'kv3', u'kí',u'CD', u'CĐ',\n u'cd', u'KHXH', u'GDĐT', u'GD&ĐT', u'QTKD', u'ĐHQG', u'ĐHQGHN', u'ĐHQGTP.HCM',u'TPHCM', u'HCM', u'hcm',\n u'HN', u'TP',u'tp',u'Tp', u'THPT', u'GD', u'ĐT', u'ĐKDT', u'DKDT', u'SP', u'LĐ', u'DHNN', u'ĐHNN',\n u'CNTT', u'ĐHKHXHNV', u'HV', u'ĐHSP', u'ĐHBK', u'ĐHCNTT', u'KHTN', u'CĐSP', u'GDTX', u'TTDN']\narr_new_words = [u'nguyện vọng',u'nguyện vọng', u'nguyện vọng 1',u'nguyện vọng 1', u'nguyện vọng 1', u'nguyện vọng 2',u'nguyện vọng 2',\n u'nguyện vọng 3',u'nguyện vọng 3',u'đăng ký', u'đăng ký',u'đại học', u'đại học',u'đại học',u'đại học',\n u'tuyển sinh', u'như thế nào', u'khu vực', u'khu vực', u'khu vực 1',u'khu vực 1', u'khu vực 2', u'khu vực 2',\n u'khu vực 3',u'khu vực 3', u'ký',u'Cao đẳng', u'Cao đẳng', u'Cao đẳng', u'Khoa học xã hội',\n u'Giáo dục và đào tạo', u'Giáo dục và đào tạo', u'Quản trị kinh doanh', u'Đại học Quốc gia', u'Đại học Quốc gia Hà Nội',\n u'Đại học Quốc gia Thành phố Hồ Chí Minh', u'Thành phố Hồ Chí Minh', u'Hồ Chí Minh',u'Hồ Chí Minh',\n u'Hà Nội', u'Thành phố', u'Thành phố', u'Thành phố', u'Trung học phổ thông', u'Giáo dục',\n u'Đào tạo', u'đăng ký dự thi', u'đăng ký dự thi', u'Sư phạm', u'Lao động', u'Đại học Ngoại Ngữ',\n u'Đại học Ngoại Ngữ', u'Công nghệ thông tin', u'Đại học Khoa học Xã hội và Nhân văn', u'Học viện',\n u'Đại học Sư 
phạm', u'Đại học Bách khoa', u'Đại học Công Nghệ Thông Tin',\n u'Khoa học Tự nhiên', u'Cao đẳng Sư Phạm', u'Giáo dục thường xuyên', u'Truyền Thông Doanh Nghiệp']\n\ndef format_word(s):\n result = ''\n s = re.findall(\"\"\"\\w+|[,;\\-:.!()\\[\\]\\\\\\\"\\+\\*/?'<>{}]\"\"\", s, re.I)# tách từ\n for i in s:\n for j in range(len(arr_old_words)):\n if i == arr_old_words[j]:\n i = arr_new_words[j]\n result += i + ' '\n\n return result\n\nif __name__ == '__main__':\n\n s = input('Nhập câu chuỗi s: ')\n print(format_word(s))","sub_path":"KhoaLuan/TuyenSinh/selenium_tuyen_sinh/standardized_data.py","file_name":"standardized_data.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"609601096","text":"import re\r\nimport re\r\nimport json\r\nimport gzip\r\nimport os\r\nimport glob\r\n# from pyumls import api\r\nimport utils.file_util as file_util\r\nimport utils.text_util as text_util\r\nimport time\r\nfrom scipy import sparse\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom scipy import sparse\r\n# from pattern3.text.en import singularize\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport utils.text_util\r\nimport math\r\nstop_words=list(set(stopwords.words('english')))\r\nstop_words=[x for x in stop_words]\r\nstop_words.append('a')\r\nstop_words.append('an')\r\nstop_words.append('the')\r\nstop_words.append('"')#\"\r\nfrom scipy.sparse import coo_matrix\r\nfrom scipy.sparse import csc_matrix\r\n# print(stop_words)\r\nwnl = WordNetLemmatizer()\r\napi_key='1bd1f2c2-fbab-4d54-aed9-fb442791d49d'\r\nimport pandas as pd\r\nimport numpy as np\r\ndef convert2csc1(info_data):\r\n src_ids=[]\r\n dst_ids=[]\r\n values=[]\r\n for row_num, row_data in enumerate(info_data):\r\n for column_num, column_data in enumerate(row_data):\r\n if column_data != 0:\r\n src_ids.append(row_num)\r\n dst_ids.append(column_num)\r\n values.append(column_data)\r\n print(row_num,'/',len(info_data))\r\n info_matrix = csc_matrix((values, (src_ids, dst_ids)), shape=info_data.shape) # .toarray()# shape=(len(sources),len(dests))\r\n return info_matrix.nonzero()\r\n\r\n\r\ndef update_csc_matrix(count,src_ids,dst_ids,values,new_src,new_dst,new_val):\r\n src_ids.extend(new_src)\r\n dst_ids.extend(new_dst)\r\n values.extend(new_val)\r\n print(count)\r\ndef convert2csc2(info_data,pattern=\"sim\"):\r\n\r\n src_ids = []\r\n dst_ids = []\r\n values = []\r\n # [src_ids.extend(len([y for y in enumerate(x.flat) if y != 0]) * [i]) for i, x in enumerate(np.array(info_data))]\r\n # [dst_ids.extend([j for j, y in enumerate(x.flat) if y != 0]) for i, x in enumerate(np.array(info_data))]\r\n # [values.extend([y for y in x.flat if y != 0]) for i, x in enumerate(np.array(info_data))]\r\n # file_util.dump(src_ids,\"src_ids.pck\")\r\n # file_util.dump(dst_ids, \"dst_ids.pck\")\r\n # file_util.dump(values, \"values.pck\")\r\n # print(np.array(src_ids).shape(),np.array(dst_ids.shape()), np.array(values.shape()))\r\n # info_matrix = csc_matrix((values, (src_ids, dst_ids)),\r\n # shape=(len(src_ids),len(dst_ids))) # .toarray()# shape=(len(sources),len(dests))\r\n # return info_matrix.nonzero()\r\n [update_csc_matrix(i,src_ids,dst_ids,values,len([y for y in x.flat ]) * [i],[j for j,y in enumerate(x.flat) ],[y for y in x.flat ]) for i, x in enumerate(np.array(info_data))]\r\n # return sparse.csr_matrix(info_data)\r\n spmat= coo_matrix((values, (src_ids, dst_ids)), shape=(len(info_data),len(info_data[0])))\r\n return spmat,src_ids,dst_ids,values\r\ndef 
convert2csc3(info_data,pattern=\"sim\"):\r\n return sparse.csr_matrix(info_data)\r\ndef convert_name_mention_UMLS_concepts(name_mention_list,start_index):\r\n name_mention_dict={}\r\n not_found_mentions=[]\r\n start_date=time.time()\r\n i=start_index\r\n while(i<len(name_mention_list)):\r\n name_mention=name_mention_list[i]\r\n end_date=time.time()\r\n if (end_date-start_date)>900:\r\n day_ticket, auth_client = api.getUTS1dayTicket(api_key)\r\n umls_results = api.search_v1(name_mention, auth_client,day_ticket, version='2019AB', max_pages=5)\r\n for result in umls_results:\r\n current_cui = result['ui']\r\n if result.get('ui',None):\r\n if name_mention_dict.get(current_cui,None):\r\n name_mention_dict[current_cui]['name_mentions'].append(name_mention)\r\n else:\r\n name_mention_dict[current_cui]={'name':result['name'],'name_mentions':[name_mention]}\r\n else:\r\n not_found_mentions.append(name_mention)\r\n file_util.dump(not_found_mentions,\"/home/s4616573/pubmed/pubmed_distill/not_found_mention\"+str(start_index)+\".pck\")\r\n file_util.dump(not_found_mentions, \"/home/s4616573/pubmed/pubmed_distill/not_found_mention\"+str(start_index)+\".json\")\r\n file_util.dump(name_mention_dict,\"/home/s4616573/pubmed/pubmed_distill/name_mention_cui\"+str(start_index)+\".pck\")\r\n file_util.dump_json(name_mention_dict,\"/home/s4616573/pubmed/pubmed_distill/name_mention_cui\"+str(start_index)+\".json\")\r\n i+=1\r\ndef get_all_entities_in_ner_format(input_file):\r\n \"\"\"Read a BIO data!\"\"\"\r\n # stop_words=open(\"/home/s4616573/code/bert/storage/stop_words.txt\",'r').readlines()\r\n # stop_words=[re.sub('\\n','',x) for x in stop_words]\r\n stop_words=['so', 'A', 'those', '&quot;', 'The', 're', 'your', 'after', 'such', 'these', 'not', 'An', 'this', 'Some', 'above', 'all', 'Her', 'any', 'an', 'more', 'THE', 'very', 'ALL', 'Our', 'AS', 'his', 'OTHER', 'All', 'too', 'Very', 'This', 'both', 'the', 'from', 'most', 'only', 'Each', 'Few', 'being', 'their', 'other', 'out', 'DO', 'That', 'His', 'that', 'her', 'Other', 'These', 'Most', 'Further', 'our', 'a', 'He', 'HER', 'few', 'some']\r\n\r\n # print(stop_words)\r\n removed_words=[]\r\n rf = open(input_file, 'r')\r\n lines = [];\r\n words = [];\r\n labels = []\r\n\r\n entity_words=[]\r\n entity_list=[]\r\n for line in rf:\r\n word = line.strip().split(' ')[0]\r\n label = line.strip().split(' ')[-1]\r\n # if word=='surgeries':\r\n # print(\"debug\")\r\n # here we dont do \"DOCSTART\" check\r\n if (len(line.strip()) == 0 and words[-1] == '.') or ( label=='O' ):\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])\r\n lines.append((l, w))\r\n if len( entity_words):\r\n if entity_words[-1].endswith('s'):\r\n entity_words[-1]=wnl.lemmatize(entity_words[-1])\r\n if entity_words[0].lower() in stop_words or entity_words[0].isnumeric():\r\n current_entity=\" \".join(entity_words[1:])\r\n removed_words.append(entity_words[0])\r\n elif entity_words[-1].lower() in stop_words:\r\n current_entity = \" \".join(entity_words[0:len(entity_words)-1])\r\n removed_words.append(entity_words[-1])\r\n else:\r\n current_entity = ' '.join([word for word in entity_words if len(word) > 0])\r\n # current_entity=re.sub('\\s{2,}',' ',current_entity)\r\n current_entity=current_entity.strip()\r\n if current_entity!='':\r\n current_entity=re.sub('_','',current_entity)\r\n current_entity=re.sub('&#39;',\"'\",current_entity)\r\n entity_list.append(current_entity)\r\n words = []\r\n labels = []\r\n entity_words = []\r\n\r\n words.append(word)\r\n labels.append(label)\r\n if (label.startswith('B') or label.startswith('I')):\r\n 
entity_words.append(word)\r\n # print(list(set(removed_words)))\r\n rf.close()\r\n return list(set(entity_list))\r\n # return entity_list\r\n\r\n\r\ndef convert_data_ner_format(file_path,data_path=\"/home/s4616573/data/i2b2/\",pattern_file=\"i2b2\",is_i2b2=True):\r\n lines=open(file_path).readlines()\r\n # data_path=\"/home/s4616573/code/tf_ner/data/example/\"\r\n # data_path=\"/home/s4616573/data/i2b2/\"\r\n\r\n \"\"\"Read a BIO data!\"\"\"\r\n rf = open(file_path, 'r')\r\n lines = [];\r\n words = [];\r\n labels = []\r\n text_lines = []\r\n label_lines = []\r\n\r\n for line in rf:\r\n word = line.strip().split(' ')[0]\r\n label = line.strip().split(' ')[-1]\r\n # here we dont do \"DOCSTART\" check\r\n if is_i2b2:\r\n if len(line.strip()) == 0 and words[-1] == '.':\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])+'\\n'\r\n lines.append((l, w))\r\n text_lines.append(w)\r\n label_lines.append(l + '\\n')\r\n words = []\r\n labels = []\r\n else:\r\n if len(line.strip()) == 0:\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])+'\\n'\r\n lines.append((l, w))\r\n text_lines.append(w)\r\n label_lines.append(l + '\\n')\r\n words = []\r\n labels = []\r\n words.append(word)\r\n labels.append(label)\r\n rf.close()\r\n # return text_lines, label_lines\r\n if \"train\" in file_path:\r\n new_tag_file=open(data_path+pattern_file+\"_train.tags.txt\",\"w+\")\r\n new_word_file = open(data_path +pattern_file+ \"_train.words.txt\", \"w+\")\r\n else:\r\n new_tag_file = open(data_path +pattern_file+ \"_test.tags.txt\", \"w+\")\r\n new_word_file = open(data_path + pattern_file+\"_test.words.txt\", \"w+\")\r\n new_word_file.writelines(text_lines)\r\n new_tag_file.writelines(label_lines)\r\n\r\n new_word_file.close()\r\n new_tag_file.close()\r\n # lines=open(\"/home/s4616573/code/tf_ner/data/example/i2b2_train.words.txt\",\"r\").readlines()\r\n lines = open(data_path+pattern_file+\"_train.words.txt\", \"r\").readlines()\r\n print(\"line count\",len(lines))\r\n\r\n\r\n\r\ndef merge_CUI_pck(CUI_dir,file_pattern):\r\n CUI_list=[]\r\n CUI_distill_dir=\"/home/s4616573/pubmed/pubmed_distill/\"\r\n if not os.path.exists(CUI_distill_dir):\r\n os.mkdir(CUI_distill_dir)\r\n for file_name in glob.iglob(CUI_dir + '**/*_'+file_pattern):\r\n print (file_name)\r\n CUI_arr=file_util.load(file_name)\r\n CUI_list.extend(list(CUI_arr))\r\n print(\"LEN CUIs\",len(CUI_list))\r\n CUI_list=list(set(CUI_list))\r\n CUI_list=[re.sub('\\'','',x) for x in CUI_list]\r\n file_util.dump(CUI_list,CUI_distill_dir+\"all_CUIs.pck\")\r\n return CUI_list\r\n\r\ndef update_CUI_all_infos(CUI_file,api_key,begin_idx,file_pattern):\r\n CUI_distill_dir = \"/home/s4616573/pubmed/pubmed_distill/\"\r\n CUI_list=file_util.load(CUI_file)\r\n CUI_dict={}\r\n semantic_type_dict={}\r\n concept_dict={}\r\n len_CUI_list=len(CUI_list)\r\n start_date=time.time()\r\n day_ticket,auth_client=api.getUTS1dayTicket(api_key)\r\n i=begin_idx\r\n while (i< len(CUI_list)):\r\n print(\"end_index\",i)\r\n CUI=CUI_list[i]\r\n end_date=time.time()\r\n if (end_date-start_date)>3600*2:\r\n day_ticket, auth_client = api.getUTS1dayTicket(api_key)\r\n print(\"CUI:\",i,\"/\",len_CUI_list,\":\",CUI)\r\n cui_info = api.getByCUI(CUI, day_ticket,auth_client)\r\n process_start_time=time.time()\r\n if cui_info:\r\n CUI_dict[CUI]={\"name\":cui_info[\"name\"],\"sem_type_uis\":[],\"definitions\":[]}\r\n sem_types=cui_info.get(\"semanticTypes\",[])\r\n concept_dict[cui_info[\"name\"]]=CUI\r\n 
cui_def_link=cui_info.get(\"definitions\",\"NONE\")\r\n if cui_def_link!=\"NONE\":\r\n cui_definitions=api.getDefinitionByCUI(cui_def_link, day_ticket,auth_client)\r\n for cui_def in cui_definitions:\r\n CUI_dict[CUI][\"definitions\"].append(cui_def[\"value\"])\r\n # for x in sem_types:\r\n # sem_type_info=api.getSemanticTypeByCUI(x['uri'],day_ticket,auth_client)\r\n # sem_type_name=sem_type_info[\"name\"]\r\n # sem_type_def=sem_type_info['definition']\r\n # sem_type_ui=sem_type_info['ui']#T047 ui T047\r\n # sem_type_abbr=sem_type_info['abbreviation']\r\n # sem_type_group=sem_type_info['semanticTypeGroup']['expandedForm']\r\n # semantic_type_dict[sem_type_ui]={\"definition\":sem_type_def,\"abbreviation\":sem_type_abbr,\"group\":sem_type_group}\r\n # CUI_dict[CUI][\"sem_type_uis\"].append(sem_type_ui)\r\n # concept_dict[sem_type_name]=sem_type_ui\r\n # concept_dict[sem_type_abbr] = sem_type_ui\r\n # file_util.dump(concept_dict, CUI_distill_dir + \"all_concept_dict\"+file_pattern+\".pck\")\r\n # file_util.dump(semantic_type_dict, CUI_distill_dir + \"semantic_type_dict\"+file_pattern+\".pck\")\r\n # file_util.dump(CUI_dict, CUI_distill_dir + \"CUI_dict\"+file_pattern+\".pck\")\r\n file_util.dump_json(concept_dict, CUI_distill_dir + \"all_concept_dict\"+file_pattern+\".json\")\r\n # file_util.dump_json(semantic_type_dict, CUI_distill_dir + \"semantic_type_dict\"+file_pattern+\".json\")\r\n file_util.dump_json(CUI_dict, CUI_distill_dir + \"CUI_dict\"+file_pattern+\".json\")\r\n process_end_time = time.time()\r\n print(\"Process time\",process_end_time-process_start_time)\r\n else:\r\n print(\"ERROR LINK\",\"end_index\")\r\n i+=1\r\n # file_util.dump(concept_dict,CUI_distill_dir+\"all_concept_dict.pck\")\r\n # file_util.dump(semantic_type_dict, CUI_distill_dir+\"semantic_type_dict.pck\")\r\n # file_util.dump(CUI_dict, CUI_distill_dir + \"CUI_dict.pck\")\r\n # file_util.dump_json(concept_dict, CUI_distill_dir + \"all_concept_dict.json\")\r\n # file_util.dump_json(semantic_type_dict, CUI_distill_dir + \"semantic_type_dict.json\")\r\n # file_util.dump_json(CUI_dict, CUI_distill_dir + \"CUI_dict.json\")\r\n\r\n# merge_CUI_pck(\"/home/s4616573/pubmed/data_extract\",'CUI')\r\n# all_entities=get_all_entities_in_ner_format(\"/home/s4616573/data/i2b2/train.txt\")\r\n# ngram_mentions=text_util.generate_n_gram_from_name_mentions(all_entities,4)\r\n# convert_name_mention_UMLS_concepts(ngram_mentions,9189)\r\n# for entity in all_entities:\r\n# # if '_' in entity:\r\n# print(entity)\r\n# print(\"entities\",len(all_entities))\r\n# file_util.dump(all_entities,\"/home/s4616573/pubmed/pubmed_distill/i2b2_entities.pck\")\r\n# all_entities=[x+'\\n' for x in all_entities]\r\n# with open(\"/home/s4616573/pubmed/pubmed_distill/i2b2_entities.txt\",'w') as f:\r\n# f.writelines(all_entities)\r\n# n_gram_name_mentions = text_util.generate_n_gram_from_name_mentions(all_entities, 4)\r\n# print(\"ngram\",len(n_gram_name_mentions))\r\n# file_util.dump(n_gram_name_mentions,\"/home/s4616573/pubmed/pubmed_distill/i2b2_ngram_entities.pck\")\r\n# result=api.search(\"Tumor Mass\", '1bd1f2c2-fbab-4d54-aed9-fb442791d49d', version='2019AB', max_pages=5)\r\n# print(result)\r\n# print(wnl.lemmatize(\"patches\"))\r\n# update_CUI_all_infos(\"/home/s4616573/pubmed/pubmed_distill/all_CUIs.pck\",'1bd1f2c2-fbab-4d54-aed9-fb442791d49d',0,\"_onlyCUI\")\r\n# print(re.sub('\\'','',\"'C0441655'\"))\r\n# convert_data_ner_format(\"/home/s4616573/data/i2b2/train.txt\")\r\n# convert_data_ner_format(\"/home/s4616573/data/i2b2/test.txt\")\r\n# 
update_CUI_all_infos(\"/home/s4616573/pubmed/pubmed_distill/all_CUIs.pck\",'1bd1f2c2-fbab-4d54-aed9-fb442791d49d',600000,\"_part_final\")\r\n\r\ndef read_i2b2_file(input_file,is_i2b2):\r\n\r\n \"\"\"Read a BIO data!\"\"\"\r\n rf = open(input_file, 'r')\r\n lines = [];\r\n words = [];\r\n labels = []\r\n text_lines =[]\r\n label_lines = []\r\n\r\n for line in rf:\r\n word = line.strip().split(' ')[0]\r\n label = line.strip().split(' ')[-1]\r\n # here we dont do \"DOCSTART\" check\r\n if is_i2b2:\r\n if len(line.strip()) == 0 :#and words[-1] == '.'\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])\r\n lines.append((l, w))\r\n text_lines.append(w)\r\n words = []\r\n labels = []\r\n else:\r\n if len(line.strip()) == 0:\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])\r\n lines.append((l, w))\r\n text_lines.append(w)\r\n words = []\r\n labels = []\r\n words.append(word)\r\n labels.append(label)\r\n rf.close()\r\n return text_lines,label_lines\r\n\r\n# convert_data_ner_format(\"/home/s4616573/data/CLEF/train.txt\",\"/home/s4616573/data/CLEF/\",\"CLEF\",False)\r\ndef read_xlsx_cause_effect_alps3(file_name):\r\n xl_file = pd.ExcelFile(file_name)\r\n\r\n # dfs = {sheet_name: xl_file.parse(sheet_name)\r\n # for sheet_name in xl_file.sheet_names}\r\n dfs=xl_file.parse(\"Sheet1\")\r\n\r\n causes=[[re.sub('\\s+','',y) for y in x.split('\\n')if len(y.strip())>0] if type(x)==str else \"\" for x in dfs[\"Root cause(Extract Target)\"]]\r\n effects=[[re.sub('\\s+','',y) for y in x.split('\\n')if len(y.strip())>0] if type(x)==str else \"\" for x in dfs[\"Effects(Extract Target)\"] ]\r\n texts=[re.sub('\\s+','',x) for x in dfs[\"Description(Target Sentence)\"]]\r\n causes1=dfs[\"Cause (new)\"]\r\n\r\n effects1=dfs[\"Effect (new)\"]\r\n print(len(texts),len(causes),len(effects),len(effects1),len(causes1))\r\n for i in range(227):\r\n if type(causes1[i]) ==str:\r\n sentence_causes=[re.sub('\\s+','',x) for x in causes1[i].split('\\n')]\r\n causes[i]=sentence_causes\r\n for x in sentence_causes:\r\n if x not in texts[i]:\r\n print(i, \"ERROR\", \"cause:\", x, \"text:\", texts[i])\r\n if type(effects1[i]) == str:\r\n sentence_effects = [re.sub('\\s+','',x) for x in effects1[i].split('\\n')]\r\n effects[i] = sentence_effects\r\n for x in sentence_effects:\r\n if x not in texts[i]:\r\n print(i,\"ERROR\",\"effect:\",x,\"text:\",texts[i])\r\n\r\n alps3_data={\"texts\":texts,\"causes\":causes,\"effects\":effects}\r\n file_util.dump(alps3_data,\"/home/s4616573/data/alps3/alps3.pck\")\r\n ner_train_file = open(\"/home/s4616573/data/alps3/train.txt\", \"w\")\r\n ner_test_file = open(\"/home/s4616573/data/alps3/test.txt\", \"w\")\r\n all_labels=[]\r\n for t,text in enumerate(texts):\r\n labels=['O']*len(text)\r\n for effect in effects[t]:\r\n if len(effect.strip())>0:\r\n p = re.compile(effect)\r\n for m in p.finditer(text):\r\n matched_end_index=m.end()\r\n matched_start_index=m.start()\r\n print(len(text),m.start(), m.group(),m.end(),effect,':',text)\r\n print(text[m.start():m.end()])\r\n labels[matched_start_index:matched_end_index]=[\"I-effect\"]*(matched_end_index-matched_start_index)\r\n labels[matched_start_index] = \"B-effect\"\r\n if '。' in text[matched_start_index:matched_end_index]:\r\n print(labels[matched_start_index:matched_end_index])\r\n for cause in causes[t]:\r\n if len(cause.strip())>0:\r\n p = re.compile(cause)\r\n for m in p.finditer(text):\r\n 
matched_end_index = m.end()\r\n matched_start_index = m.start()\r\n print(len(text),':',m.start(),':', m.group(),':',m.end(),':',cause,':',text)\r\n\r\n\r\n labels[matched_start_index:matched_end_index] = [\"I-cause\"] * (matched_end_index-matched_start_index)\r\n labels[matched_start_index] = \"B-cause\"\r\n if '。' in text[matched_start_index:matched_end_index]:\r\n print(labels[matched_start_index:matched_end_index])\r\n all_labels.append(labels)\r\n from sklearn.model_selection import train_test_split\r\n X_train, X_test, y_train, y_test = train_test_split(texts, all_labels, test_size=0.2)\r\n for i, x in enumerate(X_train):\r\n for j,char in enumerate(x):\r\n if '。' == char and y_train[i][j] != 'O':\r\n print()\r\n ner_train_file.write(char + ' ' + y_train[i][j] + '\\n')\r\n ner_train_file.write('\\n')\r\n for i, x in enumerate(X_test):\r\n for j, char in enumerate(x):\r\n if '。' == char and y_test[i][j]!='O':\r\n print()\r\n ner_test_file.write(char + ' ' + y_test[i][j] + '\\n')\r\n ner_test_file.write('\\n')\r\n ner_test_file.close()\r\n ner_train_file.close()\r\n# read_xlsx_cause_effect_alps3(\"/home/s4616573/data/alps3/alps_3.xlsx\")\r\n\r\ndef find_max_sequence(seq,is_i2b2=True):\r\n # seq=[5, 3, 4, 4, 3, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n non_label_idx=6 if is_i2b2 else 2\r\n start_idx=[i for i,x in enumerate(seq) if istart_id and x==in_labels[j]:\r\n end_idx[j]=i\r\n elif i>start_id and x!=in_labels[j]:\r\n break\r\n max_chunk_len = [end_idx[i]-x for i, x in enumerate(start_idx)]\r\n if max_chunk_len:\r\n max_chunk_start_id=np.argmax(max_chunk_len)\r\n else:\r\n start_idx=[0]\r\n end_idx=[len(seq)]\r\n max_chunk_start_id=0\r\n one_word_idx=[i for i,x in enumerate(seq) if i0:\r\n start_idx.extend(one_word_idx)\r\n end_idx.extend([x+1 for x in start_idx])\r\n # print(end_idx)\r\n # print(max_chunk_start_id)\r\n\r\n return start_idx, end_idx,max_chunk_start_id\r\ndef get_vocab_tags(tag_file):\r\n vocab_tags = open(tag_file).readlines()\r\n vocab_tags = [re.sub('\\n', '', x) for x in vocab_tags]\r\n vocab_tags = [x for x in vocab_tags if x != 'O']\r\n return vocab_tags\r\ndef find_chunk_score(tags,prob_seq,logit_sequence,id_to_tag_vocab):\r\n '''\r\n\r\n Parameters\r\n ----------\r\n seq\r\n labels\r\n\r\n Returns score_dict of each tag. 
For ex: {\"ORG\":0.8,\"PER\":0.2}\r\n -------\r\n\r\n '''\r\n tags_wth_prefix=[x.split('-')[-1] for x in tags]\r\n tags_wth_prefix = list(set([x for x in tags_wth_prefix if x!='O']))\r\n tag_score_dict = {x:[] for x in tags_wth_prefix}\r\n tag_sequence=[id_to_tag_vocab[x] for x in logit_sequence]\r\n # seq=[5, 3, 4, 4, 3, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n non_label_idx=len(list(id_to_tag_vocab.keys()))\r\n start_idx=[i for i,x in enumerate(tag_sequence) if istart_id and x_tag==begin_tag and x_pre!=begin_pre:\r\n end_idx[j]=i\r\n chunk_dict[start_id][\"end_id\"]=i\r\n chunk_dict[start_id][\"label\"]=tag_sequence[i].split('-')[-1]\r\n chunk_dict[start_id][\"score\"]=np.sum([math.log1p(x) if x >0 else 0 for x in prob_seq[start_id:i+1]])/len(prob_seq[start_id:i+1])\r\n chunk_dict[start_id][\"len\"] +=1\r\n elif chunk_dict[start_id][\"end_id\"]!=-1:\r\n break\r\n max_chunk_len = [end_idx[i]-x for i, x in enumerate(start_idx)]\r\n if max_chunk_len:\r\n max_chunk_start_id=np.argmax(max_chunk_len)\r\n else:\r\n start_idx=[0]\r\n end_idx=[len(tag_sequence)]\r\n max_chunk_start_id=0\r\n one_word_idx = []\r\n # one_word_idx=[i for i,x in enumerate(tag_sequence) if (i0 else None\r\n next_node=tag_sequence[i+1] if i0:\r\n for one_word_id in one_word_idx:\r\n chunk_dict[one_word_id]={\"end_id\":-1,\"label\":tag_sequence[one_word_id].split('-')[-1],\"score\":math.log1p(prob_seq[one_word_id]) if prob_seq[one_word_id]>0 else 0,\"len\":1}\r\n start_idx.extend(one_word_idx)\r\n end_idx.extend([x+1 for x in start_idx])\r\n # print(end_idx)\r\n # print(max_chunk_start_id)\r\n for start_id,item in chunk_dict.items():\r\n tag_score_dict[item[\"label\"]].append(item[\"score\"]/item[\"len\"])\r\n for tag,item in tag_score_dict.items():\r\n tag_score_dict[tag]=np.amax(item) if len(item)>0 else 0\r\n\r\n source_ids=[]\r\n destination_ids=[]\r\n\r\n return tag_score_dict\r\n# prob_seq = [0.9999993, 0.9999974, 0.9999988, 0.9999958, 0.99999547, 0.99999857, 0.99999857, 0.99994564, 0.9999975, 0.99999726, 0.9999989, 0.99999905, 0.9999969, 0.9999987, 0.99999845, 0.99999547, 0.9999987, 0.9999989, 0.9999962, 0.99999917, 0.9999994, 0.99999905, 0.9999995, 0.99999833, 0.99999905, 0.99999905, 0.9999993, 0.99999917, 0.9999995, 0.9999957, 0.9999964, 0.9999987, 0.9999989, 0.99999595, 0.9999902, 0.99999917, 0.99998677, 0.99999917, 0.9999987, 0.99999654, 0.9999982, 0.99999726, 0.9999981, 0.99999714, 0.9999988, 0.9999949, 0.99999344, 0.9999982, 0.9999987, 0.9999908, 0.99998915, 0.99999774, 0.9999988, 0.99999535, 0.999987, 0.99999785, 0.99999845, 0.9999856, 0.99998987, 0.9999976, 0.9999987, 0.9999901, 0.9999994, 0.9999443]\r\n# logit_seq=[0, 1,4, 4, 4, 3, 4, 2, 5, 5, 5, 5, 3, 4, 3, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 6]\r\n# start_chunk_idx, end_chunk_idx,max_pos=find_max_sequence(label_seq,True)\r\n# start_seq=start_chunk_idx[max_pos]\r\n# end_seq=end_chunk_idx[max_pos]\r\n# prob_max_seq=prob_seq[start_seq:end_seq]\r\n# bald_seq=np.average(prob_max_seq)\r\n# tag_vocab={'B-problem':0,'B-test':1,'B-treatment':2,'I-problem':3,'I-test':4,'I-treatment':5,'O':6}\r\n# id_to_tag={0:'B-problem',1:'B-test',2:'B-treatment',3:'I-problem',4:'I-test',5:'I-treatment',6:'O'}\r\n# chunk_dict=find_chunk_score(prob_seq,logit_seq,id_to_tag)\r\n# 
print(chunk_dict)\r\n# a=[1,4,5,6,7]\r\n# idx=np.array([0,1])\r\n# b=np.array(a,~idx)\r\n# print(b)","sub_path":"utils/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":27772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"83758505","text":"from flask import request, render_template, make_response, jsonify\nfrom google.appengine.api import channel\nfrom werkzeug.contrib.cache import GAEMemcachedCache\nfrom server.models import Group, Participant, Chat\nfrom server.environments import Config\nimport json\n\n\nclass ChatController:\n\n def __init__(self):\n pass\n\n @classmethod\n def index(cls, group_id):\n # confirm participant id in cookie\n c_group_id = request.cookies.get(\"group_id\")\n participant_id = request.cookies.get(\"participant_id\")\n\n # get group\n group = Group.get_by_id(group_id)\n\n # if participant is none or login to another group, create new participant in group\n if not participant_id or c_group_id != str(group_id):\n # create new participant todo:consider the case that group is None\n participant = Participant()\n participant.group_key = group.key\n participant_id = participant.put().id() # save it to datastore\n\n # create channel\n participant_id_str = str(participant_id)\n cache = GAEMemcachedCache()\n token = cache.get(participant_id_str)\n if token is None:\n token = channel.create_channel(participant_id_str)\n # expiration of channel api token is 2 hour\n # https://developers.google.com/appengine/docs/python/channel/?hl=ja#Python_Tokens_and_security\n cache.set(participant_id_str, token, 3600 * 2)\n\n # return response\n resp = make_response(render_template('chat.html', token=token, group_name=group.name))\n\n # set participant_id to cookie\n resp.set_cookie(\"group_id\", str(group_id), expires=Config.calculate_expiration())\n resp.set_cookie(\"participant_id\", participant_id_str, expires=Config.calculate_expiration())\n\n return resp\n\n @classmethod\n def find(cls, group_id):\n group = Group.get_by_id(group_id)\n chats = Chat.query(Chat.group_key == group.key).order(-Chat.created_at).fetch(100)\n return jsonify(chats=list(map(lambda c: c.to_dict(), chats)))\n\n @classmethod\n def create(cls, group_id, msg_type):\n group = Group.get_by_id(group_id)\n participant_key = None\n participant_id = request.cookies.get(\"participant_id\")\n\n if participant_id:\n participant_key = Participant.get_by_id(long(participant_id)).key\n\n message = request.form.get(\"message\", u\"\", type=unicode)\n reference_id = request.form.get(\"reference\", u\"\", type=unicode)\n\n chat = Chat(\n group_key=group.key,\n participant_key=participant_key,\n type=msg_type,\n message=message\n )\n\n #set reference if exist\n if reference_id:\n reference = Chat.get_by_id(long(reference_id))\n if reference is not None:\n chat.reference = reference.key.id()\n\n chat.put()\n\n # send same group members (include myself)\n cls.__broadcast(group_id, chat)\n\n # message is send by channel, so you don't need return\n return \"\"\n\n @classmethod\n def update(cls, group_id, update_func):\n chat_id = request.form.get(\"id\", u\"\", type=unicode)\n if chat_id:\n chat = Chat.get_by_id(long(chat_id))\n if chat is not None:\n update_func(chat)\n chat.put()\n cls.__broadcast(group_id, chat)\n\n return \"\"\n\n @classmethod\n def __broadcast(cls, group_id, chat):\n group = Group.get_by_id(group_id)\n participants_in_group = Participant.query(Participant.group_key == group.key)\n send = lambda p: channel.send_message(str(p.key.id()), json.dumps(chat.to_dict()))\n map(send, participants_in_group)\n\n @classmethod\n def find_stamps(cls, group_id):\n path = \"/server/assets/img/stamps/\"\n stamps = []\n for num in range(1, 8):\n stamps.append(path + \"stamp{num}.PNG\".format(num=str(num).zfill(2)))\n\n return jsonify(stamps=stamps)\n\n \"\"\"\n @classmethod\n def close_channel(cls, group_id):\n participant_id = request.cookies.get(\"participant_id\")\n token = None\n if participant_id:\n cache = GAEMemcachedCache()\n cache.delete(participant_id)\n token = channel.create_channel(participant_id)\n cache.set(participant_id, token)\n\n return token\n \"\"\"","sub_path":"src/server/controllers/chat_controller.py","file_name":"chat_controller.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"189961376","text":"import socket\nimport sys\n\n\nif __name__ == '__main__':\n\t\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tserver_address = ('localhost',12345)\n\tsock.bind(server_address)\n\tbytesAddressPair = sock.recvfrom(1024)\n\tmessage = bytesAddressPair[0]\n\taddress = bytesAddressPair[1]\n\tprint(\"from %s: %s\"%(address,message))\n\tsock.close()\n","sub_path":"udp/simple/serverSimple.py","file_name":"serverSimple.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"489142353","text":"# -*- coding: utf-8 -*-\n'''\n常量\n'''\nfrom django.utils.translation import ugettext_lazy as _\n\n# ------垃圾邮件投递目录 --------\nSPAMSET_DELIVER_FOLDER = (\n (u'Sequester', _(u'隔离队列')),\n (u'Spam', _(u'垃圾箱')),\n (u'INBOX', _(u'收件箱')),\n)\n\n# ------垃圾邮件参数默认值 --------\nSPAMSET_PARAM_DEFAULT = {\n \"save_days\":\"15\",\n \"is_report\":\"-1\",\n \"spam_report\":\"spamreporter\",\n \"host\":\"\",\n \"spam_folder\":\"Spam\",\n \"spam_flag\":\"[***SPAM***]\",\n\n \"greylist\" : \"-1\",\n \"spf\" : \"-1\",\n \"format\" : \"-1\",\n \"sender_blacklist\" : \"-1\",\n \"subject_blacklist\" : \"-1\",\n \"content_blacklist\" : \"-1\",\n \"attach_blacklist\" : \"-1\",\n \"low_risk_attachment\" : \"-1\",\n \"high_risk_attachment\" : \"-1\",\n \"dspam\" : \"-1\",\n \"ctasd\" : \"-1\",\n \"spamassassin\" : \"-1\",\n}\n\n# ------垃圾邮件参数列表 --------\nSPAMSET_PARAM_NAME = (\n (u'save_days', _(u'隔离邮件保存天数')),\n (u'is_report', _(u'是否发送隔离报告')),\n (u'spam_report', _(u'隔离报告发件人')),\n (u'host', _(u'隔离报告链接地址')),\n (u'spam_folder', _(u'垃圾邮件投递位置')),\n (u'spam_flag', _(u'垃圾邮件主题标识')),\n\n (u'greylist', _(u'灰名单检测')),\n (u'spf', _(u'SPF检测')),\n (u'format', _(u'发信人格式')),\n (u'sender_blacklist', _(u'发件人黑名单')),\n (u'subject_blacklist', _(u'主题黑名单')),\n (u'content_blacklist', _(u'内容黑名单')),\n (u'attach_blacklist', _(u'附件黑名单')),\n (u'low_risk_attachment', _(u'小危附件')),\n (u'high_risk_attachment', _(u'高危附件')),\n (u'dspam', u'Dspam'),\n (u'ctasd', u'Cyber'),\n (u'spamassassin', u'Spamassassion'),\n)\n\n# ------发信频率参数 --------\nFREQUENCYSET_PARAM_DEFAULT = (\n (u'minute', u'5'),\n (u'count', u'200'),\n (u'operate', u'block'),\n (u'hour_count', u'0'),\n (u'day_count', u'0'),\n (u'alert_address', u''),\n)\n\n# ------发信操作选项 --------\nFREQUENCYSET_PARAM_OPERATOR = (\n (u'block', _(u'只可发送本地邮件')),\n #(u'disable', u'永久禁用外发'), 修改的是core_mailbox.limit_send,这个设定目前与组权限冲突!\n)","sub_path":"linuxOperation/app/security/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"516173039","text":"import sys\nsys.path.append(\".\")\nimport pickle\nimport glob\nimport argparse\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport seaborn as sns\nfrom collections import defaultdict\nimport os\nfrom utils.utils import *\nimport pdb\n\ndef read_flags():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--results_dir\", type=str, required=False,\n default=\"./results\")\n return parser.parse_args()\n\ndef get_alg_name(exp_args):\n if exp_args[\"algs\"] == \"nn\":\n name = exp_args[\"nn_type\"]\n if exp_args[\"sampling_priority_alpha\"] > 0:\n name += \"-\" + str(exp_args[\"sampling_priority_alpha\"])\n return name\n else:\n return exp_args[\"algs\"]\n\ndef get_all_jerrs():\n all_dfs = []\n fns = os.listdir(args.results_dir)\n for fn in fns:\n cur_dir = args.results_dir + \"/\" + fn\n try:\n jerrs = load_object(cur_dir + \"/jerr.pkl\")\n except:\n print(\"skipping \", cur_dir)\n continue\n if jerrs is None:\n continue\n exp_args = load_object(cur_dir + \"/args.pkl\")\n exp_args = vars(exp_args)\n exp_args[\"alg\"] = get_alg_name(exp_args)\n\n all_dfs.append(jerrs)\n\n all_jerrs = pd.concat(all_dfs, ignore_index=True)\n print(all_jerrs.groupby([\"sql_key\", \"plan\"]).size())\n print(len(all_jerrs))\n pdb.set_trace()\n return all_jerrs\n\ndef main():\n jerrs = get_all_jerrs()\n # SUMMARY_TITLE_FMT = \"{ST}-{LT}-{SUMMARY}\"\n # pdf = PdfPages(\"results.pdf\")\n # for samples_type in set(summary_df[\"samples_type\"]):\n # st_df = summary_df[summary_df[\"samples_type\"] == samples_type]\n # for lt in set(st_df[\"loss_type\"]):\n # lt_df = st_df[st_df[\"loss_type\"] == lt]\n # for summary_type in PLOT_SUMMARY_TYPES:\n # plot_df = lt_df[lt_df[\"summary_type\"] == summary_type]\n # plot_df = plot_df[plot_df[\"template\"] == \"all\"]\n # print(set(plot_df[\"alg\"]))\n # title = SUMMARY_TITLE_FMT.format(ST = samples_type,\n # LT = lt,\n # SUMMARY = summary_type)\n # plot_summary(pdf, plot_df, title)\n\n # pdf.close()\n # pdb.set_trace()\n\nargs = read_flags()\nmain()\n","sub_path":"scripts/analyze_join_plans.py","file_name":"analyze_join_plans.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"553550254","text":"import pygame, os, sys, time, random\r\nfrom pygame.locals import *\r\n\r\n#lista de imagens\r\nb1feitico = ['Bruxa1feitico1.png','Bruxa1feitico2.png']\r\nb1walk = ['Bruxa1walk1.png','Bruxa1walk2.png']\r\nb1neutra = ['Bruxa1neutra.png']\r\nb1tiro = ['tiro1b1.png','tiro2b1.png']\r\npocao = ['pocao1.png','pocao2.png','pocao3.png','pocao4.png','pocao5.png']\r\nb2feitico = ['Bruxa2feitico1.png','Bruxa2feitico2.png']\r\nb2walk = ['Bruxa2walk1.png','Bruxa2walk2.png']\r\nb2neutra = ['Bruxa2neutra.png']\r\nb2tiro = ['tiro1b2.png','tiro2b2.png']\r\narmario = ['BruxaArmario1.png','BruxaArmario2.png','BruxaArmario3.png','BruxaArmario4.png','BruxaArmario5.png','BruxaArmario6.png','BruxaArmario7.png','BruxaArmario8.png','BruxaArmario9.png']\r\nneve = 
['BruxaBonecoNeve1.png','BruxaBonecoNeve2.png','BruxaBonecoNeve3.png']\r\nguarda = ['BruxaKnightAndando1.png','BruxaKnightAndando2.png','BruxaKnightAndando3.png','BruxaKnightAndando4.png','BruxaKnightAndando5.png','BruxaKnightAndando6.png']\r\n\r\npygame.init()\r\n\r\nlargura = 567\r\naltura = 1201\r\n\r\nblack = (0,0,0)\r\nwhite = (255,255,255)\r\nred = (255,0,0)\r\ngreen = (0,255,0)\r\nblue = (0,0,255)\r\n\r\nscreen = pygame.display.set_mode((956, 560))\r\n\r\npygame.display.set_caption('Jogo da Bruxa')\r\nclock = pygame.time.Clock()\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\tdef __init__ (self,listaplay,posplay):\r\n\t\tpygame.sprite.Sprite.__init__(self)\r\n\t\tself.image = pygame.image.load(os.path.join(listaplay[posplay])).convert()\r\n\t\tself.image.set_colorkey(black)\r\n\t\tself.rect = self.image.get_rect()\r\n\t\tself.rect.x = largura/2\r\n\t\tself.rect.y = altura/2-200\r\n\r\nclass Inimigo(pygame.sprite.Sprite):\r\n\tdef __init__ (self,listaini,xinimigo,yinimigo):\r\n\t\tpygame.sprite.Sprite.__init__(self)\r\n\t\tself.image = pygame.image.load(os.path.join(listaini[0])).convert()\r\n\t\tself.image.set_colorkey(black)\r\n\t\tself.rect = self.image.get_rect()\r\n\t\tself.rect.x = xinimigo\r\n\t\tself.rect.y = yinimigo\r\n\t\tself.x_speed = 4\r\n\t\tself.gperna = 0\r\n\t\tself.nperna = 0\r\n\t\tself.aperna = 0\r\n\r\n\tdef zigzaginimigo(self, xini, h):\r\n\t\tself.rect.x += self.x_speed\r\n\t\tif self.rect.right > xini[h] + 100:\r\n\t\t\tself.x_speed = -5\r\n\t\tif self.rect.left < xini[h] - 100:\r\n\t\t\tself.x_speed = 5\r\n\r\n\tdef guarda_andando(self,posguarda1,posguarda2): #MUDAR A PERNA\r\n\t\tif self.gperna < 7:\r\n\t\t\tself.image = pygame.image.load(os.path.join(guarda[posguarda1])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.gperna >= 7:\r\n\t\t\tself.image = pygame.image.load(os.path.join(guarda[posguarda2])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.gperna > 14:\r\n\t\t\tself.gperna = 0\r\n\t\tself.gperna += 1\r\n\r\n\tdef neve_mexendo(self,posneve1,posneve2): #MUDAR A PERNA\r\n\t\tif self.nperna < 10:\r\n\t\t\tself.image = pygame.image.load(os.path.join(neve[posneve1])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.nperna >= 10:\r\n\t\t\tself.image = pygame.image.load(os.path.join(neve[posneve2])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.nperna > 20:\r\n\t\t\tself.nperna = 0\r\n\t\tself.nperna += 1\r\n\r\n\tdef armario_mexendo(self,posarmario1,posarmario2,posarmario3,posarmario4,posarmario5,posarmario6,posarmario7):\r\n\t\tif self.aperna < 10:\r\n\t\t\tself.image = pygame.image.load(os.path.join(armario[posarmario1])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 10 and self.aperna < 20:\r\n\t\t\tself.image = pygame.image.load(os.path.join(armario[posarmario2])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 20 and self.aperna < 30:\r\n\t\t\tself.image = pygame.image.load(os.path.join(armario[posarmario3])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 30 and self.aperna < 40:\r\n\t\t\tself.image = pygame.image.load(os.path.join(armario[posarmario4])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 40 and self.aperna < 50:\r\n\t\t\tself.image = pygame.image.load(os.path.join(armario[posarmario5])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 50 and self.aperna < 60:\r\n\t\t\tself.image = 
pygame.image.load(os.path.join(armario[posarmario6])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 60 and self.aperna < 70:\r\n\t\t\tself.image = pygame.image.load(os.path.join(armario[posarmario7])).convert()\r\n\t\t\tself.image.set_colorkey(black)\r\n\t\tif self.aperna >= 70:\r\n\t\t\tself.aperna = 0\r\n\t\tself.aperna += 1\r\n\r\ndef colisao(x2,y2,w2,h2):\r\n\tif x2+w2 >= largura/2 >= x2 and y2+h2 >= altura/2-200 >= y2:\r\n\t\treturn True\r\n\telif x2+w2 >= largura/2+player.rect.size[0] >= x2 and y2+h2 >= altura/2-200 >= y2:\r\n\t\treturn True\r\n\telif x2+w2 >= largura/2 >= x2 and y2+h2 >= altura/2-200+player.rect.size[1] >= y2:\r\n\t\treturn True\r\n\telif x2+w2 >= largura/2+player.rect.size[0] >= x2 and y2+h2 >= altura/2-200+player.rect.size[1] >= y2:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\r\ndef colisaoini(x1,y1,w1,h1,x2,y2,w2,h2):\r\n\tif x2+w2 >= x1 >= x2 and y2+h2 >= y1 >= y2:\r\n\t\treturn True\r\n\telif x2+w2 >= x1+w1 >= x2 and y2+h2 >= y1 >= y2:\r\n\t\treturn True\r\n\telif x2+w2 >= x1 >= x2 and y2+h2 >= y1+h1 >= y2:\r\n\t\treturn True\r\n\telif x2+w2 >= x1+w1 >= x2 and y2+h2 >= y1+h1 >= y2:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\r\ntamback = 1701\r\n\r\ndef background(x,y):\r\n\tfor k in range(-10,25):\r\n\t\tscreen.blit(backgroundload,(x+k*tamback,y))\r\n\r\ndef tijolo(xplat,yplat):\r\n\tscreen.blit(tijoloload,(xplat,yplat))\r\n\r\ndef inimigoo(listaini,h,xi,yi):\r\n\tscreen.blit(listaini[h].image,(xi,yi))\r\n\r\ndef playerl(xplay, yplay):\r\n\tscreen.blit(player.image,(xplay, yplay))\r\n\r\ndef mainmenu():\r\n\t# o que aparecera no menu\r\n\tlogo = pygame.image.load('logo.png')\r\n\tfonte = pygame.font.Font('freesansbold.ttf',30) #chamada da fonte\r\n\top1 = fonte.render(\"Novo Jogo\",1,(0,0,0)) \r\n\top2 = fonte.render(\"Ajustes\",1,(0,0,0))\r\n\top3 = fonte.render(\"Sair\",1,(0,0,0))\r\n\top1Selec = fonte.render(\"Novo Jogo\",1,(148,0,211))\r\n\top2Selec = fonte.render(\"Ajustes\",1,(148,0,211))\r\n\top3Selec = fonte.render(\"Sair\",1,(148,0,211))\r\n\r\n\ta = op1Selec\r\n\tb = op2\r\n\tc = op3\r\n\r\n\t#musica do menu\r\n\tpygame.mixer.init()\r\n\tpygame.mixer.music.load('menu.mp3')\r\n\tpygame.mixer.music.play()\r\n\r\n\ttela = 1\r\n\r\n\tsaída = False\r\n\twhile not saída:\r\n\t\tif(tela==1):\r\n\t\t\tscreen.fill([245,245,245])\r\n\t\t\tscreen.blit(logo,[70,-100])\r\n\t\t\tscreen.blit(a, [380,300])\r\n\t\t\tscreen.blit(b, [380,400])\r\n\t\t\tscreen.blit(c, [380,500])\r\n\t\telse:\r\n\t\t\tsettings()\r\n\t\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\t\tif event.key == K_ESCAPE:\r\n\t\t\t\t\tsys.exit()\r\n\t\t\t\telif b == op2Selec and event.key == K_DOWN:\r\n\t\t\t\t\ta = op1\r\n\t\t\t\t\tb = op2\r\n\t\t\t\t\tc = op3Selec\r\n\t\t\t\telif c == op3Selec and event.key == K_UP:\r\n\t\t\t\t\ta = op1\r\n\t\t\t\t\tb = op2Selec\r\n\t\t\t\t\tc = op3\r\n\t\t\t\telif c == op3Selec and event.key == K_DOWN:\r\n\t\t\t\t\ta = op1Selec\r\n\t\t\t\t\tb = op2\r\n\t\t\t\t\tc = op3\r\n\t\t\t\telif a == op1Selec and event.key == K_UP:\r\n\t\t\t\t\ta = op1\r\n\t\t\t\t\tb = op2\r\n\t\t\t\t\tc = op3Selec\r\n\t\t\t\telif b == op2Selec and event.key == K_UP:\r\n\t\t\t\t\ta = op1Selec\r\n\t\t\t\t\tb = op2\r\n\t\t\t\t\tc = op3\r\n\t\t\t\telif event.key == K_DOWN:\r\n\t\t\t\t\ta = op1\r\n\t\t\t\t\tb = op2Selec\r\n\t\t\t\t\tc = op3\r\n\r\n\t\t\t\tif a == op1Selec and event.key == 
K_RETURN:\r\n\t\t\t\t\tpygame.mixer.music.stop()\r\n\t\t\t\t\tg = game_loop()\r\n\t\t\t\t\tsaída = True\r\n\t\t\t\telif b == op2Selec and event.key == K_RETURN:\r\n\t\t\t\t\tsettings()\r\n\t\t\t\t\ttela = 2\r\n\t\t\t\telif c == op3Selec and event.key == K_RETURN: #sai, mas nao aparece a imagem\r\n\t\t\t\t\tsair()\r\n\r\n\t\tpygame.display.update()\r\n\r\n#funcao que sai do jogo\r\ndef sair():\r\n\t#pygame.init()\r\n\tscreen.fill(white)\r\n\tscreen.blit(pygame.image.load('sair.png'),[150,150])\r\n\tpygame.display.update()\r\n\ttime.sleep(2)\r\n\tpygame.quit()\r\n\tsys.exit()\r\n\r\n#funcao ajustes do menu\r\ndef settings():\r\n\t#o que aparece no ajustes\r\n\tpygame.init()\r\n\tfonte = pygame.font.Font('freesansbold.ttf',30)\r\n\tsom = fonte.render('Audio ligado',1,(0,0,0))\r\n\tsomon = fonte.render('Audio ligado',1,(148,0,211))\r\n\tsom2 = fonte.render('Audio desligado',1,(0,0,0))\r\n\tsom2off = fonte.render('Audio desligado',1,(148,0,211))\r\n\tvolta = fonte.render('Voltar',1,(0,0,0))\r\n\tvolta2 = fonte.render('Voltar',1,(148,0,211))\r\n\tlogo2 = pygame.image.load('logo2.png')\r\n\r\n\ta = somon\r\n\tb = som2\r\n\tc = volta\r\n\r\n\twhile True:\r\n\t\tscreen.fill(white)\r\n\t\tscreen.blit(logo2,[150,60])\r\n\t\tscreen.blit(a, [380,300])\r\n\t\tscreen.blit(b, [380,400])\r\n\t\tscreen.blit(c,[380,500])\r\n\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\tif b == som2off and event.key == K_DOWN:\r\n\t\t\t\t\ta = som\r\n\t\t\t\t\tb = som2\r\n\t\t\t\t\tc = volta2\r\n\t\t\t\telif b == som2off and event.key == K_UP:\r\n\t\t\t\t\ta = somon\r\n\t\t\t\t\tb = som2\r\n\t\t\t\t\tc = volta\r\n\t\t\t\telif a == somon and event.key == K_UP:\r\n\t\t\t\t\ta = som\r\n\t\t\t\t\tb = som2\r\n\t\t\t\t\tc = volta2\r\n\t\t\t\telif a == somon and event.key == K_DOWN:\r\n\t\t\t\t\ta = som\r\n\t\t\t\t\tb = som2off\r\n\t\t\t\t\tc = volta\r\n\t\t\t\telif c == volta2 and event.key == K_UP:\r\n\t\t\t\t\ta = som\r\n\t\t\t\t\tb = som2off\r\n\t\t\t\t\tc = volta\r\n\t\t\t\telif c == volta2 and event.key == K_DOWN:\r\n\t\t\t\t\ta = somon\r\n\t\t\t\t\tb = som2\r\n\t\t\t\t\tc = volta\r\n\r\n\t\t\t\tif c == volta2 and event.key == K_RETURN:\r\n\t\t\t\t\tmainmenu()\r\n\t\t\t\telif b == somon and event.key == K_RETURN:\r\n\t\t\t\t\tpygame.mixer.music.stop()\r\n\t\t\t\telif a == somon and event.key == K_RETURN:\r\n\t\t\t\t\tpygame.mixer.music.play()\r\n\r\n\t\tpygame.display.update()\r\n\r\nbackgroundload = pygame.image.load('background2.png')\r\ntijoloload = pygame.image.load('tijolo.png')\r\n\r\naini = []\r\nxini = []\r\nlistaini = []\r\nfor t in range(0,10): \r\n\ti = random.randint(0,2)\r\n\tif i == 0:\r\n\t\ta = guarda\r\n\t\tprint('guarda')\r\n\tif i == 1:\r\n\t\ta = neve\r\n\t\tprint('neve')\r\n\tif i == 2:\r\n\t\ta = armario\r\n\t\tprint('armario')\r\n\r\n\txini.append(random.randint(500,1000))\r\n\taini.append(a)\r\n\tinim = Inimigo(aini[t],xini[t],400.5)\r\n\tlistaini.append(inim)\r\n\r\nbruxa_escolhida = 1\r\nif bruxa_escolhida == 1:\r\n\twalk_da_bruxa = b1walk\r\n\tneutra_da_bruxa = b1neutra\r\n\tfeitico_da_bruxa = b1feitico\r\n\ttiro_da_bruxa = b1tiro\r\nif bruxa_escolhida == 2:\r\n\twalk_da_bruxa = b2walk\r\n\tneutra_da_bruxa = b2neutra\r\n\tfeitico_da_bruxa = b2feitico\r\n\ttiro_da_bruxa = b2tiro\r\nplayer = Player(walk_da_bruxa,0)\r\n\r\nclass game_loop():\r\n\tdef __init__ (self):\r\n\t\tself.x = -16\r\n\t\tself.y = 20.5\r\n\r\n\t\tself.xplat = 900\r\n\t\tself.yplat = altura/2 - 400\r\n\r\n\t\tself.x_change = 0\r\n\t\tself.y_change = 0\r\n\t\tself.vel = 
5\r\n\t\tself.perna = 0\r\n\t\tself.feitico = 0\r\n\t\tself.lanca_feitico = 0\r\n\t\tself.tiro_ativado = 0\r\n\t\tself.tiro_andando = 0\r\n\t\tself.xdotiro = player.rect.x + 100\r\n\t\tself.ydotiro = player.rect.y + 50\r\n\r\n\t\tgameExit = False\r\n\t\twhile not gameExit:\r\n\t\t\tfor event in pygame.event.get():\r\n\t\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\t\tgameExit = True\r\n\r\n\t\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\t\tif self.feitico == 0:\r\n\t\t\t\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\t\t\t\tself.x_change = 6\r\n\t\t\t\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\t\t\t\tself.x_change = -6\r\n\t\t\t\t\t\tif event.key == pygame.K_UP:\r\n\t\t\t\t\t\t\tif self.y_change == 0:\r\n\t\t\t\t\t\t\t\tjump()\r\n\t\t\t\t\tif event.key == pygame.K_SPACE:\r\n\t\t\t\t\t\tself.feitico = 1\r\n\r\n\t\t\t\tif event.type == pygame.KEYUP:\r\n\t\t\t\t\tif event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n\t\t\t\t\t\tself.x_change = 0\r\n\t\t\t\t\tif event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n\t\t\t\t\t\tself.y_change = 0\r\n\t\t\t\t\tif event.key == pygame.K_SPACE:\r\n\t\t\t\t\t\tself.feitico = 0\r\n\t\t\t\t\t\tself.lanca_feitico = 0\r\n\r\n\t\t\tdef jump():\r\n\t\t\t\tself.y_change = 1/2 * 10 *self.vel\r\n\r\n\t\t\tdef gravidade():\r\n\t\t\t\tif self.y > 5:\r\n\t\t\t\t\tself.y_change -= 0.7\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.y_change = 0\r\n\t\t\t\t\tself.yplat = altura/2 - 400\r\n\t\t\t\t\t\r\n\t\t\tdef andando():\r\n\t\t\t\tif self.x_change!=0: #MUDAR A PERNA\r\n\t\t\t\t\tif self.perna < 10:\r\n\t\t\t\t\t\tplayer.image = pygame.image.load(os.path.join(walk_da_bruxa[0])).convert()\r\n\t\t\t\t\t\tplayer.image.set_colorkey(black)\r\n\t\t\t\t\tif self.perna >= 10:\r\n\t\t\t\t\t\tplayer.image = pygame.image.load(os.path.join(walk_da_bruxa[1])).convert()\r\n\t\t\t\t\t\tplayer.image.set_colorkey(black)\r\n\t\t\t\t\tif self.perna > 20:\r\n\t\t\t\t\t\tself.perna = 0\r\n\t\t\t\t\tself.perna += 1\r\n\t\t\t\tif self.x_change == 0:\r\n\t\t\t\t\tplayer.image = pygame.image.load(os.path.join(neutra_da_bruxa[0])).convert()\r\n\t\t\t\t\tplayer.image.set_colorkey(black)\r\n\r\n\t\t\tdef LancaFeitico():\r\n\t\t\t\tif self.lanca_feitico < 5:\r\n\t\t\t\t\tplayer.image = pygame.image.load(os.path.join(feitico_da_bruxa[0])).convert()\r\n\t\t\t\t\tplayer.image.set_colorkey(black)\r\n\t\t\t\tif self.lanca_feitico >=5:\r\n\t\t\t\t\tplayer.image = pygame.image.load(os.path.join(feitico_da_bruxa[1])).convert()\r\n\t\t\t\t\tplayer.image.set_colorkey(black)\r\n\t\t\t\t\tself.tiro_ativado = 1\r\n\t\t\t\t\t#screen.blit(pygame.image.load(tiro_da_bruxa[0]), (player.rect.x + 100,player.rect.y+50))\r\n\t\t\t\tself.lanca_feitico += 1\r\n\t\t\t\t#print(self.lanca_feitico)\r\n\r\n\t\t\tcolidp = colisao(self.xplat+self.x_change+25, self.yplat+self.y_change, 160-25, 54)\r\n\r\n\t\t\tif not colidp:\r\n\t\t\t\tself.xplat += self.x_change\r\n\t\t\t\tself.yplat += self.y_change\r\n\t\t\t\tself.x += self.x_change\r\n\t\t\t\tself.y += self.y_change\r\n\t\r\n\t\t\tif colidp:\r\n\t\t\t\tprint('ta em cima')\r\n\t\t\t\tself.y_change = 0\r\n\t\t\telse:\r\n\t\t\t\tgravidade()\r\n\r\n\t\t\tfor h in range(len(listaini)):\r\n\t\t\t\tif aini[h] == guarda:\r\n\t\t\t\t\tlistaini[h].zigzaginimigo(xini,h)\r\n\t\t\t\t\tlistaini[h].guarda_andando(0,1)\r\n\t\t\t\tif aini[h] == neve:\r\n\t\t\t\t\tlistaini[h].neve_mexendo(0,1)\r\n\t\t\t\tif aini[h] == armario:\r\n\t\t\t\t\tlistaini[h].armario_mexendo(0,3,4,5,6,7,8)\r\n\t\t\t\t\r\n\t\t\t\tcolidi = colisao(listaini[h].rect.x + self.x, listaini[h].rect.y + 
\r\n\r\n\t\t\t\tif colidi:\r\n\t\t\t\t\tprint('ta morrendo')\r\n\t\t\t\t\t#BATTLE FUNCTION (to be implemented)\r\n\r\n\t\t\t\tfor f in range(h+1,len(listaini)):\r\n\t\t\t\t\tif h <= len(listaini)-1:\r\n\t\t\t\t\t\tcolidii = colisaoini(listaini[f].rect.x + self.x, listaini[f].rect.y + self.y, listaini[f].rect.size[0]+400, listaini[f].rect.size[1], listaini[h].rect.x + self.x, listaini[h].rect.y + self.y, listaini[h].rect.size[0]+400, listaini[h].rect.size[1])\r\n\r\n\t\t\t\t\tif colidii:\r\n\t\t\t\t\t\tprint('ta colidindo!!!!!!!!!!!!')\r\n\t\t\t\t\t\tlistaini[h].rect.x += 500\r\n\t\t\t\t\t\txini[h] += 500\r\n\r\n\t\t\t\tinimigoo(listaini, h, listaini[h].rect.x + self.x, listaini[h].rect.y + self.y)\r\n\r\n\t\t\tif self.feitico == 0:\r\n\t\t\t\tandando()\r\n\r\n\t\t\tif self.feitico == 1:\r\n\t\t\t\tLancaFeitico()\r\n\r\n\t\t\tif self.tiro_ativado >=1 and self.tiro_ativado<=30:\r\n\t\t\t\tif self.tiro_andando < 5:\r\n\t\t\t\t\tscreen.blit(pygame.image.load(tiro_da_bruxa[0]), (self.xdotiro,self.ydotiro))\r\n\t\t\t\t\tself.xdotiro += 1\r\n\t\t\t\tif self.tiro_andando >= 5:\r\n\t\t\t\t\tscreen.blit(pygame.image.load(tiro_da_bruxa[1]), (self.xdotiro,self.ydotiro))\r\n\t\t\t\t\tself.xdotiro += 1\r\n\t\t\t\tif self.tiro_andando >= 10:\r\n\t\t\t\t\tself.tiro_andando = 0\r\n\t\t\t\tself.tiro_andando += 1\r\n\t\t\t\tself.tiro_ativado += 1\r\n\t\t\tif self.tiro_ativado>30:\r\n\t\t\t\tself.xdotiro = player.rect.x + 100\r\n\t\t\t\tself.tiro_ativado = 0\r\n\r\n\t\t\tpygame.display.update()\r\n\r\n\t\t\tbackground(self.x,self.y-400-185)\r\n\t\t\ttijolo(self.xplat,self.yplat)\r\n\t\t\tplayerl(player.rect.x,player.rect.y)\r\n\t\t\tclock.tick(60)\r\n\r\nmainmenu()\r\npygame.quit()\r\nquit()","sub_path":"Bruxa13.py","file_name":"Bruxa13.py","file_ext":"py","file_size_in_byte":14503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"74534684","text":"#!/usr/bin/env python3\n'''\nModule that acts as the controller for the simple database.\n\nAuthor: Alex Roth\nDate: 2015-11-02\n'''\nimport sys\nfrom db_struct import Database\n\n\ndef main():\n    database = Database()\n    db_history = []\n    db_checkpoints = []\n\n    for line in sys.stdin:\n        db_history, db_checkpoints = process_command(line, database, db_history,\n                                                     db_checkpoints)\n\n\ndef process_command(line, database, db_hist, db_chkpnts):\n    ''' Process commands from stdin by splitting up command by space.'''\n\n    cmd = line.split()\n\n    # All transaction commands\n    if len(cmd) == 1:\n        if cmd[0] == 'BEGIN':\n            db_chkpnts = begin(db_hist, db_chkpnts)\n\n        if cmd[0] == 'COMMIT':\n            db_hist, db_chkpnts = commit(db_hist, db_chkpnts)\n\n        if cmd[0] == 'ROLLBACK':\n            db_hist = rollback(db_hist, db_chkpnts, database)\n\n        if cmd[0] == 'END':\n            end()\n    elif len(cmd) == 3:\n\n        # Only the set command uses two arguments\n        if cmd[0] == 'SET':\n            db_hist.append(line)\n            \n            # In case the user didn't call begin at the start of the session\n            if db_chkpnts == []:\n                db_chkpnts.append(-1)\n\n            database.set(cmd[1], int(cmd[2]), db_chkpnts[-1])\n\n    else:\n\n        if cmd[0] == 'GET':\n            print(database.get(cmd[1]))\n\n        if cmd[0] == 'UNSET':\n            db_hist.append(line)\n            database.unset(cmd[1], db_chkpnts[-1])\n\n        if cmd[0] == 'NUMEQUALTO':\n            print(database.numequalto(int(cmd[1])))\n    \n    return (db_hist, db_chkpnts)\n\n\ndef begin(db_history, db_checkpoints):\n    ''' Open a new transaction block.'''\n    db_history.append('BEGIN')\n    cur_session_index = len(db_history) - 1\n    db_checkpoints.append(cur_session_index)\n    return db_checkpoints\n\n\n
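# Example session (an illustrative sketch of the intended behaviour; not part of the original file):\n#   BEGIN      -> opens a transaction block and records a checkpoint\n#   SET a 10   -> appended to db_history and applied to the database\n#   ROLLBACK   -> replays db_history up to the last checkpoint, undoing the SET\n#   COMMIT     -> closes all open blocks, making the changes permanent\n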
def commit(db_history, db_checkpoints):\n    ''' Close all open transaction blocks, permanently applying the changes made\n    in them.'''\n    commit_point = db_checkpoints.pop()\n    db_checkpoints = []\n    return (db_history[commit_point:], db_checkpoints)\n\n\ndef rollback(db_history, db_checkpoints, database):\n    ''' Undo all of the commands issued in the most recent transaction block,\n    and close the block.'''\n    try:\n        cur_session_index = db_checkpoints.pop()\n        rollback_history = db_history[:cur_session_index]\n\n        prev_index = -1\n        if db_checkpoints != []:\n            prev_index = db_checkpoints[-1]\n\n        database = recover(rollback_history, database, prev_index)\n\n        return rollback_history\n    except:\n        print(\"NO TRANSACTION\")\n        return db_history\n\n\ndef recover(db_history, database, prev_chk):\n    ''' Helper method to rebuild the database.'''\n\n    # Cleans out the database.\n    if db_history == []:\n        for key in database.var_dict:\n            database.var_dict[key] = []\n\n        for key in database.val_counter:\n            database.val_counter[key] = 0\n\n    # Process of rebuilding the database from the previous checkpoint.\n    for i in db_history:\n        if 'SET' in i:\n            cmd = i.split()\n            database.set(cmd[1], int(cmd[2]), prev_chk)\n        elif 'UNSET' in i:\n            cmd = i.split()\n            database.unset(cmd[1], prev_chk)\n\n    return database\n\n\ndef end():\n    sys.exit(0)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"simple-database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"427465212","text":"# This file has the main class where we import all files including config, model_dispatcher and create_folds \n# and their functions are imported and used to make the code parameterized \nimport pandas as pd\nfrom sklearn import tree \nfrom sklearn import metrics\nimport joblib\nimport config\nimport os\nimport argparse\nimport model_dispatcher as md\n\ndf_train = pd.read_csv(config.TRAINING_FOLD_FILE)\n\ndef run_predictions(k,model):\n    # Creating training and test sets based on the folds\n    df_train_fold = df_train[df_train['fold']!=k].reset_index(drop=True)\n    df_test_fold = df_train[df_train['fold']==k].reset_index(drop=True)\n\n    # Creating training and test sets separating feature and labels\n    X_train = df_train_fold.drop(columns='label')\n    X_test = df_train_fold.label.values\n    y_train = df_test_fold.drop(columns='label')\n    y_test = df_test_fold.label.values\n\n    # Create a model object\n    classifier = 'model'\n    dt_model = md.model[model].fit(X_train,X_test)\n    y_pred = dt_model.predict(y_train)\n\n    # Accuracy calculation\n    print('SCORE FOR FOLD = ',k)\n    print('Accuracy score is ',round(metrics.accuracy_score(y_pred,y_test)*100,2))\n    print('Macro F1 score is ',round(metrics.f1_score(y_pred,y_test,average='macro')*100,2))\n    print('Micro F1 score is ',round(metrics.f1_score(y_pred,y_test,average='micro')*100,2))\n    print('Weighted F1 score is ',round(metrics.f1_score(y_pred,y_test,average='weighted')*100,2),'\\n')\n\n    # Saving the model\n    joblib.dump(dt_model, os.path.join(config.MODEL_OUTPUT,f\"{classifier}_{k}_folds.bin\"))\n\nif __name__== '__main__':\n    # Initializing argument parser to take input from command line\n    parser = argparse.ArgumentParser()\n    # Add argument with the name and type for user input\n    parser.add_argument('--folds',type=int)\n    parser.add_argument('--model',type=str)\n    # Read the argument from command line\n    arg = parser.parse_args()\n    # Pass the argument read from command line to the function\n
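    # For illustration, the imported model_dispatcher module might look like this\n    # (hypothetical contents -- the real file is not part of this record):\n    #   model = {'decision_tree_gini': tree.DecisionTreeClassifier(criterion='gini'),\n    #            'decision_tree_entropy': tree.DecisionTreeClassifier(criterion='entropy')}\n    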
run_predictions(arg.folds,arg.model)","sub_path":"MNIST/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"240365829","text":"import numpy as np\n\nclass NeuralNet:\n def __init__(self):\n self.p = 0.1;\n self.X = np.zeros(28 * 28)\n self.Y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n self.W1 = 1 / 20 * np.random.rand(200, 784)\n self.B1 = np.zeros(200)\n self.W2 = 1 / 10 * np.random.rand(10, 200)\n self.B2 = np.zeros(10)\n self.H1 = np.zeros(200)\n self.O = np.zeros(10, dtype=\"float32\")\n\n self.X_size = len(self.X)\n self.H1_size = len(self.H1)\n self.O_size = len(self.O)\n\n def set_X(self, X):\n self.X = X\n\n def set_Y(self, Y):\n self.Y = Y\n\n def act_func(self, X, W, B):\n return np.maximum(0, np.dot(W, X) + B)\n\n def grad_act_func(self, x):\n x = (abs(x) + x) / 2\n return x\n\n def grad_loss_func(self, x, j):\n return 2 * (x - self.Y[j])\n\n def forward(self):\n self.H1 = self.act_func(self.X, self.W1, self.B1)\n U = (np.random.rand(*self.H1.shape) < self.p) / self.p\n self.H1 *= U\n self.O = self.act_func(self.H1, self.W2, self.B2)\n\n def descend(self):\n db2 = []\n\n for i in range(self.O_size):\n db2.append(self.grad_act_func(self.O[i]) * self.grad_loss_func(self.O[i], i))\n\n X_matrix = self.X.reshape(self.X_size, 1)\n H1_matrix = self.H1.reshape(self.H1_size, 1)\n db2 = np.array(db2)\n helper_matrix = db2.reshape(1, self.O_size)\n dw2 = (np.matmul(H1_matrix, helper_matrix)).T\n\n x = np.tile(np.matmul(helper_matrix, self.W2).T, self.X_size)\n y = np.matmul(X_matrix, self.grad_act_func(H1_matrix.T))\n dw1 = np.multiply(x.T, y).T\n db1 = np.multiply(np.matmul(helper_matrix, self.W2), self.grad_act_func(H1_matrix.T)).reshape(self.H1_size)\n\n return dw1, dw2, db1, db2\n\n def back_propagate(self):\n dw1, dw2, db1, db2 = self.descend()\n self.W2 -= dw2 * 0.1\n self.W1 -= dw1 * 0.1\n self.B2 -= db2 * 0.1\n self.B1 -= db1 * 0.1\n\n def get_results(self):\n return np.argmax(self.O)\n","sub_path":"NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"127791764","text":"\"\"\"empty message\n\nRevision ID: c7657e5a1667\nRevises: 1c6aff0e9fcf\nCreate Date: 2017-11-20 10:01:24.181987\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c7657e5a1667'\ndown_revision = '1c6aff0e9fcf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user_verification', sa.Column('expired_at', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user_verification', 'expired_at')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/c7657e5a1667_.py","file_name":"c7657e5a1667_.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"182598555","text":"import numpy as np\nwith open('waveform-+noise.data')as file:\n lines = file.readlines()\ndata=[]\nfor line in lines:\n data.append([float(x) for x in line.split(',')])\n\narray_data=np.array(data)\narray_data=np.copy(array_data[0:500,:])\n\nglobal attributes\nattributes=array_data[:,0:40]\nclasses=np.array([int(x) for x in array_data[:,-1]])\nestimate_class=np.zeros(array_data.shape[0])\nmid_sample=attributes[np.random.randint(array_data.shape[0],size=3)]\nlast_mid_sample=np.zeros(attributes.shape)\n\ndef caculate_cost( mid_sample,classes ):\n cost = 0\n counter=0\n for sample in attributes:\n if classes[counter]==0:\n cost=cost+sum(abs(sample-mid_sample[0,:]))\n elif classes[counter]==1:\n cost = cost + sum(abs(sample - mid_sample[1,:]))\n else:\n cost=cost+sum(abs(sample-mid_sample[2,:]))\n counter+=1\n return cost\n\ndef cluster(mid_sample):\n counter = 0\n estimate_class=np.zeros(attributes.shape[0])\n for attribute in attributes:\n distance = np.array([])\n for y in range(3):\n distance = np.hstack([distance, np.sum(abs(mid_sample[y, :] - attribute))])\n position = np.argwhere(distance == np.min(distance))\n estimate_class[counter] = position[0, 0]\n counter = counter + 1\n return estimate_class\nestimate_class=cluster(mid_sample)\ncost=caculate_cost(mid_sample,estimate_class)\nlast_cost=float(\"inf\")\niteration=0\nwhile cost
Click here to unsubscribe'.format(pitch, unsub_url),\n            from_email='sandbox@sparkpostbox.com',\n            subject='Hello from HackGen'\n        )\n        return HttpResponse('Success!')\n    except:\n        traceback.print_exc()\n        return HttpResponseBadRequest('Could not subscribe email address')\n\n\ndef unsubscribe_view(request):\n    email = request.GET.get('email')\n    try:\n        subscription = Subscription.objects.get(email=email)\n    except:\n        return HttpResponse('Failed to cancel subscription')\n    subscription.delete()\n    if settings.DEBUG:\n        unsub_url = \"http://localhost:8000/unsubscribe?email={}\".format(email)\n    else:\n        unsub_url = \"https://hackgen.herokuapp.com/unsubscribe?email={}\".format(email)\n    sp.transmissions.send(\n        use_sandbox=True,\n        recipients=[email],\n        html='You have successfully unsubscribed from HackGen',\n        from_email='sandbox@sparkpostbox.com',\n        subject='Hello from HackGen'\n    )\n    return HttpResponse('Successfully unsubscribed!')\n","sub_path":"hackathon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"284130794","text":"## You can find this code on this link: https://www.tensorflow.org/guide/eager\n# TensorFlow's eager execution is an imperative programming environment that \n# evaluates operations immediately, without building graphs: operations return\n# concrete values instead of constructing a computational graph to run later.\n# Eager execution is a flexible ML platform for research and experimentation,\n# providing:\n# · An intuitive interface\n# · Easier debugging\n# · Natural control flow\n# Eager execution supports most TensorFlow operations and GPU acceleration.\n\n#%% Setup and basic usage\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\n# Now you can run TensorFlow operations and the results will return immediately:\ntf.executing_eagerly()\n\nx = [[2.]]\nm = tf.matmul(x, x)\nprint(\"hello, {}\".format(m))\n\n# Enabling eager execution changes how TensorFlow operations behave - now they\n# immediately evaluate and return their values to Python. tf.Tensor objects \n# reference concrete values instead of symbolic handles to nodes in a \n# computational graph. Since there isn't a computational graph to build and\n# run later in session, it's easy to inspect results using print() or a debugger.\n# Evaluating, printing, and checking tensor values does not break the flow for \n# computing gradients.\n# Eager execution works nicely with NumPy. NumPy operations accept tf.Tensor arguments.\n# TensorFlow math operations convert Python objects and NumPy arrays to tf.Tensor objects.\n# The tf.Tensor.numpy method returns the object's value as a NumPy ndarray.\na = tf.constant([[1, 2], \n                 [3, 4]])\nprint(a)\n\n# Broadcasting support\nb = tf.add(a, 1)\nprint(b)\n\n# Operator overloading is supported\nprint(a * b)\n\n# Use NumPy values\nimport numpy as np\nc = np.multiply(a, b)\nprint(c)\n\n# Obtain numpy value from a tensor:\nprint(a.numpy())\n\n# The tf.contrib.eager module contains symbols available to both eager and graph execution\n# environments and is useful for writing code to work with graphs:\ntfe = tf.contrib.eager\n\n#%% Dynamic Control flow\n# A major benefit of eager execution is that all the functionality of the host language is\n# available while your model is executing. So, for example, it is easy to write fizzbuzz:
\ndef fizzbuzz(max_num):\n    counter = tf.constant(0)\n    max_num = tf.convert_to_tensor(max_num)\n    for num in range(1, max_num.numpy() + 1):\n        num = tf.constant(num)\n        if int(num % 3) == 0 and int(num % 5) == 0:\n            print('FizzBuzz')\n        elif int(num % 3) == 0:\n            print('Fizz')\n        elif int(num % 5) == 0:\n            print('Buzz')\n        else:\n            print(num.numpy())\n        counter += 1\n\nfizzbuzz(15)\n\n#%% Build a model\n# When using TensorFlow with eager execution you can either write your own layers or use \n# a layer provided in the tf.keras.layers package.\n\n# While you can use any Python object to represent a layer, TensorFlow has tf.keras.layers.Layer\n# as a convenient base class. Inherit from it to implement your own layer:\nclass MySimpleLayer(tf.keras.layers.Layer):\n    def __init__(self, output_units):\n        super(MySimpleLayer, self).__init__()\n        self.output_units = output_units\n    \n    def build(self, input_shape):\n        # The build method gets called the first time your layer is used.\n        # Creating variables on build() allows you to make their shape depend\n        # on the input shape and hence removes the need for the user to specify\n        # full shapes. It is possible to create variables during __init__() if \n        # you already know their full shapes.\n        self.kernel = self.add_variable(\n            \"kernel\", [input_shape[-1], self.output_units]\n        )\n\n    def call(self, input):\n        # Override call() instead of __call__ so we can perform some bookkeeping\n        return tf.matmul(input, self.kernel)\n\n# Use tf.keras.layers.Dense layer instead of MySimpleLayer above as it has a superset\n# of functionality (it can also add a bias)\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(10, input_shape=(784,)),\n    tf.keras.layers.Dense(10)\n])\n\n# Alternatively, organize models in classes by inheriting from tf.keras.Model.\nclass MNISTModel(tf.keras.Model):\n    def __init__(self):\n        super(MNISTModel, self).__init__()\n        self.dense1 = tf.keras.layers.Dense(units=10)\n        self.dense2 = tf.keras.layers.Dense(units=10) \n    \n    def call(self, input):\n        \"\"\"Run the model.\"\"\"\n        result = self.dense1(input)\n        result = self.dense2(result)\n        result = self.dense2(result)  # reuse variables from dense2 layer\n        return result\n\nmodel = MNISTModel()\n\n#%% Eager training\n# Automatic differentiation is useful for implementing ML algorithms such as \n# backpropagation for training neural networks. During eager execution, use tf.GradientTape\n# to trace operations for computing gradients later.\n# tf.GradientTape is an opt-in feature to provide maximal performance when not tracing. Since\n# different operations can occur during each call, all forward-pass operations get recorded\n# to a \"tape\". To compute the gradient, play the tape backwards and then discard. A particular\n# tf.GradientTape can only compute one gradient; subsequent calls throw a runtime error.
\nw = tf.Variable([[1.0]])\nwith tf.GradientTape() as tape:\n    loss = w * w\n\ngrad = tape.gradient(loss, w)\nprint(grad)  # => tf.Tensor([[2.]], shape=(1, 1), dtype=float32)\n\n## Train a model\n# Fetch and format the mnist data\n(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()\ndataset = tf.data.Dataset.from_tensor_slices(\n    (tf.cast(mnist_images[..., tf.newaxis]/255, tf.float32),\n     tf.cast(mnist_labels, tf.int64)))\ndataset = dataset.shuffle(1000).batch(32)\n\n# Build the model\nmnist_model = tf.keras.Sequential([\n    tf.keras.layers.Conv2D(16,[3,3], activation='relu'),\n    tf.keras.layers.Conv2D(16,[3,3], activation='relu'),\n    tf.keras.layers.GlobalAveragePooling2D(),\n    tf.keras.layers.Dense(10) \n])\n\nfor images, labels in dataset.take(1):\n    print(\"Logits: \", mnist_model(images[0:1]).numpy())\n\n# While keras models have a built-in training loop (using the fit method),\n# sometimes you need more customization. Here's an example of a training\n# loop implemented with eager:\noptimizer = tf.train.AdamOptimizer()\nloss_history = []\nfor (batch, (images, labels)) in enumerate(dataset.take(400)):\n    if batch % 10 == 0:\n        print('.', end='')\n    with tf.GradientTape() as tape:\n        logits = mnist_model(images, training=True)\n        loss_value = tf.losses.sparse_softmax_cross_entropy(labels, logits)\n    \n    loss_history.append(loss_value.numpy())\n    grads = tape.gradient(loss_value, mnist_model.trainable_variables)\n    optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables),\n                              global_step=tf.train.get_or_create_global_step())\n\nimport matplotlib.pyplot as plt\nplt.plot(loss_history)\nplt.xlabel('Batch #')\nplt.ylabel('Loss [entropy]')\n\n#%% Variables and optimizers\n# Better encapsulate model parameters by using tf.Variable with tf.GradientTape.\nclass Model(tf.keras.Model):\n    def __init__(self):\n        super(Model, self).__init__()\n        self.W = tf.Variable(5., name='weight')\n        self.B = tf.Variable(10., name='bias')\n    \n    def call(self, inputs):\n        return inputs * self.W + self.B\n\n# A toy dataset of points around 3 * x + 2\nNUM_EXAMPLES = 2000\ntraining_inputs = tf.random_normal([NUM_EXAMPLES])\nnoise = tf.random_normal([NUM_EXAMPLES])\ntraining_outputs = training_inputs * 3 + 2 + noise\n\n# The loss function to be optimized\ndef loss(model, inputs, targets):\n    error = model(inputs) - targets\n    return tf.reduce_mean(tf.square(error))\n\ndef grad(model, inputs, targets):\n    with tf.GradientTape() as tape:\n        loss_value = loss(model, inputs, targets)\n    return tape.gradient(loss_value, [model.W, model.B])\n\n# Define:\n# 1. A model.\n# 2. Derivatives of a loss function with respect to model parameters.\n# 3. A strategy for updating the variables based on the derivatives.
\nmodel = Model()\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\nprint(\"Initial loss: {:.3f}\".format(loss(model, training_inputs, training_outputs)))\n\n# Training loop\nfor i in range(300):\n    grads = grad(model, training_inputs, training_outputs)\n    optimizer.apply_gradients(zip(grads, [model.W, model.B]),\n                              global_step=tf.train.get_or_create_global_step())\n    \n    if i % 20 == 0:\n        print(\"Loss at step {:03d}: {:.3f}\".format(i, loss(model, training_inputs, training_outputs)))\n    \nprint(\"Final loss: {:.3f}\".format(loss(model, training_inputs, training_outputs)))\nprint(\"W = {}, B = {}\".format(model.W.numpy(), model.B.numpy()))\n\n#%% Use objects for state during eager execution\n# With graph execution, program state (such as the variables) is stored in\n# global collections and their lifetime is managed by the tf.Session object.\n# In contrast, during eager execution the lifetime of state objects is \n# determined by the lifetime of their corresponding Python object.\n\n# Variables are objects\nif tf.test.is_gpu_available():\n    with tf.device(\"gpu:0\"):\n        v = tf.Variable(tf.random_normal([1000, 1000]))\n        v = None  # v no longer takes up GPU memory\n\n# Object-based saving\nx = tf.Variable(10.)\ncheckpoint = tf.train.Checkpoint(x=x)\n\nx.assign(2.)  # Assign a new value to the variables and save.\ncheckpoint_path = './checkpoints/'\ncheckpoint.save('./checkpoints/')\n\nx.assign(11.)  # Change the variable after saving\n# Restore values from the checkpoint\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_path))\nprint(x)  # => 2.0\n\nimport os\nimport tempfile\n\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Conv2D(16,[3,3], activation='relu'),\n    tf.keras.layers.GlobalAveragePooling2D(),\n    tf.keras.layers.Dense(10)\n])\noptimizer = tf.train.AdamOptimizer(learning_rate=0.001)\ncheckpoint_dir = tempfile.mkdtemp()\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\nroot = tf.train.Checkpoint(optimizer=optimizer,\n                           model=model,\n                           optimizer_step=tf.train.get_or_create_global_step())\n\nroot.save(checkpoint_prefix)\nroot.restore(tf.train.latest_checkpoint(checkpoint_dir))\n\n# Object-oriented metrics\nm = tfe.metrics.Mean(\"loss\")\nm(0)\nm(5)\nm.result()  # => 2.5\nm([8, 9])\nm.result()  # => 5.5\n\n## Summaries and TensorBoard\n# TensorBoard is a visualization tool for understanding, debugging and\n# optimizing the model training process. It uses summary events that \n# are written while executing the program.
\nglobal_step = tf.train.get_or_create_global_step()\nlogdir = \"./tb/\"\nwriter = tf.contrib.summary.create_file_writer(logdir)\nwriter.set_as_default()\n\nfor _ in range(10):\n    global_step.assign_add(1)\n    # Must include a record_summaries method\n    with tf.contrib.summary.record_summaries_every_n_global_steps(100):\n        # your model code goes here\n        tf.contrib.summary.scalar('global_step', global_step)\n\n#%% Advanced automatic differentiation topics\n# Dynamic models\n# This example for a backtracking line search algorithm looks like normal\n# NumPy code, except that there are gradients and it is differentiable, despite the\n# complex control flow:\ndef line_search_step(fn, init_x, rate=1.0):\n    with tf.GradientTape() as tape:\n        # Variables are automatically recorded, but manually watch a tensor\n        tape.watch(init_x)\n        value = fn(init_x)\n    grad = tape.gradient(value, init_x)\n    grad_norm = tf.reduce_sum(grad * grad)\n    init_value = value\n    while value > init_value - rate * grad_norm:\n        x = init_x - rate * grad\n        value = fn(x)\n        rate /= 2.0\n    return x, value\n\n# Additional functions to compute gradients\ndef square(x):\n    return tf.multiply(x, x)\n\ngrad = tfe.gradients_function(square)\nsquare(3.).numpy()\ngrad(3.)[0].numpy()\n\n# The second-order derivative of square:\ngradgrad = tfe.gradients_function(lambda x: grad(x)[0])\ngradgrad(3.)[0].numpy()\n\n# The third-order derivative is None:\ngradgradgrad = tfe.gradients_function(lambda x: gradgrad(x)[0])\ngradgradgrad(3.)\n\n# With flow control:\ndef abs(x):\n    return x if x > 0. else -x\n\ngrad = tfe.gradients_function(abs)\ngrad(3.)[0].numpy()\ngrad(-3.)[0].numpy()\n\n## Custom gradients\n# Custom gradients are an easy way to override gradients in eager and \n# graph execution. Within the forward function, define the gradient with\n# respect to the inputs, outputs, or intermediate results. For example,\n# here's an easy way to clip the norm of the gradients in the backward pass:
\n@tf.custom_gradient\ndef clip_gradient_by_norm(x, norm):\n    y = tf.identity(x)\n    def grad_fn(dresult):\n        return [tf.clip_by_norm(dresult, norm), None]\n    return y, grad_fn\n\n# Custom gradients are commonly used to provide a numerically stable gradient\n# for a sequence of operations:\ndef log1pexp(x):\n    return tf.log(1 + tf.exp(x))\ngrad_log1pexp = tfe.gradients_function(log1pexp)\n# The gradient computation works fine at x = 0.\ngrad_log1pexp(0.)[0].numpy()\n# However, x = 100 fails because of numerical instability\ngrad_log1pexp(100.)[0].numpy()\n\n# Here, the log1pexp function can be analytically simplified with a custom gradient.\n# The implementation below reuses the value for tf.exp(x) that is computed during the\n# forward pass--making it more efficient by eliminating redundant calculations:\n@tf.custom_gradient\ndef log1pexp(x):\n    e = tf.exp(x)\n    def grad(dy):\n        return dy * (1 - 1 / (1 + e))\n    return tf.log(1 + e), grad\n\ngrad_log1pexp = tfe.gradients_function(log1pexp)\n# As before, the gradient computation works fine at x = 0\ngrad_log1pexp(0.)[0].numpy()\n\n# And the gradient computation also works at x = 100\ngrad_log1pexp(100.)[0].numpy()\n\n#%% Performance\nimport time\n\ndef measure(x, steps):\n    # TensorFlow initializes a GPU the first time it's used, exclude from timing.\n    tf.matmul(x, x)\n    start = time.time()\n    for i in range(steps):\n        x = tf.matmul(x, x)\n        # tf.matmul can return before completing the matrix multiplication\n        # (e.g., can return after enqueuing the operation on a CUDA stream).\n        # The x.numpy() call below will ensure that all enqueued operations\n        # have completed (and will also copy the result to host memory,\n        # so we're including a little more than just the matmul operation\n        # time).\n        _ = x.numpy()\n    end = time.time()\n    return end - start\n\nshape = (1000, 1000)\nsteps = 200\nprint(\"Time to multiply a {} matrix by itself {} times:\".format(shape, steps))\n\n# Run on CPU:\nwith tf.device(\"/cpu:0\"):\n    print(\"CPU: {} secs\".format(measure(tf.random_normal(shape), steps)))\n\n# Run on GPU, if available:\nif tfe.num_gpus() > 0:\n    with tf.device(\"/gpu:0\"):\n        print(\"GPU: {} secs\".format(measure(tf.random_normal(shape), steps)))\nelse:\n    print(\"GPU: not found\")\n\n# A tf.Tensor object can be copied to a different device to execute its operations:\nif tf.test.is_gpu_available():\n    x = tf.random_normal([10, 10])\n\n    x_gpu0 = x.gpu()\n    x_cpu = x.cpu()\n\n    _ = tf.matmul(x_cpu, x_cpu)  # Runs on CPU\n    _ = tf.matmul(x_gpu0, x_gpu0)  # Runs on GPU:0\n\n    if tfe.num_gpus() > 1:\n        x_gpu1 = x.gpu(1)\n        _ = tf.matmul(x_gpu1, x_gpu1)  # Runs on GPU:1\n\n## Use eager execution in a graph environment\n# Selectively enable eager execution in a TensorFlow graph environment using \n# tfe.py_func. This is used when tf.enable_eager_execution() has \n# not been called.
\ndef my_py_func(x):\n    x = tf.matmul(x, x)  # You can use tf ops\n    print(x)  # but it's eager!\n    return x\n\nwith tf.Session() as sess:\n    x = tf.placeholder(dtype=tf.float32)\n    # Call eager function in graph!\n    pf = tfe.py_func(my_py_func, [x], tf.float32)\n\n    sess.run(pf, feed_dict={x: [[2.0]]})  # [[4.0]]","sub_path":"kerasGuides/eagerExecution.py","file_name":"eagerExecution.py","file_ext":"py","file_size_in_byte":15652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"210790981","text":"import numpy as np\nimport random\n\n\ndef minibatch_generator(data, batch_size, window_size, upsample,\n                        noise_flag=True, epoch_size=None):\n    \"\"\"\n    :param data: [total_time, dims]\n    return: [batch_size, dims, total_time] if 'GPU'\n            [batch_size, total_time, dims] if not\n    \"\"\"\n    # TODO: add support for noise selection later\n    trace_length = data['traces'].shape[0]\n    if epoch_size is None:\n        epoch_size = trace_length // window_size\n\n    if 'soma_spikes' in data:\n        s_flag = True\n    else:\n        s_flag = False\n\n    for epoch_idx in range(epoch_size):\n        x_accum = []\n        n_accum = []\n        m_accum = []\n        if s_flag:\n            soma_accum = []\n        for _window in window_generator(trace_length, window_size, batch_size):\n            x_accum.append(data['traces'][_window, :])\n            n_accum.append(data['noises'][_window, :])\n            m_accum.append(data['masks'][_window, :])\n            if s_flag:\n                spike_window = np.arange(_window[0], _window[0] + window_size * 5)\n                soma_accum.append(data['soma_spikes'][spike_window, :])\n\n        x_accum = np.stack(x_accum)\n        n_accum = np.stack(n_accum)\n        m_accum = np.stack(m_accum)\n\n        x_accum = np.transpose(x_accum, (0, 2, 1))\n        n_accum = np.transpose(n_accum, (0, 2, 1))\n        m_accum = np.transpose(m_accum, (0, 2, 1))\n\n        if s_flag:\n            soma_accum = np.stack(soma_accum)\n            soma_accum = np.transpose(soma_accum, (0, 2, 1))\n\n        if s_flag:\n            yield {'traces': x_accum,\n                   'noises': n_accum,\n                   'masks': m_accum,\n                   'soma_spikes': soma_accum}\n        else:\n            yield {'traces': x_accum,\n                   'noises': n_accum,\n                   'masks': m_accum}\n\n\ndef window_generator(trace_length, window_size, n_windows):\n    \"\"\"\n    :param trace_length:\n    :param window_size:\n    :param n_windows:\n    :return:\n    \"\"\"\n    for _ in range(n_windows):\n        left_indx = random.randint(0, trace_length - window_size - 1)\n        yield np.arange(left_indx, left_indx + window_size)\n\n\ndef minibatch_generator_old(data, batch_size, window_size, noise_flag,\n                            int_min, int_max, interval=None, epoch_size=None):\n    trace_length = data['traces'].shape[0]\n    if epoch_size is None:\n        epoch_size = trace_length // window_size\n\n    for epoch_idx in range(epoch_size):\n        x_accum = []\n        n_accum = []\n        m_accum = []\n        for i in range(batch_size):\n            if interval is None:\n                interval = np.random.randint(int_min, int_max)\n            t_start = interval * window_size\n            t_end = t_start + window_size\n            x_ = data['traces'][t_start:t_end, :]\n            x_accum.append(x_)\n            if noise_flag:\n                n_x = data['noises'][t_start:t_end, :]\n                n_accum.append(n_x)\n            m_x = data['noises'][t_start:t_end, :]\n            m_accum.append(m_x)\n\n        x_accum = np.stack(x_accum)\n        if noise_flag:\n            n_accum = np.stack(n_accum)\n        m_accum = np.stack(m_accum)\n        if noise_flag:\n            yield {'traces': x_accum,\n                   'noises': n_accum,\n                   'masks': m_accum}\n        else:\n            yield {'traces': 
x_accum}\n","sub_path":"bayesdend/utils/batch_util.py","file_name":"batch_util.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"477554209","text":"import urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport datetime\r\n\r\n# Get date.\r\nd = datetime.datetime.date(datetime.datetime.now())\r\n\r\n# # Build URL.\r\n# url = 'https://www.statbunker.com/competitions/FantasyFootballPlayersStats?comp_id=515'\r\n\r\n# # Get HTML file.\r\n# r = urllib2.urlopen(url)\r\n# html = r.read()\r\n\r\nf = open('FF_EPL_15_16.html', 'r')\r\nhtml = f.read()\r\nf.close()\r\n\r\n# Use BeautifulSoup to parse.\r\nsoup = BeautifulSoup(html, 'html.parser')\r\n\r\ntable = soup.find_all(\"tr\")\r\n\r\nfp = open('FF_EPL_15-16.csv', 'w')\r\n\r\nfp.write('Player,Points,Position,MatchesStarted,Goals,Assists,CleanSheetsFull,CleanSheetsPartial,YellowCards,RedCards,StartedAsSub,CameOn,TakenOff,PenaltySaved,PenaltyMissed,GoalsConceded,GoalsConceded1P,OwnGoals,\\n')\r\n\r\nfor i in range(1, len(table)-1):\r\n if (\"Goalkeeper\" in table[i].text):\r\n td = table[i].find_all(\"td\")\r\n #print(td[0].text + \" \" + td[1].text + \" \" + td[3].text)\r\n print('Found data for ' + td[0].text)\r\n fp.write(td[0].text + ',' + td[1].text + ',' + td[2].text + ',' + td[3].text + ',' + td[4].text + ',' + td[5].text + ',' + td[6].text + ',' + td[7].text + ',' + td[8].text + ',' + td[9].text + ',' + td[10].text + ',' + td[11].text + ',' + td[12].text + ',' + td[13].text + ',' + td[14].text + ',' + td[15].text + ',' + td[16].text + ',' + td[17].text + ',\\n')\r\n\r\nfp.close()\r\n","sub_path":"statbunkerFF.py","file_name":"statbunkerFF.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"187624518","text":"from PIL import Image, ImageTk, ImageOps\nimport tkinter\nfrom tkinter import filedialog\nimport functions\nimport getpass\nimport os\nimport praw\nimport requests\nfrom io import BytesIO\nimport random\n\nreddit = praw.Reddit(client_id='[REDACTED]',\n client_secret='[REDACTED]',\n user_agent='[REDACTED]')\n\nglobal imagepos\nimagepos = 0\n\nif __name__ == \"__main__\":\n functions.clear()\n password = getpass.getpass(\"Password: \")\n if not functions.login(password):\n import sys\n print(\"Invalid password\")\n sys.exit(1)\n\nfunctions.clear()\n\nglobal images\nimages = []\n\nglobal imagelabel\n\n\ndef handle_keypress(event):\n if str(event.keysym) == \"Left\":\n if imagepos > 0:\n back()\n if str(event.keysym) == \"Right\":\n try:\n _ = images[imagepos + 1]\n except:\n pass\n else:\n forward()\n if str(event.keysym) == \"Escape\":\n root.quit()\n\n\ndef forward():\n global imagepos, imagelabel\n imagelabel.grid_forget()\n #imagepos = (imagepos + 1) % len(images)\n imagepos += 1\n backbutton[\"state\"] = \"normal\"\n try:\n _ = images[imagepos + 1]\n except:\n forwardbutton[\"state\"] = \"disabled\"\n display_image(images)\n\n\ndef back():\n global imagepos, imagelabel\n imagelabel.grid_forget()\n imagepos -= 1\n forwardbutton[\"state\"] = \"normal\"\n if imagepos == 0:\n backbutton[\"state\"] = \"disabled\"\n display_image(images)\n\n\ndef get_image():\n path = filedialog.askopenfilename()\n load_image(path)\n\n\ndef get_folder():\n path = filedialog.askdirectory()\n load_directory(path)\n\n\ndef load_image(path):\n if path == ():\n return\n global images\n images = []\n if functions.validate(path):\n image = Image.open(path)\n width, height = 
image.size\n longer = (width if width > height else height)\n if longer > 750:\n ratio = 750 / longer\n if longer == width:\n width = 750\n height = int(round(height * ratio, 0))\n else:\n height = 750\n width = int(round(width * ratio, 0))\n image = image.resize((width, height), Image.ANTIALIAS)\n imgobj = ImageTk.PhotoImage(image)\n\n images.append(imgobj)\n if path.endswith(\".enc\"):\n temp = functions.decodepath(path, delete=False)\n if functions.validate(temp):\n image = Image.open(temp)\n width, height = image.size\n longer = (width if width > height else height)\n if longer > 750:\n ratio = 750 / longer\n if longer == width:\n width = 750\n height = int(round(height * ratio, 0))\n else:\n height = 750\n width = int(round(width * ratio, 0))\n image = image.resize((width, height), Image.ANTIALIAS)\n imgobj = ImageTk.PhotoImage(image)\n images.append(imgobj)\n os.remove(temp)\n display_image(images)\n\n\ndef load_directory(directory):\n global images, imagepos\n imagepos = 0\n if directory == ():\n return\n images = []\n if not os.path.isdir(directory):\n return\n files = [\n os.path.join(directory, f) for f in os.listdir(directory)\n if os.path.isfile(os.path.join(directory, f))\n ]\n files = sorted(files)\n for file in files:\n if functions.validate(file):\n image = Image.open(file)\n width, height = image.size\n longer = (width if width > height else height)\n if longer > 750:\n ratio = 750 / longer\n if longer == width:\n width = 750\n height = int(round(height * ratio, 0))\n else:\n height = 750\n width = int(round(width * ratio, 0))\n image = image.resize((width, height), Image.ANTIALIAS)\n imgobj = ImageTk.PhotoImage(image)\n images.append(imgobj)\n if file.endswith(\".enc\"):\n temp = functions.decodepath(file, delete=False)\n if functions.validate(temp):\n image = Image.open(temp)\n width, height = image.size\n longer = (width if width > height else height)\n if longer > 750:\n ratio = 750 / longer\n if longer == width:\n width = 750\n height = int(round(height * ratio, 0))\n else:\n height = 750\n width = int(round(width * ratio, 0))\n image = image.resize((width, height), Image.ANTIALIAS)\n imgobj = ImageTk.PhotoImage(image)\n images.append(imgobj)\n os.remove(temp)\n\n global forwardbutton\n if len(images) > 1:\n forwardbutton[\"state\"] = \"normal\"\n display_image(images)\n\n\ndef save_file():\n global imglist, imagepos\n filetypes = [(\"PNG Image\", \"*.png\"), (\"All Files\", \"*.*\")]\n f = filedialog.asksaveasfile(filetypes=filetypes)\n path = f.name\n image = imglist[imagepos]\n image.convert(\"RGBA\")\n image.save(path)\n f.close()\n functions.encodepath(path)\n\n\ndef display_image(images):\n global imagelabel, imagepos\n if len(images) < 1:\n imagelabel = tkinter.Label()\n else:\n imagelabel = tkinter.Label(image=images[imagepos])\n global backbutton\n backbutton = tkinter.Button(root,\n text=\"<<\",\n command=back,\n state=tkinter.DISABLED)\n\n global forwardbutton\n forwardbutton = tkinter.Button(root,\n text=\">>\",\n command=forward,\n state=tkinter.DISABLED)\n if imagepos > 0:\n backbutton[\"state\"] = \"normal\"\n if len(images) > 1 and (imagepos + 1) < len(images):\n forwardbutton[\"state\"] = \"normal\"\n\n imagelabel.grid(row=1, column=0, columnspan=5)\n backbutton.grid(row=0, column=0)\n forwardbutton.grid(row=0, column=4)\n if len(images) > 0:\n height = images[imagepos].height() + 33\n width = images[imagepos].width()\n root.geometry(\"{}x{}\".format(width + 1, height))\n root.update()\n savebutton = tkinter.Button(root, text=\"Save Image\", 
command=save_file)\n    savebutton.grid(row=0, column=2)\n\n\nroot = tkinter.Tk()\nroot.lift()\nroot.attributes('-topmost', True)\nroot.after_idle(root.attributes, '-topmost', False)\nroot.title(\"Private Image Viewer\")\n\nglobal directory\n\nglobal imagelabel\n#imagelabel = tkinter.Label(image=images[0])\nimagelabel = tkinter.Label()\n\nimagebutton = tkinter.Button(root, text=\"Select Image\", command=get_image)\nexitbutton = tkinter.Button(root, text=\"Exit PIV\", command=root.quit)\nfolderbutton = tkinter.Button(root, text=\"Select Folder\", command=get_folder)\n\nimagebutton.grid(row=0, column=1)\nexitbutton.grid(row=0, column=2)\nfolderbutton.grid(row=0, column=3)\n\nglobal backbutton\nbackbutton = tkinter.Button(root,\n                            text=\"<<\",\n                            command=back,\n                            state=tkinter.DISABLED)\n\nglobal forwardbutton\nforwardbutton = tkinter.Button(root,\n                               text=\">>\",\n                               command=forward,\n                               state=tkinter.DISABLED)\n\n# load_directory(directory)\n# display_image(images)\n\"\"\"\nimagelabel.grid(row=0, column=0, columnspan=5)\nbackbutton.grid(row=1, column=0)\nforwardbutton.grid(row=1, column=4)\n\"\"\"\nroot.bind(\"<Key>\", handle_keypress)\n\nsubname = input(\"Subreddit: r/\")\nsubreddit = reddit.subreddit(subname)\ntry:\n    for _ in subreddit.top(limit=1):\n        pass\nexcept:\n    print(\"Invalid subreddit: r/{}\".format(subname))\n    import sys\n    sys.exit(1)\nprint(\"Loading...\")\n\nposts = []\ni = 1\nfor post in subreddit.hot(limit=None):\n    if i > 50:\n        break\n    if not post.is_self:\n        posts.append(post.url)\n        i += 1\n\nglobal imglist\nimglist = []\nimages = []\nrandom.shuffle(posts)\nfor url in posts:\n    response = requests.get(url)\n    try:\n        image = Image.open(BytesIO(response.content))\n    except:\n        continue\n    imglist.append(image)\n    width, height = image.size\n    longer = (width if width > height else height)\n    if longer > 750:\n        ratio = 750 / longer\n        if longer == width:\n            width = 750\n            height = int(round(height * ratio, 0))\n        else:\n            height = 750\n            width = int(round(width * ratio, 0))\n        image = image.resize((width, height), Image.ANTIALIAS)\n    imgobj = ImageTk.PhotoImage(image)\n    images.append(imgobj)\nprint(\"Done\")\n\ndisplay_image(images)\n\n\ndef main():\n    try:\n        root.mainloop()\n    except KeyboardInterrupt:\n        root.quit()\n\n\nif __name__ == \"__main__\":\n    main()\n    functions.clear()\n","sub_path":"python/2020-04-23_Vault/Reddit PIV.py","file_name":"Reddit PIV.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"515061675","text":"from model import dynamics, cost\nimport numpy as np\nfrom numpy.linalg import inv, norm\n\n# Riccati recursion\ndef Riccati(A,B,Q,R):\n\n    # implement infinite horizon riccati recursion\n    n = A.shape[0]\n    P = np.zeros((n,n))\n    BT = np.transpose(B)\n    AT = np.transpose(A)\n\n    print(A.shape)\n    print(B.shape)\n    print(Q.shape)\n    print(R.shape)\n\n\n    l0_1 = inv(R + np.matmul(np.matmul(BT, P), B)) \n    l0_2 = np.matmul(np.matmul(BT, P), A)\n    L = np.matmul(l0_1, l0_2) \n\n    L_old = np.ones((2,4))\n\n    while norm(L - L_old,2) > 1e-4:\n        p1 = np.matmul(np.matmul(AT, P), A)\n        p2 = np.matmul(np.matmul(AT, P), B)\n        p3 = inv(R + np.matmul(np.matmul(BT, P), B))\n        p4 = np.matmul(np.matmul(BT, P), A)\n        P = p1 - np.matmul(np.matmul(p2, p3), p4) + Q  # discrete-time Riccati update (note the minus sign)\n        L_old = L\n        l1 = inv(R + np.matmul(np.matmul(BT, P), B))\n        l2 = np.matmul(np.matmul(BT, P), A)\n        L = np.matmul(l1, l2)\n    \n    return L,P\n\ndef simulate():\n    # dynfun = dynamics(stochastic=False)\n    dynfun = dynamics(stochastic=True) # uncomment for stochastic dynamics\n\n    costfun = cost()
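\n\n    # A sketch of the math implemented by Riccati() above -- the standard infinite-horizon\n    # discrete-time LQR recursion (assuming the usual quadratic stage cost x'Qx + u'Ru):\n    #   P <- Q + A'PA - A'PB (R + B'PB)^{-1} B'PA\n    #   L  = (R + B'PB)^{-1} B'PA\n    # iterated until L converges; the loop below then plays the stationary policy u = -Lx.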
\n\n    T = 100 # episode length\n    N = 100 # number of episodes\n    gamma = 0.95 # discount factor\n    A = dynfun.A\n    B = dynfun.B\n    Q = costfun.Q\n    R = costfun.R\n\n    L,P = Riccati(A,B,Q,R)\n\n    total_costs = []\n\n    for n in range(N):\n        costs = []\n        \n        x = dynfun.reset()\n        for t in range(T):\n            \n            # policy \n            u = (-L @ x)\n            \n            # get reward\n            c = costfun.evaluate(x,u)\n            costs.append((gamma**t)*c)\n            \n            # dynamics step\n            x = dynfun.step(u)\n            \n        total_costs.append(sum(costs))\n        \n    return np.mean(total_costs)\n\ncost_list = []\ntrail_num = 20\nfor i in range(trail_num):\n    c = simulate()\n    cost_list.append(c)\n    print(c)\n\nprint(\"average cost over {} trials is {}\".format(trail_num, np.mean(cost_list)))","sub_path":"2020/HW5/problem_1/part_a.py","file_name":"part_a.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"478865411","text":"import re, csv\ndef email(data):\n\tEMAIL_REGEX = re.compile(r\"[^@]+@[^@]+\\.[^@]+\")\n\temails = []\n\tfor line in data:\n\t\tfor s in line:\n\t\t\temails.extend(re.findall(EMAIL_REGEX, s))\n\n\treturn emails\n\nwith open('faculty.csv', 'rb') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=',')\n\t\tparsed_data = list(reader)\n\nemails = email(parsed_data)\n\nwriter = csv.writer(open('emails.csv', 'wb'))\nfor e in emails:\n\twriter.writerow([e])\n","sub_path":"python/advanced_python_csv.py","file_name":"advanced_python_csv.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"121811562","text":"import tensorflow as tf\nfrom tensorflow.keras.optimizers import Adam\nimport tensorflow_probability as tfp\nfrom networks import ActorCriticNetwork\n\n\nclass Agent:\n\n    def __init__(self, alpha=0.0003, gamma=0.99, n_actions=2):\n        self.gamma = gamma\n        self.n_actions = n_actions\n        self.action = None\n        self.action_space = [i for i in range(self.n_actions)]\n\n        self.actor_critic = ActorCriticNetwork(n_actions=n_actions)\n        self.actor_critic.compile(optimizer=Adam(learning_rate=alpha))\n\n    def choose_action(self, observation):\n        state = tf.convert_to_tensor([observation]) # add batch dimension\n        _, probs = self.actor_critic(state)\n\n        action_probabilities = tfp.distributions.Categorical(probs=probs)\n        action = action_probabilities.sample()\n        self.action = action\n\n        return action.numpy()[0] # remove batch dimension\n\n    def save_model(self):\n        print('Saving model.')\n        self.actor_critic.save_weights(self.actor_critic.checkpoint_file)\n\n    def load_model(self):\n        print('Loading model')\n        self.actor_critic.load_weights(self.actor_critic.checkpoint_file)\n\n    def learn(self, state, reward, next_state, done):\n        state = tf.convert_to_tensor([state], dtype=tf.float32)\n        next_state = tf.convert_to_tensor([next_state], dtype=tf.float32)\n        reward = tf.convert_to_tensor(reward, dtype=tf.float32)\n\n        with tf.GradientTape() as tape:\n            state_value, probs = self.actor_critic(state)\n            next_state_value, _ = self.actor_critic(next_state)\n\n            state_value = tf.squeeze(state_value)\n            next_state_value = tf.squeeze(next_state_value)\n\n            action_probs = tfp.distributions.Categorical(probs=probs)\n            log_prob = action_probs.log_prob(self.action)\n\n            delta = reward + self.gamma * next_state_value * (1 - int(done)) - state_value\n            actor_loss = -log_prob * delta\n            critic_loss = delta**2\n\n            total_loss = actor_loss + critic_loss\n\n        gradient = tape.gradient(total_loss, 
self.actor_critic.trainable_variables)\n self.actor_critic.optimizer.apply_gradients(zip(gradient, self.actor_critic.trainable_variables))\n","sub_path":"Actor-Critic/tf/actor_critic.py","file_name":"actor_critic.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"249673772","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 25 16:00:41 2021\r\n\r\n@author: aniruddh.phukan\r\n\"\"\"\r\n\r\n# Problem 1\r\n\r\ns = 'hobnobobsticks'\r\n\r\ndef count_vowels(s):\r\n \r\n vowels = 'aeiou'\r\n count = 0\r\n \r\n for v in vowels:\r\n count += s.count(v)\r\n return(count)\r\n\r\nprint(count_vowels(s))\r\n\r\n\r\n# Problem 2\r\n\r\ndef count_string(s):\r\n \r\n string_to_find = 'bob'\r\n count = 0\r\n \r\n for i in range(len(s)):\r\n if s[i:i+3]==string_to_find:\r\n count += 1\r\n \r\n return(count)\r\n \r\nprint(count_string(s))\r\n\r\n\r\n# Problem 3\r\n\r\ndef longest_asc_order_string(s):\r\n \r\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\r\n index = [0]*len(s)\r\n \r\n for i in range(len(s)):\r\n index[i] = alphabet.find(s[i])\r\n \r\n start, end = 0,1\r\n longest_str, cur_str = s[start:end], s[start:end]\r\n \r\n for j in range(len(s)-1):\r\n \r\n if index[j] <= index[j+1]:\r\n end = j+2\r\n cur_str = s[start:end]\r\n \r\n if len(cur_str)>len(longest_str):\r\n longest_str = cur_str\r\n \r\n else:\r\n start = j+1\r\n end = j+2\r\n \r\n cur_str = s[start:end]\r\n \r\n \r\n return('Longest substring in alphabetical order is: {}'.format(longest_str))\r\n\r\nprint(longest_asc_order_string(s))","sub_path":"pSet1.py","file_name":"pSet1.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"230686834","text":"# -*- coding: utf-8 -*-\n\"\"\"Positional Evaluation\"\"\"\n\nimport operator\n\nimport chess.pregame\n\nclass Evaluator:\n \"\"\"State Evaluator\n\n A positive score indicates white is winning, while a negative score\n means black is winning. 
Evaluation features used are Material, Piece\n    Square Tables, Center Control, Mobility, Connectivity, Development\n    \"\"\"\n\n    MODES = [LAZY, NORMAL, EAGER] = range(3)\n\n    def __init__(self):\n\n        self.pieceSquareTables = chess.pregame.load_piece_square_tables()\n        self.masks = chess.pregame.load_evaluation_masks()\n\n        self.pieceValues = (\n            1, # PAWN\n            3, # BISHOP\n            3, # KNIGHT\n            5, # ROOK\n            9, # QUEEN\n            0 # KING\n        )\n\n        self.weights = (\n            1.5, # Material\n            0.001, # Piece Square Table Values\n            1, # development\n            0.3, # Center Control\n            0.02, # tempo bonus\n            5, # Connectivity\n            3, # Mobility\n            0.2, # Bishop Pair bonus\n            2, # Pawn structure score\n            1, #pressure\n        )\n\n        self.memo = [{} for _ in range(len(Evaluator.MODES))]\n\n    def __call__(self, state, *args, mode=NORMAL):\n        \"\"\"Main Evaluation function\"\"\"\n\n        if state.hash in self.memo[mode]:\n            return self.memo[mode][state.hash]\n\n\n        scores = [score(state,*args) for score in self.get_scores(mode)]\n        dot_product = lambda l1,l2: sum(v1*v2 for v1,v2 in zip(l1,l2))\n        valuation = dot_product(scores, self.weights)\n        self.memo[mode][state.hash] = valuation\n\n        # if mode == 1:\n        #     print('_'*10)\n        #     print(state)\n        #\n        #     print('scores', list(scores))\n        #     print('weighted', [v1*v2 for v1,v2 in zip(scores, self.weights)])\n        #     print('valuation:',valuation)\n        #     print('_'*10)\n\n        return valuation\n\n    def score(func):\n        \"\"\"Evaluation Feature to be used in the Linear Combination\"\"\"\n        def decorator(self, *args):\n            whiteScore = func(self, 0, *args)\n            blackScore = func(self, 1, *args)\n            return round(whiteScore - blackScore, 4)\n        return decorator\n\n    @score\n    def material(self, color, state, *args):\n        \"\"\"Piece Value Sum\n\n        Pawn: 1 -- Knight: 3 -- Bishop: 3 -- Rook: 5 -- Queen: 9\n        \"\"\"\n        pieces = state.pieces.get_color(color)\n        pieceValues = map(lambda p: self.pieceValues[p[1]], pieces)\n        return sum(pieceValues)\n\n    @score\n    def piece_square_value(self, color, state, *args):\n        \"\"\"Piece-Square Value Sum\n\n        For each piece, a piece square table contains a score for every\n        square indicating the strength of a square for that piece.\n        \"\"\"\n        pieces = state.pieces.get_color(color)\n        tables = self.pieceSquareTables[color]\n        pst_values = map(lambda piece: tables[piece[1]][piece[0]], pieces)\n        return sum(pst_values) / state.pieces.size(color)\n\n    @score\n    def development(self, color, state, *args):\n        \"\"\"Piece Development\"\"\"\n        isPieceDeveloped = lambda mask: mask & state.colors[color] == 0\n        developmentMap = map(isPieceDeveloped, self.masks.minorPieceSquares[color])\n        return sum(developmentMap) / 4 # 4 minor pieces per color\n\n\n    @score\n    def center_control(self, color, state, attacks):\n        \"\"\"Determined by the number of center squares attacked\n\n        Center squares are D4, E4, D5, E5.\n        \"\"\"\n        def countCenterSquares(attack):\n            isAttackingSquare = lambda square: attack & square != 0\n            return sum(map(isAttackingSquare, self.masks.centerSquares))\n        return sum(map(countCenterSquares, attacks[color]))\n\n    @score\n    def connectivity(self, color, state, attacks):\n        \"\"\"Indicates how well pieces are working together\"\"\"\n        def countDefences(attack):\n            isDefendingPiece = lambda p: p[0] & attack != 0\n            return sum(map(isDefendingPiece, state.pieces.get_color(color)))\n        defenceBoolMap = map(countDefences, attacks[color])\n        return sum(defenceBoolMap) / state.pieces.size(color)\n\n    @score\n    def mobility(self, color, state, attacks):\n        \"\"\"Amount of legal moves\"\"\"\n        pass\n\n    @score\n    def king_safety(self, state, attacks, color):\n        \"\"\"TODO\"\"\"\n        
pass\n\n @score\n def pawn_structure(self, state, attacks, color):\n \"\"\"TODO\"\"\"\n pass\n\n @score\n def pressure(self, state, attacks, color):\n \"\"\"TODO\"\"\"\n pass\n\n @score\n def tempo(self, color, state, *args):\n \"\"\"Bonus for the moving player\n\n The purpose of a tempo bonus is to discourage cyclical repetitions.\n \"\"\"\n return int(state.colorToMove == color)\n\n def get_scores(self, mode):\n \"\"\"Gets scores used for an evaluation mode\n\n Modes include Lazy, normal, eager\n \"\"\"\n\n # Lazy Evaluation\n yield from (self.material, self.piece_square_value)\n if mode == 0: return\n\n # Normal Evaluation\n yield from (self.development, self.center_control, self.tempo)#, self.connectivity)\n if mode == 1: return\n\n # Eager Evaluation\n yield from (self.mobility, self.king_safety, self.connectivity, self.king_safety, self.pawn_structure, self.pressure)\n","sub_path":"chess/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"31151180","text":"# coding=utf-8\n__author__ = 'Gareth Williams'\nimport json\nimport urllib3\nimport certifi\n\nfrom system.decorators import *\nfrom system.logging import Logger\nfrom twisted.words.protocols import irc\nfrom pprint import pprint\nimport time\n\n\nclass plugin(object):\n \"\"\"\n ///////////////////////////////////////////////\n This plugin contains debug\n commands.\n ///////////////////////////////////////////////\n \"\"\"\n\n commands = {\n \"conf\": \"configDebug\",\n \"lst\": \"listlogin\",\n \"testing\": \"test\"\n }\n\n tests = {'Gaz': ['host', '1456501529']}\n\n def __init__(self, irc):\n self.irc = irc\n self.logs = Logger()\n\n def configDebug(self, user, channel, arguments):\n self.irc.sendnotice(user, \"Not Implemented Yet\")\n\n def test(self, user, channel, arguments):\n ts = str(time.time()).split('.')[0]\n timeleft = str(int(ts) - int(self.tests[user][1]))\n self.irc.sendnotice(user, \"%s %s\" % (timeleft, ts))\n\n\n\n\n @config(\"rank\", \"voice\")\n def listlogin(self, user, channel, arguments):\n\n loggedinUsr = ''\n GuestUsr = ''\n\n for luser in self.irc.logged_in:\n loggedinUsr = loggedinUsr + luser + \", \"\n\n for luser in self.irc.not_logged_in:\n GuestUsr = GuestUsr + luser + \", \"\n\n self.irc.sendnotice(user, \"Users logged in with nickserv: \" + loggedinUsr)\n self.irc.sendnotice(user, \"Users not logged in with nickserv: \" + GuestUsr)\n\n name = \"debug\"\n","sub_path":"plugins/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"107316629","text":"from collections import deque\n\n\nclass DataSetLoader:\n def __init__(self, filename, size=16):\n self.filename = filename\n self.size = size\n\n def load(self):\n features = []\n labels = []\n\n with open(self.filename, 'r') as ins:\n for line in ins:\n feature_row = [0 for _ in range(self.size)]\n\n pieces = deque(line.split(' '))\n\n new_label = int(pieces.popleft())\n labels.append(new_label)\n\n for feature_data in pieces:\n feature_parts = feature_data.split(':')\n feature_row[int(feature_parts[0]) - 1] = float(feature_parts[1])\n\n features.append(feature_row)\n\n return features, labels\n","sub_path":"Project/02. 
decision-tree/data_set_loader.py","file_name":"data_set_loader.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"86536075","text":"\"\"\"\nGlobalSuccessor spider created on the top of ATSSpider\n\nscrapy crawl global_successor -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://gs6.globalsuccessor.com/fe/tpl_axa01.asp\"\n\nSample URL:\n http://gs19.globalsuccessor.com/fe/tpl_amey01.asp\n https://gs10.globalsuccessor.com/fe/tpl_britishlibrary01.asp\n http://gs19.globalsuccessor.com/fe/tpl_sabic04.asp\n https://gs6.globalsuccessor.com/fe/tpl_axa01.asp\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\nfrom urllib import urlencode\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, HtmlFormatter, NormalizedJoin, Prefix, RemoveBadElements\nfrom brightcorp.lib.utils import get_hidden_inputs\n\npattern = {\n 'ref_number': compile(r'jobid=(\\d+),'),\n}\n\n\nclass GlobalSuccessor(ATSSpider):\n\n name = 'global_successor'\n download_delay = 2\n\n def parse(self, response):\n params = get_hidden_inputs(response)\n cookie_jar = response.meta.setdefault('cookie_jar', CookieJar())\n cookie_jar.extract_cookies(response, response.request)\n request = Request(\n callback=self.parse_jobs_list,\n meta={\n 'cookie_jar': cookie_jar,\n 'dont_merge_cookies': True,\n },\n dont_filter=True,\n url=urljoin(response.url, '?%s' % urlencode(params))\n )\n cookie_jar.add_cookie_header(request)\n yield request\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n\n for href in sel.xpath(\n '//tr/td[@class=\"searchresultsjoblink\"]/a/@href |'\n '//tr/td[contains(@class, \"igsearchresultstitle\")]/a/@href |'\n '//table[@id=\"searchresultslist\"]/tbody/tr/td/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n\n # pagination\n next_page = sel.xpath(\n '//p/span[@id=\"INIT_NEXTPAGE2\"]/a[contains(text(), \"Next Page\")]/@href |'\n '//tr/td//a[@title=\"Next Page\"]/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(response.url, next_page[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n match = pattern['ref_number'].search(response.url)\n if match:\n ref_number = match.group(1)\n # construct job details url\n url = urljoin(\n response.url, '?newms=jj&id=%s&newlang=1' % ref_number\n )\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_xpath(\n 'title',\n '//div/div[@id=\"igWriteJob\"]/h2/text() |'\n '//tr/td/div[@id=\"webdescription\"]/text() |'\n '//tr/td/div[@id=\"igWriteJob\"]/h2/text() |'\n '//table[@id=\"igWriteJob\"]//tr/td/h2/text() |'\n '//tr/td[@class=\"tablecontent\" and @valign=\"middle\"]/b/text() |'\n '//div/h1[@id=\"jobTitleWeb\"]/text()'\n )\n loader.add_xpath(\n 'location',\n [\n '//dl[@id=\"jobCodeLists\"]/dt[contains(text(), \"Location\")]/following-sibling::dd[1]/text()',\n '//dl[@id=\"jobCodeLists\"]/dt[contains(text(), \"Region\") or contains(text(), \"Country\")]/following-sibling::dd[1]/text()',\n '//tr/td/span[contains(text(), \"Location\")]/../following-sibling::td[1]/text() |'\n '//td/p/b[contains(text(), 
\"Location\")]/following-sibling::text()[1]',\n ],\n NormalizedJoin(', ')\n )\n loader.add_value(\n 'referencenumber',\n ref_number,\n Prefix('%s-' % self.name)\n )\n loader.add_value('url', url)\n loader.add_xpath(\n 'description',\n '//div[@id=\"igWriteJob\"]/dl[@id=\"jobCodeLists\"]/following-sibling::node()[following-sibling::*[@class=\"list_nobullet\" or @id=\"igSendFile\"]] |'\n '//tr/td/span[@class=\"jdtext\"] |'\n '//div[@class=\"section\"][div[@class=\"header\"]/h3[contains(text(), \"Job Description\")]] |'\n '//td/p/b[contains(text(), \"Description\") or contains(text(), \"Closing date\")]/../following-sibling::node() |'\n '//div[@id=\"igWriteJob\"]/dl/following-sibling::node()[following-sibling::div[@id=\"applylinks\"]]',\n RemoveBadElements(['img', 'a']),\n HtmlFormatter()\n )\n loader.add_xpath(\n 'jobtype',\n '//dl[@id=\"jobCodeLists\"]/dt[contains(text(), \"Position Type\")]/following-sibling::dd[1]/text() |'\n '//tr/td/span[contains(text(), \"Position Type\")]/../following-sibling::td[1]/text()'\n )\n loader.add_xpath(\n 'jobcategory',\n '//dl[@id=\"jobCodeLists\"]/dt[contains(text(), \"Category\")]/following-sibling::dd[1]/text()'\n )\n loader.add_xpath(\n 'baseSalary',\n '//dt[contains(text(), \"Salary\") or contains(text(), \"Remuneration\")]/following-sibling::dd[1]/text()'\n )\n loader.add_xpath(\n 'expiration_date',\n '//dt[contains(text(), \"Closing date\")]/following-sibling::dd[1]/text() |'\n '//td/p/b[contains(text(), \"Closing date for applications\")]/following-sibling::text()[1]',\n ConvertDateString('%d %B %Y')\n )\n loader.add_value('apply_url', url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/global_successor.py","file_name":"global_successor.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"70276829","text":"import os\n\nimport numpy as np\nimport torch\n\n\ndef cuda_reproducible_backend(cuda: bool) -> None:\n \"\"\"\n Function to set the CUDA backend to reproducible (i.e. deterministic) or to default configuration (per PyTorch\n 1.9.1).\n @param cuda: Parameter to set or unset the reproducability of the PyTorch CUDA backend.\n @type cuda: bool\n @return: None\n @rtype: None\n \"\"\"\n if cuda:\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n else:\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False\n\n\ndef init_reproducibility(torch_seed: int = 42, cuda: bool = False, numpy_seed: int = 43, hash_seed: int = 44) -> None:\n \"\"\"\n Function to pre-set all seeds for libraries used during training. Allows for re-producible network initialization,\n and non-deterministic number generation. 
Allows to prevent 'lucky' draws in network initialization.\n @param torch_seed: Integer seed to use for the PyTorch PRNG and CUDA PRNG.\n @type torch_seed: int\n @param cuda: Flag to indicate whether the CUDA backend needs to be set to its deterministic configuration.\n @type cuda: bool\n @param numpy_seed: Integer seed to use for NumPy's PRNG.\n @type numpy_seed: int\n @param hash_seed: Integer seed to use for Python's hash function PRNG; will set the PYTHONHASHSEED environment variable.\n @type hash_seed: int\n\n @return: None\n @rtype: None\n \"\"\"\n torch.manual_seed(torch_seed)\n if cuda:\n torch.cuda.manual_seed_all(torch_seed)\n cuda_reproducible_backend(True)\n np.random.seed(numpy_seed)\n os.environ['PYTHONHASHSEED'] = str(hash_seed)\n","sub_path":"fltk/nets/util/reproducability.py","file_name":"reproducability.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"480438344","text":"import datetime\n\n\nclass Participant:\n def __init__(self):\n self.name = \"\"\n self.isStillIn = True\n self.hasCheckedIn = False\n self.relapseDate = None\n\n @property\n def hasRelapsed(self):\n return self.relapseDate is not None\n\n def setFromLine(self, lineString):\n # format of participants.txt line:\n # name hasCheckedIn isStillIn\n # e.g.:\n # foobarbazblarg True True\n words = lineString.split()\n self.name = words[0]\n self.hasCheckedIn = words[1] == 'True'\n self.isStillIn = words[2] == 'True'\n if len(words) >= 4:\n self.relapseDate = datetime.datetime.strptime(words[3], \"%Y.%m.%d\").date()\n\n def relapseNowIfNotAlready(self):\n if self.isStillIn:\n self.isStillIn = False\n self.relapseDate = datetime.date.today()\n\n def relapseDayOfWeekIndex(self):\n if self.relapseDate:\n return self.relapseDate.weekday()\n else:\n return None\n\n def relapseDayOfWeekName(self):\n if self.relapseDayOfWeekIndex() is not None:\n return {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[self.relapseDayOfWeekIndex()]\n else:\n return None\n\n def asLine(self):\n answer = self.name + \" \" + str(self.hasCheckedIn) + \" \" + str(self.isStillIn)\n if self.relapseDate:\n answer += \" \"\n answer += self.relapseDate.strftime(\"%Y.%m.%d\")\n return answer\n","sub_path":"stayclean-2018-august/participant.py","file_name":"participant.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"271878567","text":"from __future__ import unicode_literals\n\nfrom django.apps import apps, AppConfig\nfrom django.db.models.signals import post_migrate\n\n\nclass SessionsConfig(AppConfig):\n name = 'talk'\n\n def ready(self):\n post_migrate.connect(create_talk_committee, sender=self)\n post_migrate.connect(set_default_event, sender=self)\n\n\ndef create_talk_committee(**kwargs):\n Group = apps.get_model('auth', 'Group')\n Permission = apps.get_model('auth', 'Permission')\n\n group, created = Group.objects.get_or_create(name='talk_committee')\n\n can_vote = Permission.objects.get(content_type__app_label='talk', codename='add_vote')\n can_comment = Permission.objects.get(content_type__app_label='talk', codename='add_talkcomment')\n group.permissions.set([can_comment, can_vote])\n\n group.save()\n\n\ndef set_default_event(verbosity=2, **kwargs):\n Event = apps.get_model('event', 'Event')\n default_event = Event.objects.current_event()\n\n Room = apps.get_model('talk', 'Room')\n rooms = Room.objects.filter(event=None)\n if verbosity >= 2:\n print(\"Found %d rooms\" % 
rooms.count())\n rooms.update(event=default_event)\n\n Track = apps.get_model('talk', 'Track')\n tracks = Track.objects.filter(event=None)\n if verbosity >= 2:\n print(\"Found %d tracks\" % tracks.count())\n tracks.update(event=default_event)\n\n Timeslot = apps.get_model('talk', 'Timeslot')\n timeslots = Timeslot.objects.filter(event=None)\n if verbosity >= 2:\n print(\"Found %d time slots\" % timeslots.count())\n timeslots.update(event=default_event)\n","sub_path":"devday/talk/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"464860046","text":"from kivy.lang import Builder\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivymd.uix.label import MDLabel\r\nfrom kivy.uix.button import Button\r\nfrom kivy.properties import NumericProperty, ListProperty,StringProperty\r\nfrom kivy.graphics import Color\r\nfrom kivy.metrics import dp\r\nfrom kivymd.font_definitions import theme_font_styles\r\n\r\n\r\nKV='''\r\n<Table>:\r\n orientation:'vertical' \r\n size_hint_y:0.95\r\n GridLayout:\r\n id:header\r\n spacing:2\r\n padding:[10,10,10,10]\r\n size_hint_y:None\r\n height:dp(28)\r\n ScrollView:\r\n size_hint_y:1 \r\n GridLayout:\r\n id:body\r\n spacing:2\r\n padding:[10,10,10,10]\r\n size_hint_y:None\r\n #spacing:dp(2)\r\n height:self.minimum_height\r\n BoxLayout:\r\n padding: 20\r\n orientation:'vertical'\r\n MDTextField:\r\n id: name\r\n hint_text: \"Item Name\"\r\n on_text: root.process1(self.text,'name')\r\n MDTextField:\r\n id: unit\r\n hint_text: \"Unit Price\"\r\n on_text: root.process1(self.text,'price')\r\n MDTextField:\r\n id:quan\r\n hint_text: \"Quantity\"\r\n on_text: root.process1(self.text,'quantity')\r\n MDTextField:\r\n id:tax\r\n hint_text: \"Tax\"\r\n on_text: root.process1(self.text,'tax')\r\n MDRaisedButton:\r\n pos_hint: {\"center_x\": .5}\r\n text: \"Add Item\"\r\n elevation_normal: 5\r\n md_bg_color: 1, 0, 1, 1\r\n on_press: root.addSingle()\r\n AnchorLayout:\r\n anchor_x:\"right\"\r\n anchor_y:\"bottom\"\r\n MDFloatingActionButton:\r\n icon:'arrow-right-bold'\r\n width: 40\r\n height: 40\r\n pos_hint: { \"center_y\": .9}\r\n text: \"Generate Invoice\"\r\n md_bg_color: app.theme_cls.primary_color\r\n on_press: app.print()\r\n \r\n\r\n\r\n
<Header>:\r\n padding:[10,10,10,10]\r\n canvas.before:\r\n Color:\r\n rgba: app.theme_cls.primary_light\r\n Rectangle:\r\n pos: self.pos\r\n size: self.size\r\n size_hint_y:None\r\n size_hint_x:header.size_hint_x\r\n height:dp(28)\r\n MDLabel:\r\n halign:\"center\"\r\n id:header\r\n text:root.text\r\n font_style:\"Body1\"\r\n\r\n<Cell>:\r\n padding:[10,10,10,10]\r\n canvas.before:\r\n Rectangle:\r\n pos: self.pos\r\n size: self.size\r\n size_hint_y:None\r\n size_hint_x:cell.size_hint_x\r\n height:dp(28)\r\n MDLabel:\r\n halign:\"center\"\r\n font_style:\"Body2\"\r\n id:cell\r\n text:root.text \r\n'''\r\nBuilder.load_string(KV)\r\nclass Header(BoxLayout):\r\n text = StringProperty()\r\n\r\n\r\n\r\nclass Cell(BoxLayout):\r\n text = StringProperty()\r\n\r\n\r\n\r\n\r\nclass Table(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super(Table, self).__init__(**kwargs)\r\n self.check=False\r\n self.cols = NumericProperty(1)\r\n self.table_content = []\r\n self.thead = ListProperty()\r\n self.tbody = ListProperty()\r\n self.color = [128, 0, 2, 0.8]\r\n self.tempItem=dict()\r\n self.thead=[\"Item\",\"Quan\",\"Price\",\"Tax\"]\r\n self.ids['header'].cols = len(self.thead)\r\n self.ids['body'].cols = len(self.thead)\r\n for i in self.thead:\r\n head = Header(text=i)\r\n self.ids['header'].add_widget(head)\r\n\r\n def process1(self,val,cat):\r\n if cat==\"name\":\r\n self.tempItem[\"name\"]=val\r\n elif cat==\"price\":\r\n self.tempItem[\"price\"]=val\r\n elif cat==\"tax\":\r\n self.tempItem[\"tax\"]=val\r\n elif cat==\"quantity\":\r\n self.tempItem[\"quantity\"]=val\r\n\r\n def addSingle(self):\r\n self.ids['header'].clear_widgets()\r\n self.ids['body'].clear_widgets()\r\n self.table_content.append({\"Item\":self.tempItem[\"name\"],\"Quan\":self.tempItem[\"quantity\"],\"Price\":self.tempItem[\"price\"],\"Tax\":self.tempItem[\"tax\"]})\r\n for i in self.table_content:\r\n self.thead =[]\r\n for j in i.keys():\r\n self.thead.append(j)\r\n #self.thead=[\"ITEM\",\"QUANTITY\",\"UNIT PRICE\",\"TAX\"]\r\n self.ids['header'].cols = len(self.thead)\r\n self.ids['body'].cols = len(self.thead)\r\n for i in self.thead:\r\n head = Header(text=i)\r\n self.ids['header'].add_widget(head)\r\n for i in self.table_content:\r\n for j in i.keys():\r\n body = Cell(text=i[j])\r\n self.ids['body'].add_widget(body)\r\n self.ids['name'].text=\"\"\r\n self.ids['quan'].text=\"\"\r\n self.ids['unit'].text=\"\"\r\n self.ids['tax'].text=\"\"\r\n\r\n","sub_path":"MyTable.py","file_name":"MyTable.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"404449261","text":"import math\n\nstart_num, last_num = map(int, input().split())\nnums = {x for x in range(start_num, last_num+1) if x == 2 or x % 2 ==1} # nums = the set of 2 and the odd numbers in the range\n\nfor odd_num in range(3, int(math.sqrt(last_num))+1, 2): # only odd numbers from 3 up to the square root of last_num\n nums -= {i for i in range(2 * odd_num, last_num + 1, odd_num)}\n # on each pass of the loop, subtract the set of multiples of the odd number\n \nfor sosu in sorted(nums) : \n if sosu > 1 :\n print(sosu) # print the primes one at a time, sorted in ascending order","sub_path":"basic/소수.py","file_name":"소수.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"590890301","text":"import time\nimport requests\nimport PIL\nfrom PIL import Image\nimport json\nfrom multiprocessing.pool import ThreadPool\nfrom io import BytesIO\nimport torchvision.transforms.functional as TF\nimport torchvision.transforms as transforms\nimport pickle\nimport 
numpy as np\nimport os\nimport shutil\nfrom tqdm import tqdm\nfrom multiprocessing import Process, Queue\nimport queue\n\nimport pycocotools\nfrom detectron2.structures import BoxMode\n\ndef absolute_paths(directory):\n filenames = sorted(os.listdir(directory))\n return [os.path.join(directory, filename) for filename in filenames]\n\ndef compute_bbox(mask):\n pos = np.where(mask)\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n return [xmin, ymin, xmax, ymax]\n\ndef to_coco(dataset_path):\n image_paths = [img_path for img_path in absolute_paths(os.path.join(dataset_path, 'images')) if img_path.endswith('.png')]\n #target_paths = absolute_paths(os.path.join(dataset_path, 'targets'))\n\n dataset_dicts = []\n\n for idx, image_path in enumerate(image_paths):\n target_path = os.path.join(dataset_path, 'targets', os.path.splitext(os.path.basename(image_path))[0] + '.pkl')\n with open(target_path, 'rb') as f:\n target = pickle.load(f)\n\n record = {}\n record['file_name'] = image_path\n record['image_id'] = idx\n record['height'] = target['size'][1]\n record['width'] = target['size'][0]\n\n objs = []\n for m in target['masks']:\n annotation = {'segmentation': pycocotools.mask.encode(np.asarray(m, order=\"F\")),\n 'bbox': compute_bbox(m),\n 'bbox_mode': BoxMode.XYXY_ABS,\n 'category_id': 0,}\n objs.append(annotation)\n record['annotations'] = objs\n dataset_dicts.append(record)\n return dataset_dicts\n\n\ndef save_mask_target(image, masks, name, dataset_path = 'dataset'):\n image.save(os.path.join(dataset_path, 'images', name + '.png'))\n with open(os.path.join(dataset_path, 'targets', name + '.pkl'), 'wb') as f:\n pickle.dump(masks, f, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef get_image_from_url(url):\n while True:\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n break\n time.sleep(2)\n response.raw.decode_content = True\n img = Image.open(BytesIO(response.content)).convert('RGB')\n return img\n\ndef augment(image, masks, crop_size):\n # Brightness\n brightness_factor = np.random.normal()*0.2 + 1\n image = TF.adjust_brightness(image, brightness_factor)\n\n # Contrast\n contrast_factor = np.random.normal()*0.2 + 1\n image = TF.adjust_contrast(image, contrast_factor)\n\n # Affine\n angle = np.random.uniform(-180, 180)\n shear = np.random.normal()*25\n scale = np.random.uniform(0.5, 2.0)\n translate = np.random.randint(-30, 30, size=2).tolist()\n image = TF.affine(image, angle, translate, scale, shear, resample=PIL.Image.BILINEAR, fillcolor=None)\n masks = [TF.affine(mask, angle, translate, scale, shear, resample=PIL.Image.BILINEAR, fillcolor=None) for mask in masks]\n\n # Random crop\n i, j, h, w = transforms.RandomCrop.get_params(\n image, output_size=(crop_size, crop_size))\n\n image = TF.crop(image, i, j, h, w)\n masks = [TF.crop(mask, i, j, h, w) for mask in masks]\n # Random horizontal flipping\n if np.random.random() > 0.5:\n image = TF.hflip(image)\n masks = [TF.hflip(mask) for mask in masks]\n\n # Random vertical flipping\n if np.random.random() > 0.5:\n image = TF.vflip(image)\n masks = [TF.vflip(mask) for mask in masks]\n\n # squeeze and binarize\n masks = [(np.array(mask)[:, :, 0] > 0.5).astype(np.uint8) for mask in masks]\n\n # prune masks that have no object or only a sliver of an object\n masks = [mask for mask in masks if mask[10:-10, 10:-10].any()]\n return image, masks\n\nclass Worker(Process):\n def __init__(self, task_queue, result_queue, img, masks, out_path, crop_size):\n super().__init__()\n 
self.task_queue = task_queue\n self.result_queue = result_queue\n self.img = img\n self.masks = masks\n self.out_path = out_path\n self.crop_size = crop_size\n\n def run(self):\n proc_name = self.name\n while True:#not stopping.is_set():\n try:\n index = self.task_queue.get(True, 1)\n sub_img, sub_masks = augment(self.img, self.masks, self.crop_size)\n target = {'masks': sub_masks, 'size': sub_img.size}\n save_mask_target(sub_img, target, f'{index:05d}', dataset_path=self.out_path)\n self.result_queue.put(index)\n except queue.Empty:\n return\n\ndef download_dataset(json_path, out_path, samples_per_img=100, num_threads=16, num_processes=4, selected_ids=None, crop_size=256):\n\n if os.path.exists(out_path):\n shutil.rmtree(out_path)\n os.makedirs(os.path.join(out_path, 'images'))\n os.makedirs(os.path.join(out_path, 'targets'))\n\n\n total_images = 0\n with open(json_path) as f:\n data = json.load(f)\n\n if selected_ids is not None:\n # Filter only selected images\n data = [img_obj for img_obj in data if img_obj['External ID'] in selected_ids]\n\n task_queue = Queue()\n result_queue = Queue()\n\n with tqdm(total=len(data)*samples_per_img) as pbar:\n for img_obj in data:\n\n img_url = img_obj['Labeled Data']\n if 'objects' not in img_obj['Label']:\n continue\n\n mask_urls = [instance['instanceURI'] for instance in img_obj['Label']['objects']]\n\n\n img = get_image_from_url(img_url)\n\n masks = list(ThreadPool(num_threads).imap_unordered(get_image_from_url, mask_urls))\n\n for _ in range(samples_per_img):\n task_queue.put(total_images)\n total_images += 1\n\n workers = []\n for proc_index in range(num_processes):\n p = Worker(task_queue, result_queue, img, masks, out_path, crop_size)\n p.daemon = True\n p.start()\n workers.append(p)\n\n\n\n for worker in workers:\n worker.join(200)\n for worker in workers:\n if worker.is_alive():\n print(\"Process timed out\")\n\n pbar.update(samples_per_img)\n # for index in range(samples_per_img):\n # while True:\n # try:\n # i = result_queue.get(True, 10)\n # pbar.update(1)\n # except queue.Empty:\n # break\n\n\ndef main():\n ##########################\n json_path = 'datasets/export-2020-09-24T13 50 37.489Z.json' #'datasets/export-2020-08-21T20 16 28.026Z.json'\n samples_per_img = 300\n crop_size = 256\n ##########################\n print('download dataset')\n\n\n train_dataset = [\n 'image_part_001.jpg',\n 'image_part_002.jpg',\n 'image_part_004.jpg',\n ]\n\n download_dataset(json_path,\n 'datasets/collagen_confocal',\n samples_per_img=samples_per_img,\n #selected_ids=train_dataset,\n crop_size=crop_size,\n num_processes = 12,\n num_threads = 16)\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"501758839","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 06 23:06:45 2017\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport math \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndx=1\r\ndt=1\r\nx=[-50]\r\nD=1.0/4*(dx**2)/dt\r\n#the density of x=0 is 1 ,others are 0\r\nrho=[([0]*101)for i in range(101)]\r\nfor n in [50]:\r\n rho[0][n]=1\r\n \r\n \r\ndef run():\r\n for i in range(0,100):\r\n x.append(x[-1]+1)\r\n for n in range(0,100):\r\n rho[i+1][n]=rho[i][n]+(D*dt/(dx**2))*(rho[i][n+1]+rho[i][n-1]-2*rho[i][n])\r\n \r\nrun() \r\n\r\ndef show_result(): \r\n plt.plot(x,rho[5],label=\"step number=5\")\r\n plt.plot(x,rho[20],label=\"step 
number=20\")\r\n plt.plot(x,rho[100],label=\"step number=100\")\r\n plt.legend()\r\n plt.title('One-dimsenional Diffusion')\r\n plt.xlabel('x')\r\n plt.ylabel(r'$\\rho{(x)}$')\r\n plt.xlim(-50,50)\r\n plt.show()\r\nshow_result()","sub_path":"diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"428738918","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nThe :class:`~openstack.session.Session` overrides\n:class:`~keystoneauth1.session.Session` to provide end point filtering and\nmapping KSA exceptions to SDK exceptions.\n\n\"\"\"\nimport re\n\nfrom keystoneauth1 import exceptions as _exceptions\nfrom keystoneauth1 import session as _session\n\nimport openstack\nfrom openstack import exceptions\n\nfrom six.moves.urllib import parse\n\nDEFAULT_USER_AGENT = \"openstacksdk/%s\" % openstack.__version__\nVERSION_PATTERN = re.compile('/v\\d[\\d.]*')\n\n\ndef parse_url(filt, url):\n result = parse.urlparse(url)\n path = result.path\n vstr = VERSION_PATTERN.search(path)\n if not vstr:\n path += '/' + filt.get_path()\n vstr = VERSION_PATTERN.search(path)\n start, end = vstr.span()\n prefix = path[:start]\n version = '/' + filt.get_path(path[start + 1:end])\n postfix = path[end:].rstrip('/') if path[end:] else ''\n url = result.scheme + \"://\" + result.netloc + prefix + version + postfix\n return url\n\n\ndef map_exceptions(func):\n def map_exceptions_wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except _exceptions.HttpError as e:\n if e.http_status == 404:\n raise exceptions.NotFoundException(\n message=e.message, details=e.details,\n response=e.response, request_id=e.request_id,\n url=e.url, method=e.method,\n http_status=e.http_status, cause=e)\n else:\n raise exceptions.HttpException(\n message=e.message, details=e.details,\n response=e.response, request_id=e.request_id,\n url=e.url, method=e.method,\n http_status=e.http_status, cause=e)\n except _exceptions.ClientException as e:\n raise exceptions.SDKException(message=e.message, cause=e)\n\n return map_exceptions_wrapper\n\n\nclass Session(_session.Session):\n\n def __init__(self, profile, user_agent=None, **kwargs):\n \"\"\"Create a new Keystone auth session with a profile.\n\n :param profile: If the user has any special profiles such as the\n service name, region, version or interface, they may be provided\n in the profile object. If no profiles are provided, the\n services that appear first in the service catalog will be used.\n :param user_agent: A User-Agent header string to use for the\n request. 
If not provided, a default of\n :attr:`~openstack.session.DEFAULT_USER_AGENT`\n is used, which contains the openstacksdk version\n When a non-None value is passed, it will be\n prepended to the default.\n :type profile: :class:`~openstack.profile.Profile`\n \"\"\"\n if user_agent is not None:\n self.user_agent = \"%s %s\" % (user_agent, DEFAULT_USER_AGENT)\n else:\n self.user_agent = DEFAULT_USER_AGENT\n super(Session, self).__init__(user_agent=self.user_agent, **kwargs)\n\n self.profile = profile\n\n def get_endpoint(self, auth=None, interface=None, **kwargs):\n \"\"\"Override get endpoint to automate endpoint filtering\"\"\"\n\n service_type = kwargs.get('service_type')\n filt = self.profile.get_filter(service_type)\n if filt.interface is None:\n filt.interface = interface\n url = super(Session, self).get_endpoint(auth, **filt.get_filter())\n return parse_url(filt, url)\n\n @map_exceptions\n def request(self, *args, **kwargs):\n return super(Session, self).request(*args, **kwargs)\n","sub_path":"openstack/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"181356974","text":"from socket import gethostbyname\n# debug\ndebug = True\nnum_samples = None\nworkers_per_gpu = 8\nif debug:\n num_samples = 200\n workers_per_gpu = 1\n\n# Server adaptation\n#if 'X399' in gethostname():\n#\timgs_per_gpu = 2\n#\ttotal_epochs = 12\n#\tload_from = None\n#\tresume_from = None\n#\tpretrained = 'torchvision://resnet50'\n#\tdata_root = '/home/cybercore/Workspace/dataset/coco/'\n#\twork_dir = '/home/cybercore/thuync/checkpoints/retinamask_r50_newloss/'\n#\tfp16 = dict(loss_scale=512.)\n\n#elif '184' in gethostname():\nlr_start = 1e-2\nlr_end = 1e-4\nimgs_per_gpu = 16\ntotal_epochs = 12\nresume_from = None\npretrained = 'torchvision://resnet50'\ndata_root= '/home/member/Workspace/dataset/coco/'\nwork_dir = '/home/member/Workspace/thuync/checkpoints/retinamask_r50_newloss/'\nload_from = \"/home/member/Workspace/thuync/checkpoints/retinanet_r50/retinanet_r50_fpn_1x_20181125-7b0c2548.pth\"\nfp16 = dict(loss_scale=512.)\n\n#elif '185' in gethostname():\n#\tlr_start = 1e-2\n#\tlr_end = 1e-4\n#\timgs_per_gpu = 16\n#\ttotal_epochs = 12\n#\tresume_from = None\n#\tpretrained = 'torchvision://resnet50'\n#\tdata_root= '/home/member/Workspace/dataset/coco/'\n#\twork_dir = '/home/member/Workspace/thuync/checkpoints/retinamask_r50_newloss/'\n#\tload_from = \"/home/member/Workspace/thuync/checkpoints/retinanet_r50/retinanet_r50_fpn_1x_20181125-7b0c2548.pth\"\n#\tfp16 = dict(loss_scale=512.)\n\n#elif '186' in gethostname():\n#\timgs_per_gpu = 16\n#\ttotal_epochs = 12\n#\tload_from = None\n#\tresume_from = None\n#\tpretrained = 'torchvision://resnet50'\n#\tdata_root= '/home/user/thuync/datasets/coco/'\n#\twork_dir = '/home/user/thuync/checkpoints/retinamask_r50_newloss/'\n#\tfp16 = dict(loss_scale=512.)\n\n# work_dir = 'work_dirs/retinamask_r50_fpn_1x'\n# data_root= './dataset-coco/'\n# model settings\nmodel = dict(\n\ttype='RetinaMask',\n\tpretrained=pretrained,\n\tbackbone=dict(\n\t\ttype='ResNet',\n\t\tdepth=50,\n\t\tnum_stages=4,\n\t\tout_indices=(0, 1, 2, 3),\n\t\tfrozen_stages=1,\n\t\tstyle='pytorch',\n\t),\n\tneck=dict(\n\t\ttype='FPN',\n\t\tin_channels=[256, 512, 1024, 
2048],\n\t\tout_channels=256,\n\t\tstart_level=1,\n\t\tadd_extra_convs=True,\n\t\tnum_outs=5,\n\t),\n\tbbox_head=dict(\n\t\ttype='RetinaHead',\n\t\tnum_classes=81,\n\t\tin_channels=256,\n\t\tstacked_convs=4,\n\t\tfeat_channels=256,\n\t\toctave_base_scale=4,\n\t\tscales_per_octave=3,\n\t\tanchor_ratios=[0.5, 1.0, 2.0],\n\t\tanchor_strides=[8, 16, 32, 64, 128],\n\t\tloss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),\n\t\t# loss_cls=dict(type='AutoFocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.5, loss_weight=1.0),\n\t\tloss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),\n\t\t# loss_bbox=dict(type='AdaptiveRobustLoss_R', num_dims=4, loss_weight=1.0),\n\t),\n\tmask_roi_extractor=dict(\n\t\ttype='SingleRoIExtractor',\n\t\troi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),\n\t\tout_channels=256,\n\t\tfeatmap_strides=[4, 8, 16, 32],\n\t),\n\tmask_head=dict(\n\t\ttype='FCNMaskHead',\n\t\tnum_convs=4,\n\t\tin_channels=256,\n\t\tconv_out_channels=256,\n\t\tnum_classes=81,\n\t\tloss_mask=dict(type='SegmFocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0),\n\t\t# loss_mask=dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n\t),\n)\n# training and testing settings\ntrain_cfg = dict(\n\tassigner=dict(\n\t\ttype='MaxIoUAssigner',\n\t\tpos_iou_thr=0.5,\n\t\tneg_iou_thr=0.4,\n\t\tmin_pos_iou=0,\n\t\tignore_iof_thr=-1,\n\t),\n\tallowed_border=-1,\n\tpos_weight=-1,\n\trpn_proposal=dict(\n\t\tnms_pre=1000,\n\t\tmin_bbox_size=0,\n\t\tscore_thr=0.0,\n\t\tnms=dict(type='nms', iou_thr=0.7),\n\t\tmax_per_img=1000\n\t),\n\trcnn=dict(\n\t\tassigner=dict(\n\t\t\ttype='MaxIoUAssigner',\n\t\t\tpos_iou_thr=0.5,\n\t\t\tneg_iou_thr=0.5,\n\t\t\tmin_pos_iou=0.5,\n\t\t\tignore_iof_thr=-1,\n\t\t),\n\t\tsampler=dict(\n\t\t\ttype='RandomSampler',\n\t\t\tnum=512,\n\t\t\tpos_fraction=0.25,\n\t\t\tneg_pos_ub=-1,\n\t\t\tadd_gt_as_proposals=True,\n\t\t),\n\t\tmask_size=28,\n\t\tpos_weight=-1,\n\t\tdebug=False,\n\t),\n)\ntest_cfg = dict(\n\tnms_pre=1000,\n\tmin_bbox_size=0,\n\tscore_thr=0.05,\n\tnms=dict(type='nms', iou_thr=0.5),\n\tmax_per_img=100,\n\tmask_thr_binary=0.5,\n)\n# dataset settings\ndataset_type = 'CocoDataset'\nimg_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n\tdict(type='LoadImageFromFile'),\n\tdict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n\tdict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n\tdict(type='RandomFlip', flip_ratio=0.5),\n\tdict(type='Normalize', **img_norm_cfg),\n\tdict(type='Pad', size_divisor=32),\n\tdict(type='DefaultFormatBundle'),\n\tdict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),\n]\ntest_pipeline = [\n\tdict(type='LoadImageFromFile'),\n\tdict(\n\t\ttype='MultiScaleFlipAug',\n\t\timg_scale=(1333, 800),\n\t\tflip=False,\n\t\ttransforms=[\n\t\t\tdict(type='Resize', keep_ratio=True),\n\t\t\tdict(type='RandomFlip'),\n\t\t\tdict(type='Normalize', **img_norm_cfg),\n\t\t\tdict(type='Pad', size_divisor=32),\n\t\t\tdict(type='ImageToTensor', keys=['img']),\n\t\t\tdict(type='Collect', keys=['img']),\n\t\t])\n]\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=workers_per_gpu,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'annotations/instances_train2017.json',\n img_prefix=data_root + 'images/train2017/',\n pipeline=train_pipeline, num_samples=num_samples),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'annotations/instances_val2017.json',\n img_prefix=data_root + 
'images/val2017/',\n pipeline=test_pipeline, num_samples=num_samples),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'annotations/instances_val2017.json',\n img_prefix=data_root + 'images/val2017/',\n pipeline=test_pipeline, num_samples=num_samples))\n# optimizer\noptimizer = dict(type='SGD', lr=lr_start, momentum=0.9, weight_decay=1e-4)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n\t# policy='step', step=[8, 11],\n\tpolicy='cosine', target_lr=lr_end, by_epoch=False,\n\twarmup='linear', warmup_iters=500, warmup_ratio=1.0/3,\n)\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n\tinterval=20,\n\thooks=[\n\t\tdict(type='TextLoggerHook'),\n\t\t# dict(type='TensorboardLoggerHook')\n\t])\n# yapf:enable\n# runtime settings\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nworkflow = [('train', 1)]\n","sub_path":"ccdetection/configs/retina_mask/retinamask_r50_fpn_1x.py","file_name":"retinamask_r50_fpn_1x.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"492781990","text":"import re\nimport sys\n\ninput = sys.stdin.readline\n\nqueue = []\n\nN = int(input())\npoint = 0\n\nfor i in range(N):\n command = input()\n if 'push' in command:\n data = re.findall(\"\\d+\", command)\n queue.append(int(data[0]))\n\n elif 'pop' in command:\n if (len(queue) - point) <= 0:\n print(\"-1\")\n else:\n print(queue[point])\n point += 1\n\n elif 'size' in command:\n print(len(queue) - point)\n\n elif 'empty' in command:\n if (len(queue) - point) <= 0:\n print(\"1\")\n else:\n print(\"0\")\n\n elif 'front' in command:\n if (len(queue) - point) <= 0:\n print(\"-1\")\n else:\n print(queue[point])\n\n elif 'back' in command:\n if (len(queue) - point) <= 0:\n print(\"-1\")\n else:\n print(queue[-1])\n\n","sub_path":"BOJ/큐, 덱/18258 - 큐 2.py","file_name":"18258 - 큐 2.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"282525806","text":"import re\nfrom random import randint\n\nclass BaseComponent:\n def evaluate(self):\n pass\n\nclass RollComponent(BaseComponent):\n \n def __init__(self):\n self.count = \"\" \n self.die = \"\"\n\n def evaluate(self):\n total = 0\n rolls = []\n countInt = int(self.count)\n dieInt = int(self.die)\n for i in range(0, countInt):\n roll = randint(1, dieInt)\n total += roll\n rolls.append(roll)\n return total, rolls\n\nclass ConstantComponent(BaseComponent):\n\n def __init__(self):\n self.count = \"\"\n\n def evaluate(self):\n countInt = int(self.count)\n return countInt, [countInt]\n\ndef parseSingle(rollString) -> \"BaseComponent\":\n rollString = rollString.lower().replace(\" \", \"\")\n if \"d\" in rollString:\n roll = RollComponent()\n else:\n roll = ConstantComponent()\n buildCount = True\n for c in rollString:\n if c == \"d\":\n buildCount = False\n continue\n if buildCount:\n roll.count += c\n else:\n roll.die += c\n return roll\n\ndef parse(rollString, nameMap): \n components = re.findall(r\"\\w+|\\+|\\-\", rollString)\n ops = []\n rolls = []\n decipheredRollString = \"\"\n for component in components:\n if component == \"+\" or component == \"-\":\n decipheredRollString += component\n else:\n decipheredRollString += nameMap(component)\n components = re.findall(r\"\\w+|\\+|\\-\", decipheredRollString)\n for component in components:\n if component == \"+\" or component == \"-\":\n 
ops.append(component)\n else:\n rolls.append(parseSingle(component))\n if len(rolls) != len(ops) + 1:\n print(str(len(rolls)) + \" \" + str(len(ops)))\n raise ValueError(\"Wrong number of args\")\n first = rolls.pop(0)\n total, individuals = first.evaluate()\n for i in range(0, len(rolls)):\n op = ops[i]\n roll = rolls[i]\n nextRoll, nextIndividuals = roll.evaluate()\n if op == \"+\":\n total += nextRoll\n individuals.extend(nextIndividuals)\n else:\n total -= nextRoll\n nextIndividuals = map(lambda r : r * -1, nextIndividuals)\n individuals.extend(nextIndividuals)\n return total, individuals \n","sub_path":"dice/logic/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"141091774","text":"from live import Live\nimport iniFile as ini\nimport sfml as sf\nfrom object import Position\nfrom walker import Walker\n\nFRIEND = 1\nENEMY = -1\nNEUTRAL = 0\n\nALARM_TIME = 10 # alarm time in seconds\n\niniReader=ini.IniFile(\"configs\\\\relations_test.ini\")\n\ndef GetRelation(group1,group2):\n\tvalue=iniReader.ReadInt(group1,group2)\n\tif(value>999):\n\t\treturn FRIEND\n\telif(value<999):\n\t\treturn ENEMY\n\telse:\n\t\treturn NEUTRAL\ndef SetRelation(group1,group2,value):\n\tif value<=-5000:\n\t\tvalue=-5000\n\telif value>=5000:\n\t\tvalue=5000 \n\tiniReader.Write(group1,group2,value)\n\t\nclass Npc(Live, Walker):\n\tdef __init__(self, objectLogicPath, object):\n\t\tLive.__init__(self,objectLogicPath, object)\n\t\tWalker.__init__(self, objectLogicPath, object)\n\t\tlogicFileName = object.GetLogicFileName()\n\t\tconfigFile=ini.IniFile(objectLogicPath)\n\t\tself._group = configFile.ReadString(\"npc\", \"group\")\n\t\tself.SetHealth(configFile.ReadInt(\"npc\", \"health\"))\n\t\tself._clock = sf.Clock()\n\t\tself._alarm = False\n\tdef IsAlarm(self):\n\t\tif self._alarm:\n\t\t\tif self._clock.elapsed_time.seconds < ALARM_TIME:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tself._alarm = False\n\t\treturn False\n\t_group = None\n\t_clock = None\n\t_alarm = None\n\t\n","sub_path":"Npc.py","file_name":"Npc.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"280036194","text":"import dungeon\nimport magic\n\nclass Class(object):\n def __init__(self,host):\n self.host=host\n\nclass Fighter(Class):\n desc='Thanks to unremitting training, $$$ was skilled at all weapons.'\n def __init__(self,host):\n Class.__init__(self,host)\n i = dungeon.Populator.create_item('Flail', 'basic_weapons', 2)\n self.host.pick_up(i)\n self.host.equip(i)\n \n c = dungeon.Populator.create_item('ChainmailShirt', 'basic_armor', 2)\n self.host.pick_up(c)\n self.host.equip(c)\n\n b = dungeon.Populator.create_item('Bow','basic_weapons',0)\n self.host.pick_up(b)\n \n for _ in xrange(0,3):\n r = dungeon.Populator.create_item('Arrows','basic_weapons',0)\n self.host.pick_up(r)\n \n for _ in xrange(0,3):\n r = dungeon.Populator.create_item('Darts','basic_weapons',0)\n self.host.pick_up(r)\n \n #if hasattr(self.host.slot,'trousers'):\n # t = dungeon.Populator.create_item('Trousers', 'basic_stuff', 2)\n # self.host.pick_up(t)\n # self.host.equip(t)\n self.host.timer=0\nclass Barbarian(Class):\n desc='Since his youth, $$$ clearly loved one weapon the most: the Axe.'\n def __init__(self,host):\n Class.__init__(self,host)\n i = dungeon.Populator.create_item('Axe', 'basic_weapons', 25)\n self.host.pick_up(i)\n self.host.equip(i)\n \n if 
hasattr(self.host.slot,'trousers'):\n t = dungeon.Populator.create_item('Trousers', 'basic_stuff', 2)\n self.host.pick_up(t)\n self.host.equip(t)\n self.host.timer=0 \nclass Priest(Class):\n desc='Since %%% birth, $$$ stood up for Law and Order.'\n def __init__(self,host):\n Class.__init__(self,host) \n self.host.timer=0\n \nclass Sorcerer(Class):\n desc='All %%% life $$$ tried to master the Elemental-Forces '\n def __init__(self,host):\n Class.__init__(self,host)\n self.host.spells.append(magic.fire_spells.FireBall())\n self.host.timer=0\nclass Necromancer(Class):\n desc='Allured by the Power of Chaos, $$$ was a fearsome Wizard.' \n def __init__(self,host):\n Class.__init__(self,host)\n self.host.spells.append(magic.chaos_spells.CorpseDance())\n self.host.spells.append(magic.chaos_spells.DrainLife())\n self.host.timer=0\nclasskits = (('Fighter',Fighter),\n ('Barbarian',Barbarian),\n ('Sorcerer',Sorcerer),\n ('Priest',Priest),\n ('Necromancer',Necromancer))","sub_path":"src/actor/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"67573532","text":"from lxml import etree\nfrom lxml import objectify\n\nE = objectify.E\n\nfileElem = E.file(\n E.customers(\n E.customer(\n E.phone(\n E.type('home'),\n E.number('555-555-5555')\n ),\n E.phone(\n E.type('cell'),\n E.number('999-999-9999')\n ),\n E.phone(\n E.type('home'),\n E.number('111-111-1111')\n )\n )\n )\n)\n\nprint(etree.tostring(fileElem, pretty_print=True))","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"318828029","text":"\"\"\"\nGeneral benchmark template for all registration methods.\nIt also serves for evaluating the input registration pairs\n(while no registration is performed, there is only the initial deformation)\n\nEXAMPLE (usage):\n>> mkdir ./results\n>> python benchmarks/bm_registration.py \\\n -c data_images/pairs-imgs-lnds_histol.csv -d ./data_images \\\n -o ./results --unique\n\nCopyright (C) 2016-2019 Jiri Borovec \n\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport time\nimport logging\nimport shutil\nimport multiprocessing as mproc\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nsys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\nfrom birl.utilities.data_io import (\n update_path, create_folder, image_size, load_landmarks, load_image, save_image)\nfrom birl.utilities.evaluate import compute_points_dist_statistic, compute_affine_transf_diff\nfrom birl.utilities.experiments import exec_commands, string_dict, wrap_execute_sequence\nfrom birl.utilities.visualisation import (\n export_figure, draw_image_points, draw_images_warped_landmarks)\nfrom birl.utilities.registration import estimate_affine_transform\nfrom birl.utilities.cls_experiment import Experiment\n\n#: number of available threads on this computer\nNB_THREADS = int(mproc.cpu_count())\n#: default number of threads used by benchmarks\nNB_THREADS_USED = max(1, int(NB_THREADS * .8))\n# some needed files\nNAME_CSV_REGISTRATION_PAIRS = 'registration-results.csv'\n#: default file for exporting results in table format\nNAME_CSV_RESULTS = 'results-summary.csv'\n#: default file for exporting results in formatted text format\nNAME_TXT_RESULTS = 'results-summary.txt'\n#: logging file for registration 
experiments\nNAME_LOG_REGISTRATION = 'registration.log'\n#: output image name in experiment folder for reg. results - image and landmarks are warped\nNAME_IMAGE_MOVE_WARP_POINTS = 'image_warped_landmarks_warped.jpg'\n#: output image name in experiment folder for reg. results - warped landmarks in reference image\nNAME_IMAGE_REF_POINTS_WARP = 'image_ref_landmarks_warped.jpg'\n#: output image name in experiment folder for showing improved alignment by used registration\nNAME_IMAGE_WARPED_VISUAL = 'registration_visual_landmarks.jpg'\n# column names in cover and also registration table\n#: reference (registration target) image\nCOL_IMAGE_REF = 'Target image'\n#: moving (registration source) image\nCOL_IMAGE_MOVE = 'Source image'\n#: reference image warped to the moving frame\nCOL_IMAGE_REF_WARP = 'Warped target image'\n#: moving image warped to the reference frame\nCOL_IMAGE_MOVE_WARP = 'Warped source image'\n#: reference (registration target) landmarks\nCOL_POINTS_REF = 'Target landmarks'\n#: moving (registration source) landmarks\nCOL_POINTS_MOVE = 'Source landmarks'\n#: reference landmarks warped to the moving frame\nCOL_POINTS_REF_WARP = 'Warped target landmarks'\n#: moving landmarks warped to the reference frame\nCOL_POINTS_MOVE_WARP = 'Warped source landmarks'\n#: registration folder for each particular experiment\nCOL_REG_DIR = 'Registration folder'\n#: define robustness as improved image alignment from initial state\nCOL_ROBUSTNESS = 'Robustness'\n#: measured time of image registration in minutes\nCOL_TIME = 'Execution time [minutes]'\n#: tuple of image size\nCOL_IMAGE_SIZE = 'Image size [pixels]'\n#: image diagonal in pixels\nCOL_IMAGE_DIAGONAL = 'Image diagonal [pixels]'\n\n# list of columns in cover csv\nCOVER_COLUMNS = (COL_IMAGE_REF, COL_IMAGE_MOVE, COL_POINTS_REF, COL_POINTS_MOVE)\nCOVER_COLUMNS_EXT = tuple(list(COVER_COLUMNS) + [COL_IMAGE_SIZE, COL_IMAGE_DIAGONAL])\nCOVER_COLUMNS_WRAP = tuple(list(COVER_COLUMNS) + [COL_IMAGE_REF_WARP, COL_IMAGE_MOVE_WARP,\n COL_POINTS_REF_WARP, COL_POINTS_MOVE_WARP])\n\n\n# fixing ImportError: No module named 'copy_reg' for Python3\nif sys.version_info.major == 2:\n import types\n import copy_reg\n\n def _reduce_method(m):\n # SOLVING issue: cPickle.PicklingError:\n # Can't pickle <type 'instancemethod'>:\n # attribute lookup __builtin__.instancemethod failed\n if m.im_self is None:\n tp = m.im_class\n else:\n tp = m.im_self\n return getattr, (tp, m.im_func.func_name)\n\n copy_reg.pickle(types.MethodType, _reduce_method)\n\n\nclass ImRegBenchmark(Experiment):\n \"\"\" General benchmark class for all registration methods.\n It also serves for evaluating the input registration pairs.\n\n :param {str: str|float} params: dictionary with experiment configuration,\n the required options are named in `REQUIRED_PARAMS`,\n note that the basic parameters are inherited\n\n The benchmark has the following steps:\n 1. check all necessary paths and required parameters\n 2. load cover file and set all paths as absolute\n 3. run individual registration experiment in sequence or in parallel\n (nb_workers > 1); if the particular experiment folder exists (assumed\n completed experiment), skip it\n a) create experiment folder and init experiment\n b) generate execution command\n c) run the command (an option to lock it in single thread)\n d) evaluate experiment, set the expected outputs and visualisation\n e) clean all extra files if any\n 4. 
visualise results and evaluate registration results\n\n    NOTE: The actual implementation simulates the \"IDEAL\" registration while\n    it blindly copies the reference landmarks as results of the registration.\n    In contrast to a real registration, it copies the moving images so there\n    is alignment (consistent warping) between resulting landmarks and image.\n\n    Running in single thread:\n    >>> from birl.utilities.data_io import create_folder, update_path\n    >>> path_out = create_folder('temp_results')\n    >>> path_csv = os.path.join(update_path('data_images'), 'pairs-imgs-lnds_mix.csv')\n    >>> params = {'nb_workers': 1, 'unique': False, 'visual': True,\n    ...           'path_out': path_out, 'path_cover': path_csv}\n    >>> benchmark = ImRegBenchmark(params)\n    >>> benchmark.run()\n    True\n    >>> del benchmark\n    >>> shutil.rmtree(path_out, ignore_errors=True)\n\n    Running in multiple parallel threads:\n    >>> from birl.utilities.data_io import create_folder, update_path\n    >>> path_out = create_folder('temp_results')\n    >>> path_csv = os.path.join(update_path('data_images'), 'pairs-imgs-lnds_mix.csv')\n    >>> params = {'nb_workers': 2, 'unique': False, 'visual': True,\n    ...           'path_out': path_out, 'path_cover': path_csv}\n    >>> benchmark = ImRegBenchmark(params)\n    >>> benchmark.run()\n    True\n    >>> del benchmark\n    >>> shutil.rmtree(path_out, ignore_errors=True)\n    \"\"\"\n    REQUIRED_PARAMS = ['path_cover', 'path_out', 'nb_workers']\n\n    def __init__(self, params):\n        \"\"\" initialise benchmark\n\n        :param dict params: {str: value}\n        \"\"\"\n        assert 'unique' in params, 'missing \"unique\" among %r' % params.keys()\n        super(ImRegBenchmark, self).__init__(params, params['unique'])\n        logging.info(self.__doc__)\n        self._df_cover = None\n        self._df_experiments = None\n        self.nb_workers = params.get('nb_workers', NB_THREADS)\n        self._path_csv_regist = os.path.join(self.params['path_exp'],\n                                             NAME_CSV_REGISTRATION_PAIRS)\n\n    def _check_required_params(self):\n        \"\"\" check some extra required parameters for this benchmark \"\"\"\n        logging.debug('.. 
check if the BM has all required parameters')\n super(ImRegBenchmark, self)._check_required_params()\n for n in self.REQUIRED_PARAMS:\n assert n in self.params, 'missing \"%s\" among %r' % (n, self.params.keys())\n\n def _update_path(self, path, destination='data'):\n \"\"\" update the path to the dataset or output\n\n :param str path: original path\n :param str destination: type of update - data | output | general\n :return str: updated path\n \"\"\"\n if destination == 'data' and 'path_dataset' in self.params:\n path = os.path.join(self.params['path_dataset'], path)\n elif destination == 'expt' and 'path_exp' in self.params:\n path = os.path.join(self.params['path_exp'], path)\n path = update_path(path, absolute=True)\n return path\n\n def _relativize_path(self, path, destination='path_exp'):\n \"\"\" extract relative path according to a given parameter\n\n :param str path: the original path to file/folder\n :param str destination: use path from parameters\n :return str: relative or the original path\n \"\"\"\n if path is None or not os.path.exists(path):\n logging.debug('Source path does not exist: %s', path)\n return path\n assert destination in self.params, 'Missing path in params: %s' % destination\n base_path = self.params['path_exp']\n base_dir = os.path.basename(base_path)\n path_split = path.split(os.sep)\n if base_dir not in path_split:\n logging.debug('Missing requested folder \"%s\" in source path: %s',\n base_dir, path_split)\n return path\n path_split = path_split[path_split.index(base_dir) + 1:]\n path_rltv = os.sep.join(path_split)\n if os.path.exists(os.path.join(self.params[destination], path_rltv)):\n return path_rltv\n else:\n logging.debug('Not existing relative path: %s', path)\n return path\n\n def _copy_config_to_expt(self, field_path):\n \"\"\" copy particular configuration to the experiment folder\n\n :param str field_path: field from parameters containing a path to file\n \"\"\"\n path_source = self.params.get(field_path, '')\n path_config = os.path.join(self.params['path_exp'], os.path.basename(path_source))\n if os.path.isfile(path_source):\n shutil.copy(path_source, path_config)\n self.params[field_path] = path_config\n else:\n logging.warning('Missing config: %s', path_source)\n\n def _get_paths(self, row):\n \"\"\" expand the relative paths to absolute\n\n :param row: row from cover file with relative paths\n :return (str, str, str, str): path to reference and moving image\n and reference and moving landmarks\n \"\"\"\n paths = [self._update_path(row[col], 'data') for col in COVER_COLUMNS]\n return paths\n\n def _get_path_reg_dir(self, record):\n return self._update_path(str(record[COL_REG_DIR]), 'expt')\n\n def _load_data(self):\n \"\"\" loading data, the cover file with all registration pairs \"\"\"\n logging.info('-> loading data...')\n # loading the csv cover file\n assert os.path.isfile(self.params['path_cover']), \\\n 'path to csv cover is not defined - %s' % self.params['path_cover']\n self._df_cover = pd.read_csv(self.params['path_cover'], index_col=None)\n assert all(col in self._df_cover.columns for col in COVER_COLUMNS), \\\n 'Some required columns are missing in the cover file.'\n\n def _run(self):\n \"\"\" perform complete benchmark experiment \"\"\"\n logging.info('-> perform set of experiments...')\n\n # load existing results or create a new entity\n if os.path.isfile(self._path_csv_regist):\n logging.info('loading existing csv: \"%s\"', self._path_csv_regist)\n self._df_experiments = pd.read_csv(self._path_csv_regist,\n index_col=None)\n if 'ID' in 
self._df_experiments.columns:\n self._df_experiments.set_index('ID', inplace=True)\n else:\n self._df_experiments = pd.DataFrame()\n\n # run the experiment in parallel or single thread\n self.__execute_method(self._perform_registration, self._df_cover,\n self._path_csv_regist, 'registration experiments',\n aggr_experiments=True)\n\n def __execute_method(self, method, input_table, path_csv=None, desc='',\n aggr_experiments=False, nb_workers=None):\n \"\"\" execute a method in sequence or parallel\n\n :param func method: used method\n :param DF input_table: iterate over table\n :param str path_csv: path to the output temporal csv\n :param str desc: name of the running process\n :param bool aggr_experiments: append output to experiment DF\n :param int|None nb_workers: number of jobs, by default using class setting\n :return:\n \"\"\"\n # setting the temporal split\n self._main_thread = False\n # run the experiment in parallel or single thread\n nb_workers = self.nb_workers if nb_workers is None else nb_workers\n iter_table = ((idx, dict(row)) for idx, row, in input_table.iterrows())\n for res in wrap_execute_sequence(method, iter_table, ordered=True,\n nb_workers=nb_workers, desc=desc):\n if res is None or not aggr_experiments:\n continue\n self._df_experiments = self._df_experiments.append(res, ignore_index=True)\n self.__export_df_experiments(path_csv)\n self._main_thread = True\n\n def __export_df_experiments(self, path_csv=None):\n \"\"\" export the DataFrame with registration results\n\n :param str | None path_csv: path to output CSV file\n \"\"\"\n if path_csv is not None:\n if 'ID' in self._df_experiments.columns:\n self._df_experiments.set_index('ID').to_csv(path_csv)\n else:\n self._df_experiments.to_csv(path_csv, index=None)\n\n def __check_exist_regist(self, idx, path_dir_reg):\n \"\"\" check whether the particular experiment already exists and has results\n\n if the folder with the experiment already exists and is also part\n of the loaded finished experiments; sometimes an existing folder\n may mean a failed experiment\n\n :param int idx: index of the particular experiment\n :param str path_dir_reg:\n :return bool:\n \"\"\"\n b_df_col = ('ID' in self._df_experiments.columns and idx in self._df_experiments['ID'])\n b_df_idx = idx in self._df_experiments.index\n check = os.path.exists(path_dir_reg) and (b_df_col or b_df_idx)\n if check:\n logging.warning('particular registration experiment already exists:'\n ' \"%r\"', idx)\n return check\n\n def _perform_registration(self, df_row):\n \"\"\" run single registration experiment with all sub-stages\n\n :param (int, dict) df_row: row from the iterated table\n \"\"\"\n idx, row = df_row\n logging.debug('-> perform single registration #%d...', idx)\n # create folder for this particular experiment\n row['ID'] = idx\n row[COL_REG_DIR] = str(idx)\n path_dir_reg = self._get_path_reg_dir(row)\n # check whether the particular experiment already exists and has results\n if self.__check_exist_regist(idx, path_dir_reg):\n return None\n create_folder(path_dir_reg)\n\n row = self._prepare_img_registration(row)\n\n # measure execution time\n time_start = time.time()\n row = self._execute_img_registration(row)\n # if the experiment failed, return back None\n if not row:\n return None\n # compute the registration time in minutes\n row[COL_TIME] = (time.time() - time_start) / 60.\n\n row = self._parse_regist_results(row)\n row = self._clear_after_registration(row)\n return row\n\n def _summarise(self):\n \"\"\" summarise complete benchmark experiment \"\"\"\n logging.info('-> 
summarise experiment...')\n # load _df_experiments and compute stat\n _compute_landmarks_statistic = partial(\n compute_registration_statistic,\n df_experiments=self._df_experiments,\n path_dataset=self.params.get('path_dataset', None),\n path_experiment=self.params.get('path_exp', None))\n self.__execute_method(_compute_landmarks_statistic, self._df_experiments,\n desc='compute TRE', nb_workers=1)\n # add visualisations\n _visualise_registration = partial(\n visualise_registration,\n path_dataset=self.params.get('path_dataset', None),\n path_experiment=self.params.get('path_exp', None))\n if self.params.get('visual', False):\n self.__execute_method(_visualise_registration, self._df_experiments,\n desc='visualise results')\n # export stat to csv\n if self._df_experiments.empty:\n logging.warning('no experimental results were collected')\n return\n self.__export_df_experiments(self._path_csv_regist)\n # export simple stat to txt\n export_summary_results(self._df_experiments, self.params['path_exp'], self.params)\n\n @classmethod\n def _prepare_img_registration(self, record):\n \"\"\" prepare the experiment folder if it is required,\n e.g. copy some extra files\n\n :param {str: str|float} record: dictionary with regist. params\n :return {str: str|float}: the same or updated registration info\n \"\"\"\n logging.debug('.. no preparation before registration experiment')\n return record\n\n def _execute_img_registration(self, record):\n \"\"\" execute the image registration itself\n\n :param {} record:\n :return {}:\n \"\"\"\n logging.debug('.. execute image registration as command line')\n path_dir_reg = self._get_path_reg_dir(record)\n\n commands = self._generate_regist_command(record)\n # in case it is just one command\n if not (isinstance(commands, list) or isinstance(commands, tuple)):\n commands = [commands]\n\n path_log = os.path.join(path_dir_reg, NAME_LOG_REGISTRATION)\n # TODO, add lock to single thread, create pool with possible thread ids\n # (USE taskset [native], numactl [need install])\n # execute the commands and write their output to the log file\n cmd_result = exec_commands(commands, path_log)\n # if the experiment failed, return back None\n if not cmd_result:\n return None\n return record\n\n def _generate_regist_command(self, record):\n \"\"\" generate the registration command(s)\n\n :param {str: str|float} record: dictionary with registration params\n :return str|[str]: the execution commands\n \"\"\"\n logging.debug('.. 
simulate registration: '\n 'copy the target image and landmarks, simulate ideal case')\n path_im_ref, _, _, path_lnds_move = self._get_paths(record)\n path_reg_dir = self._get_path_reg_dir(record)\n name_img = os.path.basename(record[COL_IMAGE_MOVE])\n cmd_img = 'cp %s %s' % (path_im_ref, os.path.join(path_reg_dir, name_img))\n name_lnds = os.path.basename(record[COL_POINTS_MOVE])\n cmd_lnds = 'cp %s %s' % (path_lnds_move, os.path.join(path_reg_dir, name_lnds))\n commands = [cmd_img, cmd_lnds]\n return commands\n\n @classmethod\n def _extract_warped_image_landmarks(self, record):\n \"\"\" get registration results - warped registered images and landmarks\n\n :param record: {str: value}, dictionary with registration params\n :return (str, str, str, str): paths to img_ref_warp, img_move_warp,\n lnds_ref_warp, lnds_move_warp\n \"\"\"\n # detect image\n path_img = os.path.join(record[COL_REG_DIR],\n os.path.basename(record[COL_IMAGE_MOVE]))\n # detect landmarks\n path_lnd = os.path.join(record[COL_REG_DIR],\n os.path.basename(record[COL_POINTS_MOVE]))\n return None, path_img, path_lnd, None\n\n def _extract_execution_time(self, record):\n \"\"\" if needed update the execution time\n\n :param record: {str: value}, dictionary with registration params\n :return float|None: time in minutes\n \"\"\"\n _ = self._get_path_reg_dir(record)\n return None\n\n def _parse_regist_results(self, record):\n \"\"\" evaluate results of the experiment and identify the registered image\n and landmarks when the process finished\n\n :param record: {str: value}, dictionary with registration params\n :return: {str: value}\n \"\"\"\n # Update the registration outputs / paths\n paths = self._extract_warped_image_landmarks(record)\n columns = (COL_IMAGE_REF_WARP, COL_IMAGE_MOVE_WARP,\n COL_POINTS_REF_WARP, COL_POINTS_MOVE_WARP)\n\n for path, col in zip(paths, columns):\n # detect image and landmarks\n path = self._relativize_path(path, 'path_exp')\n if path is not None and os.path.isfile(self._update_path(path, 'expt')):\n record[col] = path\n\n # Update the registration time\n exec_time = self._extract_execution_time(record)\n if exec_time:\n # compute the registration time in minutes\n record[COL_TIME] = exec_time\n\n return record\n\n @classmethod\n def _clear_after_registration(self, record):\n \"\"\" clean unnecessary files after the registration\n\n :param {str: value} record: dictionary with regist. information\n :return {str: value}: the same or updated regist. info\n \"\"\"\n logging.debug('.. no cleaning after registration experiment')\n return record\n\n\ndef update_path_(path, path_base=None):\n \"\"\" update the image path with possible base path\n\n :param str path: the last part of the path\n :param str|None path_base: optional base path\n :return str: updated path\n \"\"\"\n path = os.path.join(path_base, str(path)) if path_base else path\n return update_path(path, absolute=True)\n\n\ndef _image_diag(record, path_img_ref=None):\n \"\"\" get the image diagonal from several sources\n 1. diagonal exists in the table\n 2. image size exists in the table\n 3. 
reference image exists\n\n    :param {}|DF record: one row from the table\n    :param str path_img_ref: optional path to the reference image\n    :return float|None: image diagonal\n    \"\"\"\n    img_diag = record[COL_IMAGE_DIAGONAL] if COL_IMAGE_DIAGONAL in record else None\n    if not img_diag and path_img_ref and os.path.isfile(path_img_ref):\n        _, img_diag = image_size(path_img_ref)\n    return img_diag\n\n\ndef _load_landmarks(record, path_dataset):\n    path_img_ref, _, path_lnds_ref, path_lnds_move = \\\n        [update_path_(record[col], path_dataset) for col in COVER_COLUMNS]\n    points_ref = load_landmarks(path_lnds_ref)\n    points_move = load_landmarks(path_lnds_move)\n    return points_ref, points_move, path_img_ref\n\n\ndef compute_registration_statistic(idx_row, df_experiments,\n                                   path_dataset=None, path_experiment=None):\n    \"\"\" after successful registration load initial and estimated landmarks\n    afterwards compute various statistics for the init and final alignment\n\n    :param (int, dict) idx_row: row from iterated table\n    :param DF df_experiments: DataFrame with experiments\n    :param str|None path_dataset: path to the dataset folder\n    :param str|None path_experiment: path to the experiment folder\n    \"\"\"\n    idx, row = idx_row\n    row = dict(row) # convert even series to dictionary\n    points_ref, points_move, path_img_ref = _load_landmarks(row, path_dataset)\n    img_diag = _image_diag(row, path_img_ref)\n    df_experiments.loc[idx, COL_IMAGE_DIAGONAL] = img_diag\n\n    # compute landmarks statistic\n    compute_registration_accuracy(df_experiments, idx, points_ref, points_move,\n                                  'init', img_diag, wo_affine=False)\n\n    # load transformed landmarks\n    if (COL_POINTS_MOVE_WARP not in row) and (COL_POINTS_REF_WARP not in row):\n        logging.error('Statistic: no output landmarks')\n        return\n\n    # define what is the target and init state according to the experiment results\n    is_move_warp = COL_POINTS_MOVE_WARP in row and row[COL_POINTS_MOVE_WARP]\n    points_init = points_move if is_move_warp else points_ref\n    points_target = points_ref if is_move_warp else points_move\n    col_lnds_warp = COL_POINTS_MOVE_WARP if is_move_warp else COL_POINTS_REF_WARP\n\n    # load landmarks\n    path_landmarks = update_path_(row[col_lnds_warp], path_experiment)\n    if path_landmarks and os.path.isfile(path_landmarks):\n        points_warp = load_landmarks(path_landmarks)\n        points_warp = np.nan_to_num(points_warp)\n    else:\n        logging.warning('Invalid path to the landmarks: \"%s\" <- \"%s\"',\n                        path_landmarks, row[col_lnds_warp])\n        return\n\n    # compute Affine statistic\n    affine_diff = compute_affine_transf_diff(points_init, points_target, points_warp)\n    for name in affine_diff:\n        df_experiments.loc[idx, name] = affine_diff[name]\n\n    # compute landmarks statistic\n    compute_registration_accuracy(df_experiments, idx, points_target, points_warp,\n                                  'elastic', img_diag, wo_affine=True)\n    # compute landmarks statistic\n    compute_registration_accuracy(df_experiments, idx, points_target, points_warp,\n                                  'final', img_diag, wo_affine=False)\n    row_ = dict(df_experiments.loc[idx])\n    if 'TRE Mean (final)' in row_:\n        robust = row_['TRE Mean (final)'] < row_['TRE Mean (init)']\n        df_experiments.loc[idx, COL_ROBUSTNESS] = int(robust)\n\n\ndef compute_registration_accuracy(df_experiments, idx, points1, points2,\n                                  state='', img_diag=None, wo_affine=False):\n    \"\"\" compute statistics on two point sets\n\n    :param DF df_experiments: DataFrame with experiments\n    :param int idx: index of the particular record\n    :param points1: np.array\n    :param points2: np.array\n    :param str state: whether it 
was before or after registration\n    :param float img_diag: target image diagonal\n    :param bool wo_affine: without affine transform, assume only local/elastic deformation\n    \"\"\"\n    if wo_affine and points1 is not None and points2 is not None:\n        # remove the affine transform and assume only local/elastic deformation\n        _, _, points1, _ = estimate_affine_transform(points1, points2)\n\n    _, stat = compute_points_dist_statistic(points1, points2)\n    if img_diag is not None:\n        df_experiments.at[idx, COL_IMAGE_DIAGONAL] = img_diag\n    # update particular idx\n    for name in (n for n in stat if n not in ['overlap points']):\n        if img_diag is not None:\n            df_experiments.at[idx, 'rTRE %s (%s)' % (name, state)] = stat[name] / img_diag\n        df_experiments.at[idx, 'TRE %s (%s)' % (name, state)] = stat[name]\n    for name in ['overlap points']:\n        df_experiments.at[idx, '%s (%s)' % (name, state)] = stat[name]\n\n\ndef _visual_image_move_warp_lnds_move_warp(record, path_dataset=None,\n                                           path_experiment=None):\n    \"\"\" visualise the case with warped moving image and landmarks\n    to the reference frame so they are simple to overlap\n\n    :param {} record: row with the experiment\n    :param str|None path_dataset: path to the dataset folder\n    :param str|None path_experiment: path to the experiment folder\n    :return obj|None:\n    \"\"\"\n    assert COL_POINTS_MOVE_WARP in record and isinstance(record[COL_POINTS_MOVE_WARP], str), \\\n        'Missing registered landmarks \"%s\"' % COL_POINTS_MOVE_WARP\n    path_points_warp = update_path_(record[COL_POINTS_MOVE_WARP], path_experiment)\n    if not os.path.isfile(path_points_warp):\n        logging.warning('missing warped landmarks for: %r', dict(record))\n        return\n\n    points_ref, points_move, path_img_ref = _load_landmarks(record, path_dataset)\n\n    if COL_IMAGE_MOVE_WARP not in record or not isinstance(record[COL_IMAGE_MOVE_WARP], str):\n        logging.warning('Missing registered image \"%s\"', COL_IMAGE_MOVE_WARP)\n        image_warp = None\n    else:\n        path_image_warp = update_path_(record[COL_IMAGE_MOVE_WARP], path_experiment)\n        image_warp = load_image(path_image_warp)\n\n    points_warp = load_landmarks(path_points_warp)\n    if not list(points_warp):\n        return\n    # draw image with landmarks\n    image = draw_image_points(image_warp, points_warp)\n    save_image(os.path.join(update_path_(record[COL_REG_DIR], path_experiment),\n                            NAME_IMAGE_MOVE_WARP_POINTS), image)\n    del image\n\n    # visualise the landmarks move during registration\n    image_ref = load_image(path_img_ref)\n    fig = draw_images_warped_landmarks(image_ref, image_warp, points_move,\n                                       points_ref, points_warp)\n    del image_ref, image_warp\n    return fig\n\n\ndef _visual_image_ref_warp_lnds_move_warp(record, path_dataset=None,\n                                          path_experiment=None):\n    \"\"\" visualise the case with warped reference landmarks to the move frame\n\n    :param {} record: row with the experiment\n    :param str|None path_dataset: path to the dataset folder\n    :param str|None path_experiment: path to the experiment folder\n    :return obj|None:\n    \"\"\"\n    assert COL_POINTS_REF_WARP in record and isinstance(record[COL_POINTS_REF_WARP], str), \\\n        'Missing registered landmarks \"%s\"' % COL_POINTS_REF_WARP\n    path_points_warp = update_path_(record[COL_POINTS_REF_WARP], path_experiment)\n    if not os.path.isfile(path_points_warp):\n        logging.warning('missing warped landmarks for: %r', dict(record))\n        return\n\n    points_ref, points_move, path_img_ref = _load_landmarks(record, path_dataset)\n\n    points_warp = load_landmarks(path_points_warp)\n    if not list(points_warp):\n        return\n    # draw image with landmarks\n    image_move = 
load_image(update_path_(record[COL_IMAGE_MOVE], path_dataset))\n    # image_warp = tl_io.load_image(row['Moving image, Transf.'])\n    image = draw_image_points(image_move, points_warp)\n    save_image(os.path.join(update_path_(record[COL_REG_DIR], path_experiment),\n                            NAME_IMAGE_REF_POINTS_WARP), image)\n    del image\n\n    # visualise the landmarks move during registration\n    image_ref = load_image(path_img_ref)\n    fig = draw_images_warped_landmarks(image_ref, image_move, points_ref,\n                                       points_move, points_warp)\n    del image_ref, image_move\n    return fig\n\n\ndef visualise_registration(idx_row, path_dataset=None, path_experiment=None):\n    \"\"\" visualise the registration results according to what landmarks were\n    estimated - in registration or moving frame\n\n    :param (int, dict) idx_row: row from iterated table\n    :param str path_dataset: path to the dataset folder\n    :param str path_experiment: path to the experiment folder\n    \"\"\"\n    _, row = idx_row\n    row = dict(row) # convert even series to dictionary\n    fig, path_fig = None, None\n    # visualise particular experiment by idx\n    if COL_POINTS_MOVE_WARP in row and isinstance(row[COL_POINTS_MOVE_WARP], str):\n        fig = _visual_image_move_warp_lnds_move_warp(row, path_dataset, path_experiment)\n    elif COL_POINTS_REF_WARP in row and isinstance(row[COL_POINTS_REF_WARP], str):\n        fig = _visual_image_ref_warp_lnds_move_warp(row, path_dataset, path_experiment)\n    else:\n        logging.error('Visualisation: no output image or landmarks')\n\n    if fig is not None:\n        path_fig = os.path.join(update_path_(row[COL_REG_DIR], path_experiment),\n                                NAME_IMAGE_WARPED_VISUAL)\n        export_figure(path_fig, fig)\n\n    return path_fig\n\n\ndef export_summary_results(df_experiments, path_out, params=None,\n                           name_txt=NAME_TXT_RESULTS, name_csv=NAME_CSV_RESULTS):\n    \"\"\" export the summary as CSV and TXT\n\n    :param DF df_experiments: DataFrame with experiments\n    :param str path_out: path to the output folder\n    :param {str: any} params: experiment parameters\n    :param str name_csv: CSV results file name\n    :param str name_txt: TXT results file name\n\n    >>> export_summary_results(pd.DataFrame(), '')\n    \"\"\"\n    custom_percentiles = np.arange(0., 1., 0.05)\n    if df_experiments.empty:\n        logging.error('No registration results found.')\n        return\n    if 'ID' in df_experiments.columns:\n        df_experiments.set_index('ID', inplace=True)\n    df_summary = df_experiments.describe(percentiles=custom_percentiles).T\n    df_summary['median'] = df_experiments.median()\n    nb_missing = np.sum(df_experiments['TRE Mean (init)'].isnull())\n    df_summary['missing'] = nb_missing / float(len(df_experiments))\n    df_summary.sort_index(inplace=True)\n    path_csv = os.path.join(path_out, name_csv)\n    logging.debug('exporting CSV summary: %s', path_csv)\n    df_summary.to_csv(path_csv)\n\n    path_txt = os.path.join(path_out, name_txt)\n    logging.debug('exporting TXT summary: %s', path_txt)\n    pd.set_option('display.float_format', '{:10,.3f}'.format)\n    pd.set_option('expand_frame_repr', False)\n    with open(path_txt, 'w') as fp:\n        if params:\n            fp.write(string_dict(params, 'CONFIGURATION:'))\n        fp.write('\\n' * 3 + 'RESULTS:\\n')\n        fp.write('completed registration experiments: %i' % len(df_experiments))\n        fp.write('\\n' * 2)\n        fp.write(repr(df_summary[['mean', 'std', 'median', 'min', 'max', 'missing',\n                                  '5%', '25%', '50%', '75%', '95%']]))\n","sub_path":"birl/cls_benchmark.py","file_name":"cls_benchmark.py","file_ext":"py","file_size_in_byte":33634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"232711182","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20200914\n# Problem link : https://leetcode.com/problems/house-robber/\n################################################################\n\nclass Solution:\n # def rob(self, nums: List[int]) -> int:\n # dp = [0, 0]\n # for m in nums:\n # dp.append(max(m + dp[-2], dp[-1]))\n # return dp[-1]\n \n def rob(self, nums: List[int]) -> int:\n l, r = 0, 0 # left and right from the rightmost cells of the dp array\n for m in nums:\n l, r = r, max(m + l, r)\n return r","sub_path":"30_day_challenge_2020_September/198_house_robber_day14.py","file_name":"198_house_robber_day14.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"222475665","text":"print(\"Processing Employee Hours\")\r\n\r\n# open the file for reading\r\nfile = open(\"employeeHours.txt\", \"r\")\r\n\r\nfor employee in file: # read each line and put it in a list\r\n employee = employee.strip()\r\n employee = employee.split()\r\n\r\n total = 0 # set the total of hours worked to 0\r\n # this loop runs until the number of times worked by the employee is reached\r\n for i in range(1, len(employee)): \r\n hour = float(employee[i]) # find the hours and turn it into a float\r\n total += hour # add the hours to the total\r\n\r\n print(\"total hours worked by {}: {}\".format(employee[0], total))\r\n\r\n","sub_path":"employeehours.py","file_name":"employeehours.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"256802790","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport numpy.random as npr\nfrom mujoco_py import load_model_from_path, MjSim, MjViewer\nimport os\nfrom mult_model_mppi import MultModelMPPI\nfrom mult_model_pi2 import MultModelPI2\nfrom mujoco_py.generated import const\n\n# np.random.seed(800)\n\nframe_skip = 2\nmodel_path = 'assets/franka-cabinet.xml'\nmodel = load_model_from_path(model_path)\nsim = MjSim(model)\n\nviewer = MjViewer(sim)\n\nhandle_sid = model.site_name2id('Handle')\n# drawer_hinge_did = model.jnt_dofadr[model.joint_name2id('drawer_top')]\ndrawer_hinge_did = model.jnt_dofadr[model.joint_name2id('door_left')]\n\ngrasp_sid = model.site_name2id('grasp')\n\ntarget_gripper_config = np.array([\n [0., 0., 1.],\n [1., 0., 0.],\n [0., 1., 0.]\n]).ravel()\n\ndef eulerAnglesToRotationMatrix(a, b):\n v = np.cross(a,b)\n c = np.dot(a,b)\n s = np.linalg.norm(v)\n I = np.identity(3)\n k = np.array([\n [0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]\n ])\n return I + k + np.dot(k, k) * 1/(1+c)\n\n\ndef task(data, ctrl):\n gripper_width = data.get_joint_qpos('finger1') - data.get_joint_qpos('finger2')\n handle_pos = data.site_xpos[handle_sid].ravel()\n grasp_pos = data.site_xpos[grasp_sid].ravel()\n grasp_config = data.site_xmat[grasp_sid].ravel()\n velocity_loss = np.dot(data.qvel, data.qvel)\n\n grasp_err = 800.0 * np.linalg.norm(grasp_pos - handle_pos)\n robot_ang = data.qpos[1] * data.qpos[1]\n grasp_config_err = np.linalg.norm(grasp_config - target_gripper_config)\n loss = 0. 
* robot_ang + 100.0 * grasp_config_err \\\n + grasp_err \\\n + 0.01 * (velocity_loss)\n if grasp_err < 0.02:\n # np.linalg.norm(data.contact[:]\n loss += 100.0 * gripper_width\n else:\n loss -= 100.0 * gripper_width\n loss += 1000.0 * handle_pos[0]\n\n return loss\n\ndef terminal_cost():\n return 0\n\ndef randomize_param(models, sim):\n\n mean_jnt_axis = models[0].jnt_axis[drawer_hinge_did].ravel()\n mean_jnt_pos = models[0].jnt_pos[drawer_hinge_did].ravel()\n handle_pos = sim.data.site_xpos[handle_sid].ravel()\n tot_models = len(models)\n for i,_model in enumerate(models):\n # _model.jnt_axis[drawer_hinge_did][:] =\n # ax = npr.normal(mean_jnt_axis, 0.1)#, size=(3,))\n if i < tot_models/2:\n ax = npr.normal(np.array([0., 0., 1.]), 0.1)#, size=(3,))\n if i >= tot_models/2:\n ax = npr.normal(np.array([1., 0., 0.]), 0.1)\n # ax = npr.uniform(-1, 1, size=(3,))\n ax /= np.linalg.norm(ax)\n _model.jnt_axis[drawer_hinge_did][:] = ax.copy()\n if np.argmax(ax) == 1 or np.argmax(ax) == 2:\n _model.jnt_type[drawer_hinge_did] = 3\n elif np.argmax(ax) == 0:\n _model.jnt_type[drawer_hinge_did] = 2\n # _model.jnt_pos[drawer_hinge_did][0] = 0.\n # _model.jnt_pos[drawer_hinge_did][1] = npr.uniform(-0.2, 0.2)\n # _model.jnt_pos[drawer_hinge_did][2] = npr.uniform(-0.05, 0.05)\n\ndef update_distribution(sims, probs):\n mean_jnt_axis = 0.0\n mean_jnt_pos = 0.0\n\n for sim, prob in zip(sims, probs):\n mean_jnt_axis += sim.model.jnt_axis[drawer_hinge_did].ravel() * prob\n mean_jnt_pos += sim.model.jnt_pos[drawer_hinge_did].ravel() * prob\n\n mean_jnt_axis /= np.linalg.norm(mean_jnt_axis)\n\n for sim in sims:\n ax = np.random.normal(mean_jnt_axis, 0.01)\n ax /= np.linalg.norm(ax)\n pos = np.random.normal(mean_jnt_pos, 0.01)\n\n sim.model.jnt_axis[drawer_hinge_did][:] = ax\n sim.model.jnt_pos[drawer_hinge_did][:] = pos\n\n if np.argmax(ax) == 1 or np.argmax(ax) == 2:\n sim.model.jnt_type[drawer_hinge_did] = 3\n elif np.argmax(ax) == 0:\n sim.model.jnt_type[drawer_hinge_did] = 2\n\n\ndef main():\n #### --- initial parameters\n num_models = 10\n num_trajectories = 4\n horizon = 40\n final_time = 400\n noise = 0.01\n lam = 0.1\n\n print('Generating the candidate models ...')\n model_pool = []\n for i in range(num_models):\n _model = load_model_from_path(model_path)\n model_pool.append(_model)\n\n print('Randomizing parameters')\n randomize_param(model_pool, sim)\n\n # emppi = MultModelPI2(model_pool, task, terminal_cost,\n # frame_skip=frame_skip,\n # horizon=horizon, num_trajectories=num_trajectories, noise=noise, lam=lam,\n # default_hidden_layers=[128])\n # #default_hidden_layers=[128,64])\n emppi = MultModelMPPI(model_pool, task, terminal_cost,\n frame_skip=frame_skip,\n horizon=horizon, num_trajectories=num_trajectories,\n noise=noise, lam=lam)\n\n while True:\n\n\n sim.reset()\n sim.data.qpos[0] = -0.3\n sim.data.qpos[1] = -1.\n sim.data.qpos[3] = -1.7\n sim.data.qpos[5] = 1.4\n sim.forward()\n\n for t in range(final_time):\n state = sim.get_state()\n ctrl = emppi(state) # TODO: rewrite MPPI with policy and offline learning\n sim.data.ctrl[:] = ctrl\n for _ in range(frame_skip):\n sim.step()\n sensor_measurements = sim.data.sensordata[:]\n emppi.update_distribution(sensor_measurements, state, ctrl)\n if 1/np.sum(np.square(emppi.model_probs)) < emppi.num_tot_trajectories/2:\n # print('resampling!!!!!', 1/np.sum(np.square(emppi.model_probs)), emppi.num_tot_trajectories)\n update_distribution(emppi.pool.sims, emppi.model_probs)\n emppi.model_probs = np.ones(emppi.num_tot_trajectories)\n emppi.model_probs /= 
np.sum(emppi.model_probs)\n            mean_joint_pos = 0.0\n            mean_joint_axis = 0.0\n            _hinge_poses = []\n            _hinge_axis = []\n            _hinge_probs = []\n            norm_prob = np.linalg.norm(emppi.model_probs)\n\n            for _sim, m_prob in zip(emppi.pool.sims, emppi.model_probs):\n                mean_joint_pos += _sim.model.jnt_pos[drawer_hinge_did] * m_prob\n                mean_joint_axis += _sim.model.jnt_axis[drawer_hinge_did] * m_prob\n                _hinge_poses.append(_sim.data.xanchor[drawer_hinge_did].ravel().copy())\n                _hinge_axis.append(_sim.data.xaxis[drawer_hinge_did].ravel().copy())\n                _hinge_probs.append(m_prob.copy())\n                if abs(_hinge_axis[-1])[0] > 0:\n                    rot = np.array([\n                        [0., 0., -1],\n                        [0., 1., 0.],\n                        [1., 0., 0.]\n                    ]).flatten()\n                else:\n                    rot = np.eye(3).flatten()\n                a = _sim.model.jnt_axis[drawer_hinge_did].ravel()\n                a /= np.linalg.norm(a)\n                b = np.array([0.,0., 1.])\n                rot = eulerAnglesToRotationMatrix(a,b)\n                viewer.add_marker(pos=_sim.data.xanchor[drawer_hinge_did].flatten(),\n                            size=np.array([0.01,0.01,0.4]), type=const.GEOM_ARROW, label='',\n                            rgba=np.array([1.,1.,1.,m_prob/norm_prob]),\n                            mat=rot)\n\n            viewer.render()\n            if abs(sim.data.qpos[drawer_hinge_did]) > 1.1:\n                break\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"franka-emppi-data/Simulations/franka-cabinet/test-run.py","file_name":"test-run.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"258201710","text":"###########################################################\n# Package: BlockChain\n# Filename: block\n# Time: Apr 25, 2019 at 9:11:15 PM\n############################################################\n\nclass Block(object):\n    __doc__ = '''This class defines the structure of a block'''\n\n    def __init__(self, version, prevblockhash, merkelroothash, timestamp, nonce):\n        self.version = version                  # Version Number\n        self.prevBlockHash = prevblockhash      # PrevBlockHash\n        self.merkleRootHash = merkelroothash    # Merkle Root Hash\n        self.timeStamp = timestamp              # Time\n        self.nonce = nonce                      # Nonce\n        self.certificateNumber = 0              # Number of certificates\n        self.certificates = []                  # List of certificates\n        self.sign = []                          # sign used in the PBFT\n\n    def add_certificate(self, cert):\n        if not isinstance(cert, Certificate):\n            raise Exception(\"Not a Certificate\")\n\n        self.certificates.append(cert)\n        self.certificateNumber += 1\n\n    def proof_of_work(self):\n        raise NotImplementedError  # To be Done\n\n    def PBFT(self):\n        raise NotImplementedError  # To be Done\n\nclass Certificate(object):\n\n    def __init__(self, version, serial, algorithm, issuer, types,\n                 proxyserver, t1, t2, subject, pka, publickey, sign, timestamp, height, loh):\n        self.version = version                  # Version Number\n        self.serial = serial                    # Serial Number\n        self.types = types                      # Type: Create or Revoke or Update\n        self.algorithm = algorithm              # Algorithm ID\n        self.issuer = issuer                    # Issuer AKA MServer\n        self.proxyServer = proxyserver          # Proxy Server\n        self.notBefore = t1                     # Validity Not Before\n        self.notAfter = t2                      # Validity Not After\n        self.subject = subject                  # subject\n\n        self.publicKeyAlgorithm = pka           # Public Key Algorithm\n        self.subjectPublicKey = publickey       # Public Key Body\n        self.Signature = sign                   # Certificate Signature\n        self.timeStamp = timestamp              # Time Stamp\n        self.currentHeight = height             # Current Height\n        self.LastOperateHeight = loh            # Last Operation Height\n\n    def create_certificate(self):\n        raise 
NotImplementedError\n\n","sub_path":"BlockChain/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"261537969","text":"#!/usr/bin/python\nfrom parser import DatasetParser\nfrom os import listdir\nfrom os.path import isfile, join, walk\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold,svm\nfrom sklearn.tree import DecisionTreeClassifier\nfrom collections import OrderedDict\n\n##### MAIN VARIABLES : START #####\npath = './train'\nBACKGROUND = False\nplot_colors = \"ymcrgbwk\" # The library only allows to use 8 different colors\n###### MAIN VARIABLES : END ###### \n\n\n\n######################## READ FILES : START ############################\n\ninput_data=[]\n\nfolders = [f for f in listdir(path) if not isfile(join(path, f))]\nfor index,folder in enumerate(folders):\n\tsubFolders_users = [f for f in listdir(path+'/'+folder) if not isfile(join(path+'/'+folder, f))]\n\tfor index2,folder2 in enumerate(subFolders_users):\n\t\tfiles = [f for f in listdir(path+'/'+folder+'/'+folder2) if isfile(join(path+'/'+folder+'/'+folder2, f)) and '.txt' in f]\n\t\tfor file in files:\n\t\t\tinput_data.append([index] + DatasetParser().parseFile(path+'/'+folder+'/'+folder2+'/'+file))\n\nn_classes = len(folders)\n######################### READ FILES : END #############################\n\n\n\n\n##################### DATA MODIFICATION : START ########################\n\n# Transform the list into a numpy array\nnumpy_array = np.array(input_data)\n\n# Choose which column is the label and which are the data\ndata = numpy_array[:,1:8]\ny = numpy_array[:,0]\n\n# To apply a classifier on this data, we need to flatten the image, to\n# turn the data in a (samples, feature) matrix:\nn_samples = len(data)\n\n#Manifold Embedding\ntsne = manifold.TSNE(n_components=2, random_state=1)\nX_trans = tsne.fit_transform(data)\n\n# We only take the two corresponding features\nX = X_trans\n\n###################### DATA MODIFICATION : END #########################\n\n\n\n######################## PLOTTING PART: START ##########################\n\nif BACKGROUND == True:\n\t# Parameters\n\tplot_step = 0.01\n\t\n\t# Shuffle\n\tidx = np.arange(X.shape[0])\n\tnp.random.seed(13)\n\tnp.random.shuffle(idx)\n\n\tX = X[idx]\n\ty = y[idx]\n\n\t# Standardize\n\tmean = X.mean(axis=0)\n\tstd = X.std(axis=0)\n\tX = (X - mean) / std\n\n\t# Train\n\tclf = DecisionTreeClassifier().fit(X, y)\n\n\tx_min, x_max = round(X[:, 0].min()-0.1,1), round(X[:, 0].max()+0.1,1)\n\ty_min, y_max = round(X[:, 1].min()-0.1,1), round(X[:, 1].max()+0.1,1)\n\txx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),\n\t\t\t\t\t\t np.arange(y_min, y_max, plot_step))\n\n\tZ = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\tZ = Z.reshape(xx.shape)\n\tcs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)\n\n\tfor i in idx:\n\t\tplt.scatter(X[i, 0], X[i, 1], label=y[i], c=plot_colors[int(y[i])], cmap=plt.cm.Paired)\nelse:\t\n\t\n\tx_min = int(round(X[:, 0].min()-10,-1))\n\tx_max = int(round(X[:, 0].max()+10,-1))\n\ty_min = int(round(X[:, 1].min()-10,-1))\n\ty_max = int(round(X[:, 1].max()+10,-1))\t\n\t\n\tplt.xlim(x_min,x_max)\n\tplt.ylim(y_min,y_max)\n\t\n\tplt.xticks(range(x_min,x_max,10))\n\tplt.yticks(range(y_min,y_max,10))\n\t\n\t\n\tfor i in range(0,len(X)):\n\t\tplt.scatter(X[i, 0], X[i, 1], label=y[i], c=plot_colors[int(y[i])], cmap=plt.cm.Paired)\n\nhandles, labels = 
plt.gca().get_legend_handles_labels()\nby_label = OrderedDict(zip(labels, handles))\n\nplt.legend(by_label.values(), folders, scatterpoints=1,borderpad=1,bbox_to_anchor=[1.15,1.11])\n\nplt.savefig('points.png')\n\n\nplt.show()\n\nplt.close()\n\n######################### PLOTTING PART: END ###########################\n","sub_path":"plotData.py","file_name":"plotData.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"219559023","text":"import pandas as pd\r\nimport os\r\n# -*- coding:utf-8 -*-\r\n\r\ndef remove(file):\r\n data = pd.read_csv(file, encoding='ANSI', thousands=\",\") # read file\r\n data = data.dropna(how='all') # remove empty row\r\n data = data.drop_duplicates(keep=False) # remove duplicates\r\n data = data.drop(columns=[\"業務員\",\r\n \"採購單號(客戶)\",\r\n \"SO號碼\",\r\n \"項次\",\r\n \" 承約量\",\r\n \" 已交貨數量\",\r\n \" 未交貨數量\",\r\n \"淨重\",\r\n \"大小/尺寸\",\r\n \"工廠\",\r\n \" 單位成本\",\r\n \"批次\",\r\n \"項目類別\"], axis=1)\r\n\r\n data.rename(columns={\"單位.1\": 'Unit',\r\n \"客戶名稱\": \"company\",\r\n \"採購單號\": \"purchasing\",\r\n \"料號\": \"item_no\",\r\n \"物料說明\": \"spec\",\r\n \"SO預計交期\": \"ETD\",\r\n \" SO訂購數量\": \"qty\",\r\n \"單價\": \"price\",\r\n \"幣別\": \"currency\",\r\n \"單位\": 'unit',\r\n \"淨重\": 'copper(kg)'}, inplace=True) # change name\r\n\r\n # drop unuseful data\r\n data = data.drop(data[data.item_no == \"TOTAL-AMOUNT\"].index)\r\n data = data.drop(data[data.OK == \"N\"].index)\r\n\r\n # remove spec name\r\n re_name = [\"電纜\", \"銅網遮蔽\", \"移動\", \"黑\"]\r\n for b in re_name:\r\n data['spec'] = data['spec'].astype(str).str.replace(b, \"\").astype(str)\r\n\r\n # remove power cable\r\n trash = ['XLPE', 'FA', 'KNPEV', 'FPNP', 'F-V', 'F-C', 'PVC', 'FI', 'CE', 'CV', 'F-IV', 'PE', \"電纜\", \"PVC\", \"F-CV\", \"銅帶\", \"遮蔽\", \"95mm\", \"MVVS\", \"KNPEV\", \"FPNP\", 'FS-']\r\n\r\n for a in trash:\r\n data = data[data.spec.str.contains(a) == False]\r\n\r\n # change item from Prysmian to Walsin\r\n re_brand = {'TSCGEWOU': 'WS-RLIN', 'SHTOEU': 'WS-RLIN(M)', \"YSLTOE\": 'WS-SPRD', \"GRDGOEU\": 'WS-FSTN', \"0.6/1kV FSTN\": \"0.6/1kV WS-FSTN\", \"0.6/1kV WS- FSTN\": '0.6/1kV WS-FSTN',\r\n \"TSCGEWOU\": 'WS-RLIN',\r\n \"GRDGOU-J\": 'WS-FSTN',\r\n 'GRDCGOU-J': 'WS-FSTN(CH)',\r\n \"600V -2PNCT\": \"600V WS-FSTN-2PNCT\",\r\n \"600V 3PNCT\": '600V WS-3PNCT',\r\n \"600V F\": \"600V WS-FSTN\",\r\n \"600V FSTN\": \"600V WS-FSTN\",\r\n \"600V 2PNCT\": '600V WS-FSTN-2PNCT',\r\n \"0.6/1kV \\": '0.6/1kV WS-FSTN \',\r\n }\r\n for k, v in re_brand.items():\r\n data['spec'] = data['spec'].astype(str).str.replace(k, v).astype(str)\r\n\r\n # re-company with solar\r\n re_solar = {'13094959':'倉發企業', '16223034': '和泰成', '24375479': '台通電訊', '24394433': '東陽能源', '24710776': '達旺電力', '24898645': '冠旭能源', '24907187': '富宬機電',\r\n '25026153': '富陽能開發', '25027462': '久研開發', '25251242': '全盛企業社', '25428467': '大展機電', '26567326': '松泰水電', '27289146': '士能科技',\r\n '27420770': '凱銳光電', '28036177': '聚茂水電', '28453377': '華楷光電', '28551362': '銓泰環能', '28566169': '寶拉珍選', '28895107': '駿州工程',\r\n '28997300': '元晶太陽能', '29099749': '合瑞工程', '36028100': '銘懋工業', '42848416': '巨光綠能', '45090110' : '億祐實業', '45114942': '淨溢',\r\n '4739331': '新宇工程', '47784165': '旭隆科技', '52872478': '永達綠能', '53049919': '向陽能源', '53090347': '向陽優能', '53111259': '舜德機電',\r\n '53151478': '全日能源', '53308507': '豪科能源', '53308865': '普雷嘉工程', '53629070': '寰海國際', '53779491': '人德科技', '53802177': '凱煬太陽能',\r\n '53896195': '鼎承能源', '53906481': '築華科技', '54074561': '日鍊科技', '54075196': '陽光花園', '54089277': 
'兆庭科技', '54104896': '宥軒新能源',\r\n                '54169736': \"宏菻能源\", '54355176': '原生生活', '54692629': '家紳能源', '54760273': '宗泰企業', '54760800': '亮予科技', '54799755': '擎盛光能',\r\n                '54896709': '創亞國際', '54902391': '新綠能源', '54924281': '東森科技', '55873226': '群策能源', '55917322': '綠極能科技', '59729701': '元新新能源',\r\n                '64598004': '地涌能源', '64884436': '新生水電', '64913657': '優立達', '66549697': '宗昇能源', '69552457': '富優工程', '75708007': '臺灣塑膠工業',\r\n                '80466612': '力瑪科技', '89293866': '廣鑫企業', '89643131': '志鋼金屬', '97307792': '捷仕佳', '97614270': '廣集水電', '99558366': '宏益實業社',\r\n                }\r\n    for key, value in re_solar.items():\r\n        data['company'] = data['company'].astype(str).str.replace(key, value).astype(str)\r\n\r\n    # translate company name\r\n    re_com = {'4398474': '貿聯', '4230623': '亞太國際物流', '16120895': '鴻明', '0': \"Riverwalk\", '53589491': '安康驅動'}\r\n    for k, v in re_com.items():\r\n        data['company'] = data['company'].astype(str).str.replace(k, v).astype(str)\r\n\r\n    # transform datatype\r\n    data['price'] = data['price'].astype(str).str.replace(\",\", \"\").astype(float)\r\n\r\n    # transform float into int\r\n    data.price = data.price.astype(int)\r\n\r\n    # del the unreasonable price < 11\r\n    data = data.drop(data[data.price < 11].index, axis=0)\r\n\r\n    # transform unit from KM to M\r\n    data['unit price'] = (data['price'] / 1000).where(data['unit'] == 'KM', data['price'])\r\n\r\n    # set data index name\r\n    data.set_index('company', inplace=True)\r\n\r\n    # sort values\r\n    data.sort_values(by=['spec'], ascending=True)\r\n\r\n    # save file to new name\r\n    data.to_csv('D:/Users/ur07040/PycharmProjects/walsin/clean/clean data/new {}.csv'.format(os.path.split(file)[1]), encoding='ANSI')","sub_path":"clean/sap_clean/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"363129183","text":"#Code will change the pixels of an image to...#\n#recreate a similar image from Obama's 2008...#\n#presidential campaign#\n\nfrom Myro import *\nObamaDarkBlue = makeColor(0,51,76)\nObamaRed = makeColor(217, 26, 33)\nObamaBlue = makeColor(112,150,158)\nObamaYellow = makeColor(252, 227, 166)\npic=makePicture(pickAFile())\nfor pixel in getPixels(pic):\n    gray=getGray(pixel)\n    if gray>180:\n        setColor(pixel,ObamaYellow)\n    elif gray>120:\n        setColor(pixel,ObamaBlue)\n    elif gray>60:\n        setColor(pixel,ObamaRed)\n    else:\n        setColor(pixel,ObamaDarkBlue)\nshow(pic)","sub_path":"Calico/All Star Code/Obamafication/Obamafication.py","file_name":"Obamafication.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"91217151","text":"\"\"\"\n    cutitout: automatically cut silence from videos\n    Copyright (C) 2020 Wolf Clement\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published\n    by the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program. 
If not, see <https://www.gnu.org/licenses/>.\"\"\"\n\n# Margin before and after a clip (in seconds)\n#clip_margin = 0.4\nclip_margin = 0.2\nassert clip_margin >= 0.0\n\n# How loud should noise be to be considered a sound?\naudio_threshold = 0.02\nassert audio_threshold > 0.0 and audio_threshold <= 1.0\n\n# Minimum clip length (in seconds)\n# Sounds shorter than that will be considered noise and cut.\nmin_clip_length = 0.2\nassert min_clip_length > 0.0\n\n# Minimum silence length to skip (in seconds)\nmin_skip_length = 5.0\nassert min_skip_length > 2 * clip_margin\n\n\nimport audioop\nimport subprocess\nimport sys\n\n\ndef get_audio_streams(filename):\n    streams = []\n    probe = subprocess.check_output(\n        [\"ffprobe\", \"-show_streams\", filename],\n        encoding=\"utf-8\",\n        stderr=subprocess.DEVNULL,\n    )\n\n    for line in probe.split(\"\\n\"):\n        if line == \"[STREAM]\":\n            streams.append({})\n\n        try:\n            key, value = line.split(\"=\")\n            streams[-1][key] = value\n        except ValueError:\n            pass\n\n    return list(filter(lambda s: s[\"codec_type\"] == \"audio\", streams))\n\n\ndef print_skips(stream, sample_rate):\n    clips = []\n    clip_index = 0\n    loud_start = -1\n\n    # Get 10ms long audio fragments (* 2 because we get 2 bytes)\n    fragment_length = int(sample_rate * 0.01 * 2)\n\n    chunk_data = stream.read(fragment_length)\n    while chunk_data:\n        # With *signed* 16 bit audio, the maximal absolute value is 2^15 = 32768.\n        volume = audioop.max(chunk_data, 2) / 32768\n\n        if loud_start == -1 and volume >= audio_threshold:\n            loud_start = clip_index\n        elif loud_start != -1 and volume < audio_threshold:\n            # Remove sounds that are too short to be important\n            if clip_index - loud_start > min_clip_length * 100:\n                clips.append((loud_start, clip_index))\n            loud_start = -1\n\n        chunk_data = stream.read(fragment_length)\n        clip_index += 1\n\n    # Turn clips into skips\n    skips = []\n    last_skip = 0.0\n    index_to_time = lambda index: index / 100\n    for clip in clips:\n        clip_start = index_to_time(clip[0])\n        clip_end = index_to_time(clip[1])\n\n        if clip_start - last_skip < min_skip_length:\n            last_skip = clip_end + clip_margin\n        else:\n            skips.append((last_skip + clip_margin, clip_start - clip_margin))\n            last_skip = clip_end + clip_margin\n\n    skips = [\"{\" + f\"{v[0]},{v[1]}\" + \"}\" for v in skips]\n    print(\"return {\" + \",\".join(skips) + \"}\")\n\n\nfor filename in sys.argv[1:]:\n    for stream in get_audio_streams(filename):\n        index = int(stream[\"index\"])\n        sample_rate = int(stream[\"sample_rate\"])\n\n        orig_audio = subprocess.Popen(\n            [\n                \"ffmpeg\",\n                \"-i\",\n                filename,\n                # Output only one channel\n                \"-ac\",\n                \"1\",\n                # Output raw 16bit samples for fast processing\n                \"-f\",\n                \"s16le\",\n                # Open specific audio stream\n                \"-map\",\n                f\"0:{index}\",\n                # Only use one core to avoid making mpv lag\n                \"-threads\",\n                \"1\",\n                # Pipe to orig_audio\n                \"pipe:1\",\n            ],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.DEVNULL,\n        )\n\n        print_skips(orig_audio.stdout, sample_rate)\n\n        # We're only using the first audio stream\n        break\n","sub_path":"shared/stow_user/mpv/.config/mpv/scripts/cutitout_shared/cutitout.py","file_name":"cutitout.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"418346010","text":"def fib1 (n):\n    if n == 0 or n == 1:\n        return 1\n    else:\n        return fib1(n-2) + fib1(n-1)\n\nresult_fib1 = []\nfor i in range(10):\n    result_fib1.append(fib1(i))\nprint('fib1 result:', result_fib1)\n\ndef fib2(n, a = 0, b = 1):\n    if n == 0:\n        return b\n    else:\n        return 
fib2(n-1, b, a+b)\n\nresult_fib2 = []\nfor i in range(10):\n result_fib2.append(fib2(i))\nprint('fib2 result:', result_fib2)\n","sub_path":"171763-2016-172ISSS610G1/linalg_4.py","file_name":"linalg_4.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"317797329","text":"\"\"\"\nIdempotent API for managing python packages\n\"\"\"\nfrom fabtools.python import *\nfrom fabtools.python_distribute import is_distribute_installed, install_distribute\nfrom fabtools.icanhaz import deb\n\n\ndef distribute():\n \"\"\"\n I can haz distribute\n \"\"\"\n deb.package('curl')\n if not is_distribute_installed():\n install_distribute()\n\n\ndef pip(version=None):\n \"\"\"\n I can haz pip\n \"\"\"\n distribute()\n if not is_pip_installed(version):\n install_pip()\n\n\ndef package(pkg_name, virtualenv=None, use_sudo=False):\n \"\"\"\n I can haz python package\n \"\"\"\n pip(\"1.0.2\")\n if not is_installed(pkg_name):\n install(pkg_name, virtualenv=virtualenv, use_sudo=use_sudo)\n\n\ndef packages(pkg_list, virtualenv=None, use_sudo=False):\n \"\"\"\n I can haz python packages\n \"\"\"\n pip(\"1.0.2\")\n pkg_list = [pkg for pkg in pkg_list if not is_installed(pkg)]\n if pkg_list:\n install(pkg_list, virtualenv=virtualenv, use_sudo=use_sudo)\n","sub_path":"fabtools/icanhaz/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"110809829","text":"from utils import *\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Reshape, Conv2D, MaxPooling2D, Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import plot_model\n\nimport matplotlib.pyplot as plt\n#user defined variables\nIMG_SIZE = 32\nBATCH_SIZE = 16\nDATASET_DIR = '/Users/damaro/Desktop/M.Computer Vision/M3.Machine learning/Practices/Databases/MIT_split'\nMODEL_FNAME = 'TestingNetwork.h5'\n\nEPOCHS = 150\ninput_shape = (IMG_SIZE, IMG_SIZE, 3)\n\nif not os.path.exists(DATASET_DIR):\n colorprint(Color.RED, 'ERROR: dataset directory '+DATASET_DIR+' do not exists!\\n')\n quit()\n\n\ncolorprint(Color.BLUE, 'Building MLP model...\\n')\n\n#Build the Multi Layer Perceptron model\nmodel = Sequential()\nmodel.add(Conv2D(32, (3,3), padding='same', activation='relu', input_shape=input_shape))\nmodel.add(Conv2D(32, (3,3), activation='relu'))\nmodel.add(MaxPooling2D( pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3,3), padding='same', activation='relu'))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D( pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(8, activation='softmax'))\n\n\n#model.add(Reshape((IMG_SIZE*IMG_SIZE*3,),input_shape=(IMG_SIZE, IMG_SIZE, 3),name='first'))\n#model.add(Dense(units=2048, activation='relu',name='second'))\n#model.add(Dense(units=1024, activation='relu'))\n#model.add(Dense(units=8, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\nprint(model.summary())\nplot_model(model, to_file='modelTestingNetwork.png', show_shapes=True, 
show_layer_names=True)\ncolorprint(Color.BLUE, 'Done!\\n')\nif os.path.exists(MODEL_FNAME):\n    colorprint(Color.YELLOW, 'WARNING: model file '+MODEL_FNAME+' exists and will be overwritten!\\n')\ncolorprint(Color.BLUE, 'Start training...\\n')\n\n# this is the dataset configuration we will use for training\n# rescaling plus small random shifts and horizontal flips for augmentation\ntrain_datagen = ImageDataGenerator(\n        rescale=1./255,\n        width_shift_range=0.1,\n        height_shift_range=0.1,\n        vertical_flip=False,\n        horizontal_flip=True)\n\n# this is the dataset configuration we will use for testing:\n# only rescaling\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n# this is a generator that will read pictures found in\n# subfolders of 'data/train', and indefinitely generate\n# batches of augmented image data\ntrain_generator = train_datagen.flow_from_directory(\n        DATASET_DIR+'/train',  # this is the target directory\n        target_size=(IMG_SIZE, IMG_SIZE),  # all images will be resized to IMG_SIZExIMG_SIZE\n        batch_size=BATCH_SIZE,\n        classes = ['coast','forest','highway','inside_city','mountain','Opencountry','street','tallbuilding'],\n        class_mode='categorical')  # since we use categorical_crossentropy loss, we need categorical labels\n\n# this is a similar generator, for validation data\nvalidation_generator = test_datagen.flow_from_directory(\n        DATASET_DIR+'/test',\n        target_size=(IMG_SIZE, IMG_SIZE),\n        batch_size=BATCH_SIZE,\n        classes = ['coast','forest','highway','inside_city','mountain','Opencountry','street','tallbuilding'],\n        class_mode='categorical')\n\nhistory = model.fit_generator(\n        train_generator,\n        steps_per_epoch=1881 // BATCH_SIZE,  #should be 1881 for full dataset - 120 small dataset\n        epochs=EPOCHS,\n        validation_data=validation_generator,\n        validation_steps=807 // BATCH_SIZE)  #should be 807 for full dataset - 120 small dataset\n\ncolorprint(Color.BLUE, 'Done!\\n')\ncolorprint(Color.BLUE, 'Saving the model into '+MODEL_FNAME+' \\n')\nmodel.save_weights(MODEL_FNAME)  # always save your weights after training or during training\ncolorprint(Color.BLUE, 'Done!\\n')\n\n # summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.savefig('16BATCH_SIZEaccuracyTesting.jpg')\nplt.close()\n # summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.savefig('16BATCH_SIZElossTesting.jpg')\n\n#Evaluate the model\nresult = model.evaluate_generator(validation_generator)\nprint('Test accuracy: ' + str(result[1]*100) + '%')\nprint('Test loss: ' + str(result[0]))","sub_path":"code_M3_DL_1/testingCNN.py","file_name":"testingCNN.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"229920636","text":"import xarray as xr\nimport numpy as np\n\nfrom xrspatial.classify import quantile\n\n\ndef test_quantile():\n    k = 5\n    n, m = 5, 5\n    agg = xr.DataArray(np.arange(n*m).reshape((n, m)), dims=['x', 'y'])\n    agg['x'] = np.linspace(0, n, n)\n    agg['y'] = np.linspace(0, m, m)\n\n    quantile_agg = quantile(agg, k=5)\n    assert quantile_agg is not None\n\n    print(quantile_agg)\n    print(quantile_agg.mean())\n\n    unique_elements, counts_elements = np.unique(quantile_agg.data,\n                                                 return_counts=True)\n    assert len(unique_elements) == k\n    assert 
len(np.unique(counts_elements)) == 1\n","sub_path":"xrspatial/tests/test_classify.py","file_name":"test_classify.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"350561598","text":"#! /usr/bin/env python3\n# coding: utf-8\nfrom __future__ import print_function\n#from bluepy import btle\nimport myo_dicts\nimport struct\nimport socket\nimport json\nimport time\nimport math\nimport pprint\nimport logging as log\nimport subprocess\nimport sys\nimport os\nimport argparse\nfrom csv_writer import write_to_csv, add_cols\n\n\n\nPATH = os.getcwd()\n\nbusylog = False #decides whether emg/imu notifications will generate log messages.\nlog.basicConfig(filename=PATH+\"/dongleless.log\", filemode = 'w', level = log.CRITICAL, #change log.CRITICAL to log.DEBUG to get log messages\n\t\t\t\tformat='%(asctime)s %(levelname)-8s %(message)s', datefmt='%H:%M:%S')\n\n'''\nConnection class inherits from Peripheral class and takes the MAC address as an\ninitial argument. It creates a Peripheral object and when passed a MAC address the\nconstructor establishes a connection to the device indicated by the MAC address.\n\nand calls the writeCharacteristic(handle, val, withResponse=False)\nmethod 4 times which makes it subscribe to each set of notifications\nThis method writes the data 'val' of type byte in python3 to the characteristic identified by\nthe handle 0x##\nIf withResponse is true, will await confirmation that the write was successful from the device.\n'''\n\nclass Connection(btle.Peripheral):\n\tdef __init__(self, mac):\n\t\tbtle.Peripheral.__init__(self, mac)\n        # writeCharacteristic(handle, val, withResponse=False): writes the data val to the characteristic identified by handle\n        # This is useful if we know the characteristic's GATT handle, but do not have a characteristic object\n\n\t\t# self.writeCharacteristic(0x19, struct.pack(' \"+PATH+\"/scan_results.txt\", shell=True).wait()\n\twith open(PATH+\"/scan_results.txt\") as res:\n\t\tlines = list(res)\n\tlis = []\n\tfor line in lines:\n\t\tprint('line:', line)\n\t\tsp = line.split(' ')\n\t\tif sp[-1] == 'Myo\\n':\n\t\t\tlis.append(sp[0])\n\t\t\tprint('found MYOO')\n\t\t\treturn lis\n\treturn lis\n\n'''\nMain loop:\n1) Scanning for Myo\n2) Connecting to Myo and logging info when unable to connect\n3) Delegating the established connection to the MyoDelegate() object instance, along with the\ndictionary of functions\n4) Wait for notifications from the Myo device with a 3s timeout, if\na notification is received the delegate object's handleNotification() method will be called\nand waitForNotifications returns true.\nwaitForNotifications(timeout)\n\nBlocks until a notification is received from the peripheral, or until the given timeout (in seconds)\nhas elapsed. 
If a notification is received, the delegate object’s handleNotification() method will be called,\nand waitForNotifications() will then return True.\nIf nothing is received before the timeout elapses, this will return False.\n'''\n\ndef run(modes, args):\n# Takes one argument, a dictionary of names of events to functions to be called when they occur.\n\t# Main loop --------\n\twhile True:\n\t\tblacklist = []\n\t\ttry:\n\t\t\tlog.info(\"Initializing bluepy connection.\")\n\t\t\tp=None\n\t\t\twhile not p:\n\t\t\t\tprint('WTF')\n\t\t\t\tx=find_myo_mac(blacklist)\n\t\t\t\tprint('x:', x)\n\t\t\t\tfor mac in x:\n\t\t\t\t\tprint('mac:', mac)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tp = Connection( mac ) # Takes a long time if it's not a myo\n\t\t\t\t\t\tif p:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\texcept btle.BTLEException:\n\t\t\t\t\t\tlog.info(\"Found something that is not a Myo, adding to blacklist and trying again.\")\n\t\t\t\t\t\tlog.debug(\"could not write to %s, ignored\" % mac)\n\t\t\t\t\t\tdel p\n\t\t\t\t\t\tp=None\n\t\t\t\t\t\tblacklist.append(mac)\n\t\t\t\t\t\ttime.sleep(0.5)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlog.info(\"Found Myo at MAC: %s\" % mac)\n\t\t\tp.setDelegate( MyoDelegate(modes, p, args))\n\n\t\t\tlog.info(\"Initialization complete.\")\n\t\t\twhile True:\n\t\t\t\t# break\n\t\t\t\ttry:\n\t\t\t\t\tp.waitForNotifications(3)\n\t\t\t\texcept btle.BTLEException:\n\t\t\t\t\tlog.info(\"Disconnected\")\n\t\t\t\t\tbreak\n\t\texcept KeyboardInterrupt:\n\t\t\tlog.warning(\"KeyboardInterrupt\")\n\t\t\tbreak\n\t\t# except:\n\t\t# log.critical(\"Unexpected error:\", sys.exc_info()[0])\n\tlog.warning(\"Program stopped\")\n\ndef imu_data(myo, quat, accel, gyro):\n #print(\"imu_data:\", quat)\n return\n\ndef emg_data(myo, emg, times=[]):\n\n print(\"emg_data:\", emg)\n\nfunction_dict = {\n\"imu_data\":imu_data,\n\"emg_data\":emg_data,\n\"write_to_csv\":write_to_csv\n#\"prediction\"\n}\n\nif __name__==\"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-name\", type=str, help='Enter name of person')\n\tparser.add_argument(\"-nbr\", type=int, help=\"Enter the number of dataset for that person\")\n\targs = parser.parse_args()\n\n\trun(function_dict, args)\n\tadd_cols('emg_data_{}_{}.csv'.format(args.name, args.nbr))\n","sub_path":"data_acquisition/myo_raw/myo.py","file_name":"myo.py","file_ext":"py","file_size_in_byte":7819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"250310632","text":"#!/usr/bin/python\n\nimport socket\nimport struct\nimport sys\n\nlocal_port = 5006\n\n# Request status\nsend_bytes = b\"LIST\"\n\nif len(sys.argv) == 1:\n\tfamily = socket.AF_INET\n\tconnect_tuple = ( 'localhost', local_port )\nelse:\n\tdetails = socket.getaddrinfo( sys.argv[1], local_port, socket.AF_UNSPEC, socket.SOCK_DGRAM)\n\tfamily = details[0][0]\n\tif family == socket.AF_INET6:\n\t\tconnect_tuple = ( sys.argv[1], local_port, 0, 0)\n\telse:\n\t\tconnect_tuple = ( sys.argv[1], local_port)\n\ns = socket.socket( family, socket.SOCK_DGRAM )\ns.setblocking(0)\ns.connect( connect_tuple )\ns.sendall( send_bytes )\n\ndata = ''\nwhile True:\n\ttry:\n\t\tdata,addr = s.recvfrom(8192)\n\texcept:\n\t\tpass\n\tif data:\n\t\tbreak\n\nprint(data)\ns.close()\n","sub_path":"python/send_list.py","file_name":"send_list.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"301756471","text":"from StockGetFunctions import get_attr_history\r\nimport numpy as np\r\nimport 
matplotlib.pyplot as plt\r\n\r\n# ====== set global variables ======\r\nstart = \"2012-01-01\" # start date in \"YYYY-MM-DD\"\r\nend = \"2017-01-01\" # end date in \"YYYY-MM-DD\"\r\ninterval = \"1d\" # sampling period for stock prices (see yfinance intervals)\r\nticker_list = [\"VOO\", \"^FTSE\", \"VWO\"] # choose tickers to trade\r\nSNP = get_attr_history(\"VOO\", \"Close\", start, end, interval) # get S&P500 series for benchmarking\r\nma_long_interval = 10 # interval to calculate long-term moving average\r\nma_short_interval = 5 # interval to calculate short-term moving average\r\n\r\nseries_list = {} # initialise dictionary of ticker price series\r\nma_list = {}\r\ncash = 10000.0\r\ntrade_factor = 0.05 # factor to modify trade amount\r\n# initialise lists to store history of portfolio data\r\ncash_history = []\r\nholdings_history = []\r\nportfolio_history = []\r\n# ==================================\r\n\r\n\r\n# function to get ma over an entire time series for a given series\r\ndef ma_series(series, ma_period):\r\n ma = np.zeros(len(series))\r\n for i in range(len(series)):\r\n if i > ma_period:\r\n ma[i] = series[i - ma_period : i].mean()\r\n\r\n return ma\r\n\r\n\r\n# populate series and MA lists\r\nfor ticker in ticker_list:\r\n series_list[ticker] = get_attr_history(ticker, \"Close\", start, end, interval).to_numpy()\r\n ma_list[ticker] = [ma_series(series_list[ticker], ma_long_interval),\r\n ma_series(series_list[ticker], ma_short_interval)]\r\n\r\n\r\n# class to define Stock objects for keeping price and moving averages, etc.\r\nclass Stock:\r\n\r\n # initialise stock info\r\n def __init__(self, ticker):\r\n self.ticker = ticker\r\n self.price = 0.0\r\n self.ma_long = 0.0\r\n self.ma_short = 0.0\r\n self.undervalued = False\r\n self.ma_deviation = 0.0\r\n self.position = 0.0\r\n self.trade_amt = 0.0\r\n self.price_yesterday = 0.0\r\n\r\n # update stock ticker info on a given day\r\n def get_info(self):\r\n # update price and position\r\n self.price = prices_today[self.ticker]\r\n if self.price_yesterday == 0.0:\r\n pass\r\n else:\r\n self.position *= self.price/self.price_yesterday # change position according to price change\r\n self.price_yesterday = self.price # move \"yesterday's\" price one day forward\r\n # update MA values\r\n self.ma_long = ma_today[self.ticker][0]\r\n self.ma_short = ma_today[self.ticker][1]\r\n # check if stock is undervalued or not\r\n if self.ma_long > self.ma_short:\r\n self.undervalued = True\r\n else:\r\n self.undervalued = False\r\n # get deviation of short ma from long ma (based on square distance)\r\n if self.ma_long == 0:\r\n self.ma_deviation = 0.0\r\n else:\r\n self.ma_deviation = abs(self.ma_short - self.ma_long) / self.ma_long\r\n # define an amount to trade based on the ma deviation\r\n self.trade_amt = self.position * self.ma_deviation\r\n\r\n def trade(self):\r\n if self.undervalued: # if stock is undervalued, buy more\r\n self.position += self.trade_amt\r\n elif not self.undervalued: # else, sell\r\n # sell amt of stock, or whole position if amt > position\r\n self.position = max(self.position - self.trade_amt, 0.0)\r\n\r\n\r\n# function to buy and sell stocks on a given day\r\ndef trade(stocks_info, cash):\r\n total_dev = 0.0 # variable to store sum of stock deviations for undervalued stocks\r\n #print(\"===New trade===\")\r\n # deal with overvalued stocks (selling) first\r\n for ticker in stocks_info: # loop through all stocks\r\n Stock = stocks_info[ticker]\r\n\r\n if not Stock.undervalued: # if stock is overvalued, sell amt (or whole 
position if amt > position)\r\n #print(f\"==Sell overval stock: {ticker}\")\r\n #print(f\"Cash before trade: {cash}\\nPosition before trade:{Stock.position}\\nTrade amt: {Stock.trade_amt}\")\r\n Stock.trade()\r\n cash += min(Stock.position, Stock.trade_amt)\r\n #print(f\"Cash after trade: {cash}\\nPosition after trade: {Stock.position}\")\r\n elif Stock.undervalued: # count deviation for undervalued stocks\r\n total_dev += Stock.ma_deviation\r\n\r\n buy_amt = cash * trade_factor # set aside cash to buy\r\n\r\n # deal with buying undervalued stocks\r\n for ticker in ticker_list: # loop through all stocks (again)\r\n Stock = stocks_info[ticker]\r\n\r\n if Stock.undervalued: # for undervalued stocks, buy proportionally to MA deviation\r\n #print(\"==Buy underval stock:\", Stock.ticker)\r\n Stock.trade_amt = buy_amt * Stock.ma_deviation / total_dev # set proportional trade amt\r\n #print(f\"Cash before trade: {cash}\\nPosition before trade:{Stock.position}\\nMA deviation: {Stock.ma_deviation}\\nTrade amt: {Stock.trade_amt}\")\r\n Stock.trade() # buy amt of stock\r\n cash -= Stock.trade_amt # reflect purchase in cash reserves\r\n #print(f\"Cash after trade: {cash}\\nPosition after trade: {Stock.position}\")\r\n\r\n return cash\r\n\r\n\r\n\r\n# initialise stock objects from ticker list\r\nstocks_info = {}\r\nfor ticker in ticker_list:\r\n stocks_info[ticker] = Stock(ticker)\r\n\r\n\r\n# simulate real-time prices\r\nfor i in range(len(SNP)):\r\n #print(f\"++++ Day {i} ++++\")\r\n if i < ma_long_interval: # don't trade until MA can be calculated\r\n pass\r\n else:\r\n\r\n # get today's price for each stock in ticker list\r\n prices_today = {} # init prices list dictionary\r\n ma_today = {} # init moving average list dictionary\r\n for ticker in ticker_list:\r\n while True:\r\n try:\r\n prices_today[ticker] = series_list[ticker][i] # for each ticker, set the prices list element to today's price\r\n ma_today[ticker] = [ma_list[ticker][0][i], ma_list[ticker][1][i]] # likewise for moving averages\r\n stocks_info[ticker].get_info() # get stock info for today\r\n break\r\n except IndexError:\r\n print(f\"{ticker} has the wrong number of periods\")\r\n break\r\n # perform trading algorithm (sell overvalued stocks then use portion of cash to buy undervalued stocks)\r\n cash = trade(stocks_info, cash)\r\n\r\n # sum stock positions to get total holdings for the day\r\n holdings_total = 0.0\r\n for ticker in ticker_list:\r\n Stock = stocks_info[ticker]\r\n holdings_total += Stock.position\r\n\r\n # append histories\r\n cash_history.append(cash)\r\n holdings_history.append(holdings_total)\r\n portfolio_history.append(cash + holdings_total)\r\n\r\n# === plot results with matplotlib.pyplot ===\r\n# replace all zero MAs with NaN for prettier graphs\r\nfor ticker in ticker_list:\r\n for i in range(ma_long_interval+1):\r\n ma_list[ticker][0][i] = \"NaN\"\r\n ma_list[ticker][1][i] = \"NaN\"\r\n\r\n# plot stock data\r\nfig, a = plt.subplots(len(ticker_list))\r\ni = 0\r\nfor ticker in ticker_list:\r\n a[i].plot(series_list[ticker], \"k\")\r\n a[i].plot(ma_list[ticker][0], \"b\")\r\n a[i].plot(ma_list[ticker][1], \"r\")\r\n a[i].set_title(ticker)\r\n i += 1\r\n\r\n# print results summary\r\nprint(f\"Trading results from {start} to {end}:\\n_________________________________\")\r\nportfolio_returns = round(portfolio_history[-1] / portfolio_history[0] * 100, 2)\r\nprint(f\"Portfolio returns: {portfolio_returns}%\\n_________________________________\")\r\nmarket_returns_sum = 0.0\r\nfor ticker in ticker_list:\r\n 
market_returns = round(series_list[ticker][-1] / series_list[ticker][0] * 100, 2)\r\n    print(f"Market returns for {ticker}: {market_returns}%")\r\n    market_returns_sum += market_returns\r\nprint(f"_________________________________\\nAverage market return: {round(market_returns_sum/len(ticker_list), 2)}%")\r\nprint(f"S&P500 returns: {round(SNP[-1] / SNP[0] * 100, 2)}%")\r\n\r\n# plot portfolio data\r\nplt.figure()\r\nplt.plot(np.arange(0, len(SNP)), portfolio_history, 'k', label="Total portfolio")\r\nplt.title("Portfolio Values")\r\nplt.plot(np.arange(0, len(SNP)), cash_history, 'b', label="Cash")\r\nplt.plot(np.arange(0, len(SNP)), holdings_history, 'r', label="Equity")\r\n\r\nplt.legend()\r\nplt.show()\r\n","sub_path":"MultiMeanReversion.py","file_name":"MultiMeanReversion.py","file_ext":"py","file_size_in_byte":8373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"601767540","text":"\"\"\"\n@Project :线程\n@Time :2018/8/27 21:26\n@Author :Zhenxian\n@File :数据共享问题.py\n@Software :PyCharm\n\"\"\"\nfrom threading import Thread\nfrom multiprocessing import Process\nimport os\nimport time\n\n\ndef func():\n    global n\n    n = 0\n    print(n)\n\n\nif __name__ == '__main__':\n    n = 100\n    p = Process(target=func)\n    p.start()\n    p.join()\n    print("main process:", n)  # 100\n    print("-------LINE-------")\n    t = Thread(target=func)\n    t.start()\n    t.join()\n    print("main thread:", n)\n","sub_path":"线程/数据共享问题.py","file_name":"数据共享问题.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"216193899","text":"import threading\n\n\nclass ListMonitor:\n\n    def __init__(self):\n        \"\"\"\n        Constructor, set up the list this monitor is wrapping\n        :return:\n        \"\"\"\n        self.listSize = 100000\n        self.listInt = []\n        self.event = threading.Event()\n        self.available = True\n        for i in range(0, self.listSize):\n            self.listInt.append(i)\n\n    def get(self, i):\n        return self.listInt[i]\n\n\n    def set(self, i, j):\n        # Block if not available\n        while not self.available:\n            self.event.wait()\n        # We have been released from block\n        self.available = False\n        # Block other thread's access\n        self.event.clear()\n        self.listInt[i] = j\n        self.available = True\n        # Release access\n        self.event.set()\n\n    def size(self):\n        return self.listSize","sub_path":"ConcurrentFun/ListMonitor.py","file_name":"ListMonitor.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"545876965","text":"import urllib.request\nimport re\nimport os\nimport urllib\nimport ssl\n\n\ndef get_html(url):\n    # urlopen verifies the SSL certificate when opening an https link; disable that verification here.\n    ssl._create_default_https_context = ssl._create_unverified_context\n    page = urllib.request.urlopen(url)\n    html_a = page.read()\n    return html_a.decode('utf-8')\n\n\ndef get_img(html):\n    reg = r'https://[^\\s]*?\\.jpg'\n    image_re = re.compile(reg)\n    image_list = image_re.findall(html)\n    x = 1\n    path = '/Users/baoyongshuai/Documents/pypath'\n    if not os.path.isdir(path):\n        os.makedirs(path)\n    paths = path + '/'\n    for image_url in image_list:\n        image_name = '{0}{1}.jpg'.format(paths, x)\n        urllib.request.urlretrieve(image_url, image_name)\n        x = x + 1\n        print('Image download started; check the folder. Image name: ' + image_name)\n    return image_url\n\n\nhtml_b = 
html_b = get_html('http://tieba.baidu.com/p/6055320747')\nget_img(html_b)\n","sub_path":"crawler/get_images.py","file_name":"get_images.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"405571672","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/hexdutils/intohex.py\n# Compiled at: 2019-08-02 01:46:08\n# Size of source mod 2**32: 1125 bytes\nfrom os import path\nimport sys\nsys.path.insert(0, path.abspath(path.join(path.dirname(__file__), '.')))\nfrom __hex_constants__ import __hex_letters, __alphabet\n\ndef _intohex(number, hex_prefix=False, uppercase=False):\n    if type(number) is not int:\n        raise TypeError('Value to convert must be int (is %s)' % type(number))\n    isneg = None\n    if number < 0:\n        number = -number\n        isneg = True\n\n    def hexdivide(target):\n        if target % 16 > 9:\n            for item in __hex_letters:\n                if __hex_letters[item] == target % 16:\n                    if uppercase:\n                        return item.upper()\n                    return item\n\n            return target % 16\n        return target % 16\n\n    values = []\n    while number // 16 != 0:\n        values.insert(0, hexdivide(number))\n        number = number // 16\n\n    values.insert(0, hexdivide(number))\n    if hex_prefix:\n        return ('-' if isneg else '') + '0x' + ''.join((str(item) for item in values))\n    return ('-' if isneg else '') + ''.join((str(item) for item in values))","sub_path":"pycfiles/hexdutils-1.6.1-py3.7/intohex.cpython-37.py","file_name":"intohex.cpython-37.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"100514696","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense, Subtract, LSTM, Concatenate\n\nclass UserActor:\n\tdef __init__(self):\n\t\tinput_A = Input(shape = 4) #Input of user actor\n\t\tx = Subtract()([input_A[:, 2:], input_A[:, :2]])\n\t\tx = Dense(32, activation = 'relu')(x)\n\t\tx = Dense(64, activation = 'relu')(x)\n\t\tx = Dense(4, activation = 'softmax')(x)\n\n\t\tself.model = Model(input_A, x)\n\t\tself.model.summary()\n\nclass AsstActor:\n\tdef __init__(self, memory_len):\n\t\tinput_B = Input(shape = (memory_len, 6)) #Input of assistant actor/ Output of user actor \n\t\tinput_C = Input(shape = (11,11,1)) #Icon layout/ input of assistant actor\n\t\ta = Dense(32, activation = 'relu')(input_B)\n\t\ta = LSTM(32, activation = 'tanh')(a)\n\n\t\t# b = tf.keras.layers.Conv2D(filters = 2, kernel_size = 3, activation = 'relu')(input_C)\n\t\t# b = tf.keras.layers.MaxPooling2D()(b)\n\t\t# b = tf.keras.layers.Flatten()(input_C)\n\t\t# b = tf.keras.layers.Dense(64, activation = 'relu')(b)\n\t\t# b = tf.keras.layers.Dense(32, activation = 'relu')(b)\n\n\t\t# a = Concatenate()([a, b])\n\t\ta = Dense(32, activation = 'relu')(a)\n\t\ta = Dense(4, activation = 'softmax')(a)\n\n\t\tself.model = Model(inputs = [input_B, input_C], outputs = a)\n\t\tself.model.summary()\n\n\nclass CentralizedCritic:\n\tdef __init__(self, memory_len):\n\t\tinput_A = Input(shape = 4) #Input of user actor\n\t\tinput_B = Input(shape = (memory_len, 6)) #Input of assistant actor/ Output of user actor \n\t\tinput_C = Input(shape = (11,11,1)) #Icon layout/ input of assistant actor\n\t\tinput_D = Input(shape = 4) #Output of assistant actor\n\n\t\tx = Subtract()([input_A[:, 2:], input_A[:, 
:2]])\n\t\tx = Dense(32, activation = 'relu')(x)\n\t\tx = Dense(64, activation = 'relu')(x)\n\t\tx = Dense(32, activation = 'relu')(x)\n\n\t\ty = Dense(32, activation = 'relu')(input_B)\n\t\ty = LSTM(32, activation = 'tanh')(y)\n\n\t\t# z = tf.keras.layers.Conv2D(filters = 2, kernel_size = 3, activation = 'relu')(input_C)\n\t\t# z = tf.keras.layers.MaxPooling2D()(z)\n\t\t# z = tf.keras.layers.Flatten()(input_C)\n\t\t# z = tf.keras.layers.Dense(64, activation = 'relu')(z)\n\t\t# z = tf.keras.layers.Dense(32, activation = 'relu')(z)\n\n\t\tw = Dense(32, activation = 'relu')(input_D)\n\n\t\ty = Concatenate()([x, y, w])\n\t\ty = Dense(32, activation = 'relu')(y)\n\t\ty = Dense(1)(y)\n\n\t\tself.model = Model(inputs = [input_A, input_B, input_C, input_D], outputs = y)\n\t\tself.model.summary()\n\n\n\n","sub_path":"CTDE_with_PPO/Without_Target_Layout/Networks.py","file_name":"Networks.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"335836040","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom django.contrib import messages\nfrom .models import User\nimport bcrypt\n\ndef index(request):\n print('\\n\\n** Index')\n if not 'logged_in' in request.session:\n request.session['logged_in'] = False\n elif request.session['logged_in'] == True:\n return redirect('/dashboard')\n\n return render(request, 'login_registration/index.html')\n\ndef register(request):\n print('\\n\\n** Register')\n print(request.POST)\n\n errors = User.objects.validateNewUser(request.POST)\n\n if len(errors):\n print(\"There are errors! I can't register that! GeddaddaHERE!\")\n for k, message in errors.items():\n messages.error(request, message)\n return redirect('/')\n else:\n pwHash = bcrypt.hashpw(request.POST['password'].encode('utf-8'), bcrypt.gensalt(12))\n user = User.objects.create(first_name = request.POST['first_name'], last_name = request.POST['last_name'], email = request.POST['email'], pass_hash = pwHash.decode())\n print(user)\n print(\"Nice.. you're registered.\")\n request.session['logged_in'] = True\n request.session['user_id'] = user.id\n return redirect('/')\n\ndef login(request):\n print('\\n\\n** Login')\n print(request.POST)\n\n errors = User.objects.validateLogin(request.POST)\n\n if len(errors):\n for k, message in errors.items():\n messages.error(request, message)\n return redirect('/')\n else:\n u = User.objects.get(email = request.POST['email'])\n\n request.session['logged_in'] = True\n print(f'HERE!! 
This is the user object: { u }')\n request.session['user_id'] = u.id\n return redirect('/')\n\ndef success(request):\n print(\"Getting the User to show as successful...\")\n u = User.objects.get(id=request.session[\"user_id\"])\n print(f'User: {u}')\n\n context = {\n 'full_name' : f'{u.first_name}'\n }\n\n return render(request, 'login_registration/success.html', context)\n\ndef logout(request):\n request.session['logged_in'] = False\n request.session['user_id'] = None\n return redirect('/')\n","sub_path":"apps/login_registration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"34216024","text":"from PyQt5 import QtWidgets, QtCore\nfrom fuzzywuzzy import fuzz\nimport modal\n\nfrom lib.settings import SETTINGS, update_settings\n\n\nclass ContactsView(QtWidgets.QWidget):\n def __init__(self, main_window, *args, **kwargs):\n super(ContactsView, self).__init__(*args, **kwargs)\n self.main_window = main_window\n self.layout = QtWidgets.QVBoxLayout()\n self.layout.setSpacing(8)\n\n self.initial_state = SETTINGS # store the settings to restore it if needed\n\n self.title = QtWidgets.QLabel(\"Contacts\")\n self.title.setObjectName(\"viewTitle\")\n self.title.setMinimumHeight(48)\n self.layout.addWidget(self.title)\n\n scroll_area = QtWidgets.QScrollArea()\n scroll_area.setMaximumWidth(960)\n scroll_area.setWidgetResizable(True)\n\n main_layout = QtWidgets.QHBoxLayout()\n main_layout.addWidget(scroll_area)\n\n widget = QtWidgets.QWidget()\n widget.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n\n top_widget = QtWidgets.QWidget()\n top_layout = QtWidgets.QHBoxLayout()\n top_layout.setContentsMargins(0, 0, 0, 0)\n top_widget.setLayout(top_layout)\n self.layout.addWidget(top_widget)\n\n self.search_name = QtWidgets.QLineEdit()\n self.search_name.setObjectName(\"searchContactName\")\n self.search_name.setPlaceholderText(\"Search for a name in your contact list...\")\n self.search_name.installEventFilter(self)\n top_layout.addWidget(self.search_name)\n\n self.add_contact_button = QtWidgets.QToolButton()\n self.add_contact_button.setText(\"\\uf234 Create new contact\")\n self.add_contact_button.clicked.connect(self.create_new_contact)\n top_layout.addWidget(self.add_contact_button)\n\n self.build_contact_list()\n\n # If needed make space below empty\n self.layout.addStretch(1)\n\n self.setLayout(main_layout)\n widget.setLayout(self.layout)\n scroll_area.setWidget(widget)\n\n def build_contact_list(self):\n row = 3\n self.no_contacts = NoContactsView(self)\n self.layout.addWidget(self.no_contacts)\n if len(SETTINGS[\"contacts\"]) > 0:\n self.no_contacts.hide()\n\n self.entries = dict()\n for name, contact in SETTINGS[\"contacts\"].items():\n row += 1\n entry = ContactEntryView(self, name, contact)\n self.entries[name] = entry\n self.layout.addWidget(entry)\n\n def eventFilter(self, obj, event):\n if event.type() == QtCore.QEvent.KeyRelease and obj is self.search_name:\n if self.search_name.hasFocus():\n query = self.search_name.text()\n if len(query) == 0:\n for view in self.entries.values():\n view.show()\n self.no_contacts.hide()\n else:\n num_found = 0\n for name, view in self.entries.items():\n if fuzz.partial_ratio(name.lower(), query.lower()) >= 60:\n view.show()\n num_found += 1\n else:\n view.hide()\n self.no_contacts.setVisible(num_found == 0)\n return super().eventFilter(obj, event)\n\n def add_contact(self, name, email):\n contact = 
dict()\n SETTINGS[\"contacts\"][name] = contact\n contact[\"email\"] = dict()\n contact[\"email\"][\"address\"] = email\n entry = ContactEntryView(self, name, contact)\n self.entries[name] = entry\n self.layout.insertWidget(2, entry)\n self.search_name.clear()\n for view in self.entries.values():\n view.show()\n self.no_contacts.hide()\n self.update_contact_list()\n\n def remove_contact(self, name):\n if name in SETTINGS[\"contacts\"]:\n del SETTINGS[\"contacts\"][name]\n if name in self.entries:\n self.entries[name].close()\n del self.entries[name]\n\n def rename_contact(self, old_name, new_name):\n if old_name in SETTINGS[\"contacts\"]:\n if new_name in SETTINGS[\"contacts\"]:\n modal.ModalMessageWindow(\n self.main_window,\n f\"Contact with name {new_name} is already stored in the this contact list.\",\n \"Failed to rename contact!\",\n modal.MSG_ERROR,\n )\n return False\n\n contact = SETTINGS[\"contacts\"][old_name]\n del SETTINGS[\"contacts\"][old_name]\n SETTINGS[\"contacts\"][new_name] = contact\n if old_name in self.entries:\n entry = self.entries[old_name]\n del self.entries[old_name]\n self.entries[new_name] = entry\n return True\n else:\n print(f\"contact `{old_name}` is not in contact list, this is likely a bug!\")\n return False\n\n def update_contact_list(self):\n update_settings(\"../config/contacts\", SETTINGS[\"contacts\"])\n self.main_window.set_info_message(\"Contacts saved!\")\n\n def create_new_contact(self):\n create_modal = CreateContactModal(self.main_window, self.search_name.text())\n create_modal.create_callback = lambda: self.add_contact(create_modal.name.text(), create_modal.email.text())\n\n\nclass CreateContactModal(modal.ModalWindow):\n def __init__(self, parent, name):\n self.name = name\n self.create_callback = None\n super(CreateContactModal, self).__init__(parent)\n\n def build_layout(self):\n layout = QtWidgets.QGridLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setVerticalSpacing(12)\n layout.setHorizontalSpacing(0)\n\n label_icon = QtWidgets.QLabel(\"\\uf234\")\n label_icon.setObjectName(\"modalIconBlue\")\n\n label_title = QtWidgets.QLabel(\"Create a new contact\")\n label_title.setObjectName(\"modalTitle\")\n\n self.name = QtWidgets.QLineEdit(self.name)\n self.name.setPlaceholderText(\"Enter the name of the contact...\")\n self.name.setObjectName(\"hMargin\")\n\n self.email = QtWidgets.QLineEdit(\"\")\n self.email.setPlaceholderText(\"Enter the email address to your contact...\")\n self.email.setObjectName(\"hMargin\")\n\n self.modal_frame.setMinimumWidth(480)\n\n self.bottom_layout = self.build_bottom_layout()\n bottom_frame = QtWidgets.QFrame()\n bottom_frame.setObjectName(\"modalBottomFrame\")\n bottom_frame.setLayout(self.bottom_layout)\n\n name_label = QtWidgets.QLabel(\"Name:\")\n name_label.setObjectName(\"hMargin\")\n email_label = QtWidgets.QLabel(\"Email:\")\n email_label.setObjectName(\"hMargin\")\n\n layout.addWidget(label_icon, 0, 0)\n layout.addWidget(label_title, 0, 1, 1, 2)\n layout.addWidget(name_label, 1, 0)\n layout.addWidget(self.name, 1, 1, 1, 2)\n layout.addWidget(email_label, 2, 0)\n layout.addWidget(self.email, 2, 1, 1, 2)\n layout.addWidget(bottom_frame, 3, 0, 1, 3)\n layout.setColumnStretch(1, 1)\n return layout\n\n def build_bottom_layout(self):\n bottom_layout = QtWidgets.QHBoxLayout()\n bottom_layout.setContentsMargins(12, 12, 12, 12)\n bottom_layout.setSpacing(12)\n\n create_button = QtWidgets.QToolButton()\n create_button.setText(\"Create\")\n create_button.clicked.connect(self.answer_create)\n cancel_button 
= QtWidgets.QToolButton()\n cancel_button.setText(\"Cancel\")\n cancel_button.clicked.connect(self.close_window)\n\n bottom_layout.addStretch(1)\n bottom_layout.addWidget(create_button)\n bottom_layout.addWidget(cancel_button)\n return bottom_layout\n\n def answer_create(self):\n if self.create_callback:\n self.create_callback()\n self.close_window()\n\n\nclass ContactEntryView(QtWidgets.QFrame):\n def __init__(self, contact_view, name, contact):\n super(ContactEntryView, self).__init__()\n layout = QtWidgets.QGridLayout()\n self.contact_view = contact_view\n\n self.contact = contact\n self.name = name\n\n self.name_edit = QtWidgets.QLineEdit(name)\n self.name_edit.setObjectName(\"fontBold\")\n self.name_edit.installEventFilter(self)\n layout.addWidget(self.name_edit, 0, 0)\n\n self.email_address_edit = QtWidgets.QLineEdit(\"\")\n self.email_address_edit.setObjectName(\"fontFaint\")\n if contact[\"email\"] and contact[\"email\"][\"address\"]:\n self.email_address_edit.setText(contact[\"email\"][\"address\"])\n self.email_address_edit.installEventFilter(self)\n layout.addWidget(self.email_address_edit, 1, 0)\n\n self.save_button = QtWidgets.QToolButton()\n self.save_button.setObjectName(\"iconButton\")\n self.save_button.setText(\"\\uf00c\")\n self.save_button.clicked.connect(self.save_contact)\n layout.addWidget(self.save_button, 0, 1, 2, 1)\n\n self.restore_button = QtWidgets.QToolButton()\n self.restore_button.setObjectName(\"iconButton\")\n self.restore_button.setText(\"\\uf00d\")\n self.restore_button.clicked.connect(self.restore_contact)\n layout.addWidget(self.restore_button, 0, 2, 2, 1)\n\n self.remove_button = QtWidgets.QToolButton()\n self.remove_button.setObjectName(\"iconButton\")\n self.remove_button.setText(\"\\uf1f8\")\n self.remove_button.clicked.connect(self.remove_contact)\n layout.addWidget(self.remove_button, 0, 3, 2, 1)\n\n self.save_button.hide()\n self.restore_button.hide()\n\n self.setLayout(layout)\n\n def eventFilter(self, obj, event):\n if event.type() == QtCore.QEvent.KeyRelease:\n if obj is self.name_edit or obj is self.email_address_edit:\n if self.name_edit.hasFocus() or self.email_address_edit.hasFocus():\n new_name = self.name_edit.text()\n new_email_address = self.email_address_edit.text()\n email_address = self.contact[\"email\"][\"address\"]\n if (not new_name == self.name) or (not new_email_address == email_address):\n self.save_button.show()\n self.restore_button.show()\n else:\n self.save_button.hide()\n self.restore_button.hide()\n\n if event.type() == QtCore.QEvent.KeyPress:\n if obj is self.name_edit or obj is self.email_address_edit:\n if event.key() == QtCore.Qt.Key_Return:\n if self.name_edit.hasFocus() or self.email_address_edit.hasFocus():\n self.save_contact()\n return True\n return super().eventFilter(obj, event)\n\n def remove_contact(self):\n del_modal = modal.ModalYesNoQuestionWindow(\n self.contact_view.main_window,\n f\"Are your sure you want to delete `{self.name}` from your contacts?\",\n \"Delete contact?\",\n )\n del_modal.yes_callback = self.remove_contact_impl\n\n def remove_contact_impl(self):\n self.contact_view.remove_contact(self.name)\n self.save_button.hide()\n self.restore_button.hide()\n self.contact_view.update_contact_list()\n\n def save_contact(self):\n is_dirty = False\n new_name = self.name_edit.text()\n if not new_name == self.name:\n if self.contact_view.rename_contact(self.name, new_name):\n self.contact = SETTINGS[\"contacts\"][new_name]\n self.name = new_name\n is_dirty = True\n\n new_email_address = 
self.email_address_edit.text()\n if not new_email_address == self.contact[\"email\"][\"address\"]:\n self.contact[\"email\"][\"address\"] = new_email_address\n is_dirty = True\n\n if is_dirty:\n self.contact_view.update_contact_list()\n self.save_button.hide()\n self.restore_button.hide()\n\n def restore_contact(self):\n self.name_edit.setText(self.name)\n self.email_address_edit.setText(self.contact[\"email\"][\"address\"])\n self.save_button.hide()\n self.restore_button.hide()\n\n\nclass NoContactsView(QtWidgets.QFrame):\n def __init__(self, contacts_view):\n super(NoContactsView, self).__init__()\n self.contacts_view = contacts_view\n\n layout = QtWidgets.QGridLayout()\n self.add_contact = QtWidgets.QToolButton()\n self.add_contact.setText(\"\\uf234 Create new contact\")\n self.add_contact.clicked.connect(self.contacts_view.create_new_contact)\n\n self.label = QtWidgets.QLabel(\"No contacts found\")\n layout.addWidget(self.label, 1, 1, 1, 1, QtCore.Qt.AlignCenter)\n layout.addWidget(self.add_contact, 2, 1, 1, 1, QtCore.Qt.AlignCenter)\n layout.setColumnStretch(0, 1)\n layout.setColumnStretch(2, 1)\n layout.setRowStretch(0, 1)\n layout.setRowStretch(3, 1)\n self.setMinimumHeight(128)\n self.setLayout(layout)\n\n\n# NOTE(alexander): DEV mode entry point only!!!\nif __name__ == \"__main__\":\n from main import initialize_app\n import sys\n\n appctxt, window = initialize_app()\n window.set_active_view(4)\n exit_code = appctxt.app.exec_()\n sys.exit(exit_code)\n","sub_path":"src/main/python/contacts_view.py","file_name":"contacts_view.py","file_ext":"py","file_size_in_byte":13353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"172592715","text":"# -*- coding: utf-8 -*-\n\nimport pickle\nimport random\nimport re\nimport time\n\nimport pandas as pd\n\nfrom scrapy import Spider, Request\nfrom scrapy.exceptions import CloseSpider\n\nfrom items import DzenItem\n\n\nclass DzenSpider(Spider):\n name = 'dzen_spider'\n handle_httpstatus_list = [302, 403]\n\n start_urls = [\"https://zen.yandex.ru/top\"]\n\n # def start_requests(self):\n # urls = [\"https://zen.yandex.ru/t/авто?clid=300&token=\",\n # \"https://zen.yandex.ru/t/анекдоты?clid=300&token=\",\n # \"https://zen.yandex.ru/t/гаджеты?clid=300&token=\",\n # \"https://zen.yandex.ru/t/еда?clid=300&token=\",\n # \"https://zen.yandex.ru/t/животные?clid=300&token=\",\n # \"https://zen.yandex.ru/t/знаменитости?clid=300&token=\",\n # \"https://zen.yandex.ru/t/интересныефакты?clid=300&token=\",\n # \"https://zen.yandex.ru/t/интерьер?clid=300&token=\",\n # ]\n\n def sleep(self):\n time.sleep(2 + 5 * random.random())\n\n def parse(self, response):\n if response.status == 302:\n print(response.xpath(\"//body\").extract())\n return\n\n my_str = response.xpath('//body//script')[0].extract()\n\n channels = re.findall('\"feed_link\":\"https://zen.yandex.ru/t/[^\"]+\"', my_str)\n random.shuffle(channels)\n\n for res in channels:\n url = res.split('\"')[-2]\n yield Request(url, callback=self.parse_source)\n\n def parse_source(self, response):\n topic = response.xpath('//meta[@name=\"description\"]/@content').extract_first()\n\n # sources = response.xpath('//a[@class=\"card-channel-link _is-link\"]//@href').extract()\n # for source in sources:\n # yield Request(source, callback=self.parse_channel, flags=[topic])\n\n script_str = response.xpath('//body//script')[0].extract()\n for article_url in re.findall(r'\"link\":\"https://zen.yandex.ru/media/[^\"]+\"', script_str):\n url = 
article_url.split('\"')[-2].split(\"?\")[0]\n yield Request(url, callback=self.parse_article, flags=[topic, False])\n\n new_data = re.search(r'\"link\":\"https://zen.yandex.ru/api/v3/launcher/more[^\"]+\"', script_str) \\\n .group(0).split('\"')[-2]\n\n yield Request(new_data, callback=self.parse_newdata, flags=[topic, False])\n\n def parse_newdata(self, response):\n script_str = response.text\n\n for article_url in re.findall(r'\"link\":\"https://zen.yandex.ru/media/[^\"]+\"', script_str):\n url = article_url.split('\"')[-2].split(\"?\")[0]\n yield Request(url, callback=self.parse_article, flags=response.request.flags)\n\n new_data = re.search(r'\"link\":\"https://zen.yandex.ru/api/v3/launcher/more[^\"]+\"', script_str) \\\n .group(0).split('\"')[-2]\n yield Request(new_data, callback=self.parse_newdata, flags=response.request.flags)\n\n def parse_channel(self, response):\n topic = response.request.flags[0]\n for url in response.xpath('//a[@class=\"card-image-view__clickable\"]//@href').extract():\n yield Request(url, callback=self.parse_article, flags=[topic, True])\n\n def parse_article(self, response):\n text = \" \".join(response.xpath('//p//text()').extract()).strip()\n if not text:\n return\n\n item = DzenItem()\n item['text'] = text\n item['title'] = response.xpath('//h1//text()').extract_first()\n item['url'] = response.url\n item['topic'] = response.request.flags[0]\n item['from_channel'] = response.request.flags[1]\n\n if response.url.startswith(\"https://zen.yandex.ru/\"):\n item['source'] = re.search(r'\"authorName\":\"[^\"]+\"', response.xpath('//script[@id=\"init_data\"]')\n .extract_first())[0].split('\"')[-2]\n else:\n item['source'] = response.url.split(\"/\")[2]\n\n yield item\n","sub_path":"dzen/spiders/dzenSpider.py","file_name":"dzenSpider.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"191701367","text":"\"\"\"empty message\n\nRevision ID: 23061db18d3f\nRevises: 8a0bc5d2a79f\nCreate Date: 2019-12-17 00:40:52.025373\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '23061db18d3f'\ndown_revision = '8a0bc5d2a79f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('username', sa.String(length=128), nullable=True))\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.drop_index('ix_user_hash_id', table_name='user')\n op.drop_column('user', 'hash_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('user', sa.Column('hash_id', mysql.VARCHAR(length=256), nullable=True))\n    op.create_index('ix_user_hash_id', 'user', ['hash_id'], unique=True)\n    op.drop_index(op.f('ix_user_username'), table_name='user')\n    op.drop_column('user', 'username')\n    # ### end Alembic commands ###\n","sub_path":"application/migrations/versions/23061db18d3f_.py","file_name":"23061db18d3f_.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"184481794","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Box(Sprite):\n\tdef __init__(self, screen, sttn, pos, collided=None):\n\t\tsuper(Box, self).__init__()\n\n\t\tself.screen, self.sttn = screen, sttn\n\t\tself.rect = pygame.Rect((0, 0, self.sttn.size, self.sttn.size))\n\t\tself.rect.center = pos\n\t\tself.x, self.y = pos\n\n\t\tself.collided = collided\n\n\tdef check_collisions(self, blocks, pos):\n\t\tfor block in blocks:\n\t\t\tif block.rect.collidepoint(pos):\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef check_places(self, places):\n\t\tcollides = pygame.sprite.spritecollide(self, places, False)\n\t\tif collides:\n\t\t\tcollides[0].active = True\n\t\t\tself.collided = collides[0]\n\t\telif self.collided is not None:\n\t\t\tself.collided.active = False\n\t\t\tself.collided = None\n\n\tdef move(self, drct, boxes, walls, places):\n\t\tpos = self.x + drct[0] * self.sttn.size, self.y + drct[1] * self.sttn.size\n\n\t\tif not self.check_collisions(boxes, pos) or not self.check_collisions(walls, pos):\n\t\t\treturn False\n\n\t\tif pos[0] < self.sttn.u_width and pos[0] > 0 and pos[1] < self.sttn.u_height and pos[1] > 0:\n\t\t\tself.x, self.y = pos\n\t\t\tself.rect.center = pos\n\t\t\tself.check_places(places)\n\t\t\treturn True\n\n\t\treturn False\n\n\tdef draw(self):\n\t\tpygame.draw.rect(self.screen, self.sttn.box_color, self.rect)\n\t\tpygame.draw.rect(self.screen, self.sttn.background_color, self.rect, 1)\n","sub_path":"sprites/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"281638011","text":"\"\"\"\n\" Similar script to worker2D2, but the outer servers are classified by the organization they belong to.\n\" The org info is fetched from the IP-DB and is cached in data/IPDB.log.\n\" The logic is defined under src.util.IPInfo.py\n\" Input: the target filename\n\" Output: the topK result collected from the target file\n\" By Zhengping on 2019-01-10\n\"\"\"\n\nfrom collections import Counter\nimport json\n\ndef doCollectTask(filename, topK):\n    \"\"\"\n    Collect the topK result from filename\n    :param filename: target filename\n    :param topK: number of top entries to select (None returns all)\n    :return: top K count result.\n    \"\"\"\n    IPDBFilename = \"../../data/IPDB.log\"\n    with open(IPDBFilename, 'r') as f:\n        IPDB = json.load(f)\n    f = open(filename)\n    dataDict = json.load(f)\n    weirdOutCollect = Counter()\n    for key in dataDict:\n        if dataDict[key][\"weird\"]:\n            # Check direction first to get the inner server.\n            srcIP = dataDict[key][\"addr\"][0]\n            dstIP = dataDict[key][\"addr\"][2]\n            try:\n                if srcIP.startswith(\"136.159.\"):\n                    # Which means srcIP is within our campus, so 
it should be an outbound traffic\n weirdOutCollect[IPDB[dstIP][\"org\"]] += 1\n else:\n weirdOutCollect[IPDB[srcIP][\"org\"]] += 1\n except KeyError:\n print(\"Info not found %s.\" % (dataDict[key]))\n\n return Counter(dict(weirdOutCollect.most_common(topK)))\n","sub_path":"src/integUtil/worker2D6WeirdOutORGCG.py","file_name":"worker2D6WeirdOutORGCG.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"446912738","text":"import os\nimport logging\nimport re\nimport time \n\nfrom PyPDF2 import PdfFileReader\nfrom util import BormeUtil\nfrom entities import Acto,MyBorme\n\nlogger = logging.getLogger(\"SeccionAParser\")\n\n# Generic PDF Reader as text\nclass PDFReader(object):\n\tdef __init__(self, filename):\n\t\tif not os.path.isfile(filename):\n\t\t\traise IOError(\"File not found: {}\".format(filename))\n\t\tself.filename = filename\n\n\tdef open(self):\n\t\tself.fp=open(self.filename, \"rb\")\n\t\tself.reader=PdfFileReader(self.fp)\n\t\tself.current_page=0\n\t\t#\n\t\tself._read_next_page_lines()\n\n\tdef close(self):\n\t\tself.fp.close()\n\t\tself.reader=None\n\t\tself.current_page=None\n\n\tdef _read_next_page_lines(self):\n\t\tif self.current_page < self.reader.getNumPages():\n\t\t\tlogger.debug(\"PDFReader._read_next_page_lines.Reading page {} of {}\".format(self.current_page+1, self.reader.getNumPages()))\n\t\t\taux = self.reader.getPage(self.current_page).getContents().getData()\n\t\t\tif isinstance(aux, bytes): # Python 3\n\t\t\t\taux=aux.decode(\"unicode_escape\")\n\t\t\tself.current_page_lines=aux.split(\"\\n\")\n\t\t\tself.current_page=self.current_page+1\n\t\t\tself.current_line=0\n\t\telse:\n\t\t\tlogger.debug(\"PDFReader._read_next_page_lines.No more pages. 
{} pages already read\".format(self.current_page))\n\t\t\tself.current_page_lines=None\n\t\t\tself.current_line=0\n\n\tdef read_next_line(self):\n\t\tif not self.current_page_lines:\n\t\t\tlogger.debug(\"PDFReader.read_next_line.Content already exhausted.\")\n\t\t\treturn None\n\t\tif self.current_line >= len(self.current_page_lines): ##read next page if available\n\t\t\tlogger.debug(\"PDFReader.read_next_line.Page {} exhausted\".format(self.current_page))\n\t\t\tself._read_next_page_lines()\n\t\t\tif not self.current_page_lines:\n\t\t\t\tlogger.debug(\"SeccionAParser.read_next_line.No more lines in the file\")\n\t\t\t\treturn None\n\t\t#\n\t\tline=self.current_page_lines[self.current_line]\n\t\tself.current_line=self.current_line+1\n\t\t#\n\t\tlogger.debug(\"PDFReader.read_next_line.Line read: {}.{}.{}\".format(self.current_page,self.current_line,line))\n\t\treturn line;\n\n# PDF Reader formatting texts (accents, Tj decode) and ignoring lines not relevant in BORME\nclass BormePDFReader(object):\n\tdef __init__(self, filename):\n\t\tself.pdf_reader = PDFReader(filename)\n\n\tdef open(self):\n\t\tself.pdf_reader.open()\n\n\tdef close(self):\n\t\tself.pdf_reader.close()\n\n\tdef read_next_line(self):\n\t\twhile True:\n\t\t\tline=self.pdf_reader.read_next_line()\n\t\t\t#\n\t\t\tif line:\n\t\t\t\t#\n\t\t\t\tline=BormeUtil.strip_accents(line);\n\t\t\t\t#\n\t\t\t\tif line.endswith(\"Tj\"):\n\t\t\t\t\tline=line[1:-3];\n\t\t\t\t\tline=line.replace(\"\\(\",\"(\").replace(\"\\)\",\")\");\n\t\t\t\t\tline=line+\"Tj\";\n\t\t\t\t#\n\t\t\t#\n\t\t\tif line==None or line==\"BT\" or line==\"ET\" or line.endswith(\" BDC\") or line.endswith(\"Tj\") or line.endswith(\" Tf\"):\n\t\t\t\tlogger.debug(\"BormePDFReader.read_next_line.Line read: {}\".format(line))\n\t\t\t\treturn line;\n\nclass SeccionAParser(object):\n\n\tTYPE_DATE=1\n\tTYPE_CAPITAL=2\n\tTYPE_ADDRESS=3\n\t\n\t#Anuncios to be parsed and formatting details.\n\t_anuncio_keywords_with_1_arg={\n\t\t\"CONSTITUCION\": {\n\t\t\t\"COMIENZO DE OPERACIONES\":TYPE_DATE,\n\t\t\t\"OBJETO SOCIAL\":None,\n\t\t\t\"DOMICILIO\":TYPE_ADDRESS,\n\t\t\t\"CAPITAL\":TYPE_CAPITAL,\n\t\t\t\"PATRIMONIO DEL FONDO\":TYPE_CAPITAL,\n\t\t\t\"CAPITAL SUSCRITO\":TYPE_CAPITAL,\n\t\t\t\"DESEMBOLSADO\":TYPE_CAPITAL,\n\t\t\t\"DURACION\":None\n\t\t\t}\n\t}\n\n\t_OPEN_BOLD=\"[[\"\n\t_CLOSE_BOLD=\"]]\"\n\n\tdef __init__(self, filename):\n\t\tself.reader=BormePDFReader(filename)\n\t\tself.actos=[]\n\t\tself.properties={}\n\n\tdef parse(self):\n\t\tlogger.debug(\"SeccionAParser.parse.Begin\")\n\t\t#\n\t\tself.reader.open()\n\t\tline=self.reader.read_next_line()\n\t\twhile line != None:\n\t\t\taux = self._parse_since_line(line)\n\t\t\tif aux:\n\t\t\t\tline=aux;\n\t\t\telse:\n\t\t\t\tline=self.reader.read_next_line()\n\t\tself.reader.close()\n\t\t#\n\t\tborme=MyBorme(self.properties[\"/Fecha\"],self.properties[\"/Provincia\"],self.actos)\n\t\t#\n\t\tself.actos=[]\n\t\tself.properties={}\n\t\t#\n\t\tlogger.debug(\"SeccionAParser.parse.Done\")\n\t\t#\n\t\treturn borme;\n\n\tdef _parse_since_line(self,line):\n\t\tif line.startswith(\"/Cabecera_acto\"):\n\t\t\treturn self._parse_cebecera_acto(line);\n\t\telif line==\"BT\":\n\t\t\tremaining,text,bdc=self._parse_bt(line);\n\t\t\tif remaining: \n\t\t\t\treturn remaining;\n\t\t\tif bdc or text: \n\t\t\t\tlogger.debug(\"SeccionAParser._parse_since_line.bt: {},{}\".format(bdc,text))\n\t\t\t\tif bdc==\"/Fecha\":\n\t\t\t\t\ttext=BormeUtil.parse_long_date(text)\n\t\t\t\tself.properties[bdc]=text\n\t\treturn None;\n\n\tdef _parse_bt(self, line, 
reading=\"BT\"):\n\t\tlogger.debug(\"SeccionAParser._parse_bt.Begin: {}\".format(reading))\n\t\t#\n\t\ttext=\"\"\n\t\tbdc=\"\"\n\t\tbuffer=\"\"\n\t\tbold=False\n\t\t#\n\t\tline=self.reader.read_next_line()\n\t\twhile line:\n\t\t\twhile line and line!=\"ET\":\n\t\t\t\tif line==\"/F1 8 Tf\": # Begin bold text\n\t\t\t\t\tbold=True\n\t\t\t\t\tif buffer:\n\t\t\t\t\t\ttext=text+SeccionAParser._clean_bt_text(buffer)\n\t\t\t\t\t\tbuffer=\"\"\n\t\t\t\telif line==\"/F2 8 Tf\" and bold: # End bold text\n\t\t\t\t\tbold=False\n\t\t\t\t\tif buffer:\n\t\t\t\t\t\tif reading==\"/Texto_acto\":\n\t\t\t\t\t\t\ttext=text+SeccionAParser.format_as_bold_text(buffer)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttext=text+SeccionAParser._clean_bt_text(buffer)\n\t\t\t\t\t\tbuffer=\"\"\n\t\t\t\telif line.endswith(\"Tj\"):\n\t\t\t\t\tclean_text=line[:-2];\n\t\t\t\t\tbuffer=buffer+clean_text\n\t\t\t\telif line.endswith(\"BDC\"):\n\t\t\t\t\tbdc=line.split(\" \")[0].strip();\n\t\t\t\tline=self.reader.read_next_line()\n\t\t\t# Check file structure\n\t\t\tif not line or line!=\"ET\":\n\t\t\t\tlogger.warn(\"SeccionAParser._parse_bt.ET expected. Found: {}\".format(line))\n\t\t\t\treturn line,None,None;\n\t\t\t# Skip new page header if reading acto\n\t\t\tif reading==\"/Texto_acto\":\n\t\t\t\tline=self.reader.read_next_line() # Read text right after ET\n\t\t\t\twhile line and not line.startswith(\"/Cabecera_acto\") and not line.startswith(\"/Texto_acto\"):\n\t\t\t\t\tline=self.reader.read_next_line()\n\t\t\tif not line or not (reading==\"/Texto_acto\" and line.startswith(\"/Texto_acto\")):\n\t\t\t\t# Append remaining content since last /F2\n\t\t\t\tif buffer:\n\t\t\t\t\tif not bold or reading!=\"/Texto_acto\": #Append non-bold conent in buffer\n\t\t\t\t\t\ttext=text+SeccionAParser._clean_bt_text(buffer)\n\t\t\t\t\telse: #Append bold conent in buffer\n\t\t\t\t\t\ttext=text+SeccionAParser.format_as_bold_text(buffer)\n\t\t\t\t# If not reading /Texto_acto, line might be None or ET. ET has already been processed, so we return None.\n\t\t\t\tif reading!=\"/Texto_acto\":\n\t\t\t\t\tline=None;\n\t\t\t\t#\n\t\t\t\tlogger.debug(\"SeccionAParser._parse_bt.End: {},{},{},{}\".format(line,text,bdc,reading))\n\t\t\t\treturn line,text,bdc;\n\n\t#Split in case bold text has more than one key \n\t#Tokens with : are ignored in roder not to have invalid keys\n\tdef format_as_bold_text(buffer):\n\t\taux=SeccionAParser._clean_bt_text(buffer)\n\t\tlist=aux.split(\".\")\n\t\tlist2=[]\n\t\tfor s in list:\n\t\t\tif not \":\" in s:\n\t\t\t\tlist2.append(s)\n\t\taux=((SeccionAParser._CLOSE_BOLD+SeccionAParser._OPEN_BOLD).join(list2))\n\t\taux=SeccionAParser._OPEN_BOLD+aux+SeccionAParser._CLOSE_BOLD\n\t\treturn aux;\n\n\tdef _parse_cebecera_acto(self,line):\n\t\tlogger.debug(\"SeccionAParser._parse_cebecera_acto.Begin:{}\".format(line))\n\t\tline=self.reader.read_next_line()\n\t\tif not line or line!=\"BT\":\n\t\t\tlogger.warn(\"SeccionAParser._parse_cebecera_acto.BT Expected. Found: {}\".format(line));\n\t\t\treturn line;\n\t\t#\n\t\tremaining,cabecera_acto_text,bdc=self._parse_bt(line,reading=\"/Cabecera_acto\");\n\t\tif remaining: \n\t\t\treturn remaining;\n\t\t#\n\t\tline=self.reader.read_next_line()\n\t\tif not line or not line.startswith(\"/Texto_acto\"):\n\t\t\tlogger.warn(\"SeccionAParser._parse_cebecera_acto./Texto_acto Expected. 
Found: {}\".format(line));\n\t\t\treturn line;\n\t\treturn self._parse_texto_acto(line,cabecera_acto_text);\n\n\tdef _parse_texto_acto(self,line,cabecera_acto_text):\n\t\tlogger.debug(\"SeccionAParser._parse_texto_acto.Begin:{}\".format(line))\n\t\tline=self.reader.read_next_line()\n\t\tif not line or line!=\"BT\":\n\t\t\tlogger.warn(\"SeccionAParser._parse_texto_acto.BT Expected. Found: {}\".format(line));\n\t\t\treturn line;\n\t\t#\n\t\tremaining,texts,bdc=self._parse_bt(line,reading=\"/Texto_acto\");\n\t\t#\n\t\tlogger.debug(\"SeccionAParser._parse_texto_acto.End:{},{}\".format(cabecera_acto_text,texts))\n\t\t#\n\t\tnew_acto=SeccionAParser._instantiate_acto(cabecera_acto_text,texts)\n\t\tif new_acto:\n\t\t\tself.actos.append(new_acto)\n\t\t#\n\t\treturn remaining;\n\n\tdef _instantiate_acto(cabecera_acto_text,texts):\n\t\tlogger.debug(\"SeccionAParser._instantiate_acto: {},{}\".format(cabecera_acto_text,texts))\n\t\t#\n\t\tsplit=re.split(\"^\\s*([0-9]+)\\s*\\-\\s*(.*)$\",cabecera_acto_text)\n\t\tif len(split)!=4: \n\t\t\tlogger.warn(\"SeccionAParser._instantiate_acto.Error parsing cabecera acto: {}\".format(cabecera_acto_text));\n\t\t\treturn None;\n\t\t#\n\t\tid=split[1]\n\t\tempresa=BormeUtil.clean_company(split[2])\n\t\t#\n\t\tanuncios={}\n\t\ttokens1=texts.split(SeccionAParser._OPEN_BOLD)\n\t\tif len(tokens1)==1:\n\t\t\tanuncios[SeccionAParser._clean_key(tokens1[0])]=None\n\t\telse:\n\t\t\tfor t1 in tokens1:\n\t\t\t\tt1=t1.strip()\n\t\t\t\tif t1: ## Son pares de la forma aa]]bb\n\t\t\t\t\ttokens2=t1.split(SeccionAParser._CLOSE_BOLD)\n\t\t\t\t\tif len(tokens2)==1:\n\t\t\t\t\t\tanuncios[SeccionAParser._clean_key(tokens2[0])]=None\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=SeccionAParser._clean_key(tokens2[0]).upper()\n\t\t\t\t\t\tv=SeccionAParser._clean_key(tokens2[1])\n\t\t\t\t\t\t#\n\t\t\t\t\t\tif k in SeccionAParser._anuncio_keywords_with_1_arg:\n\t\t\t\t\t\t\tanuncios[k]=SeccionAParser._create_dictionary(k,v,SeccionAParser._anuncio_keywords_with_1_arg[k])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tanuncios[k]=v\n\t\t#\n\t\treturn Acto(id,empresa,anuncios)\n\n\tdef _create_dictionary(k,v,subkeys):\n\t\t#\n\t\tkeywords=\"|\".join(subkeys); expression=\"(\"+keywords+\")(((?!\"+keywords+\").)*)(?i)\"\n\t\taux=re.split(expression,v)\n\t\t#\n\t\td={};i=0;\n\t\twhile (i < (len(aux)-1)/4):\n\t\t\tkey=SeccionAParser._clean_key(aux[i*4+1]).upper();\n\t\t\tvalue=SeccionAParser._clean_key(aux[i*4+2]);\n\t\t\t#\n\t\t\tif subkeys[key]:\n\t\t\t\tif subkeys[key]==SeccionAParser.TYPE_DATE:\n\t\t\t\t\tvalue=BormeUtil.clean_date(value)\n\t\t\t\telif subkeys[key]==SeccionAParser.TYPE_CAPITAL:\n\t\t\t\t\tvalue=BormeUtil.clean_capital(value)\n\t\t\t\telif subkeys[key]==SeccionAParser.TYPE_ADDRESS:\n\t\t\t\t\tvalue=BormeUtil.clean_address(value)\n\t\t\t#\n\t\t\td[key]=value\n\t\t\t#\n\t\t\tif aux[i*4+4].strip() and aux[i*4+4].strip()!=\".\":\n\t\t\t\tlogger.warn(\"Constitucion information lost: {} for: {},{}\".format(aux[i*4+4],k,v))\n\t\t\t#\n\t\t\ti+=1;\n\t\t#\n\t\treturn d\n\n\tdef _clean_bt_text(text):\n\t\ttext=text.replace(\" \",\" \");\n\t\ttext=text.strip();\n\t\tif text.endswith(\".\") or text.endswith(\":\"):\n\t\t\ttext=text[0:-1]; text=text.strip();\n\t\t\tif text.endswith(\".\") or text.endswith(\":\"):\n\t\t\t\ttext=text[0:-1]; text=text.strip();\n\t\treturn text;\n\n\tdef _clean_key(s):\n\t\treturn s.strip(\" \\t.:-\");\n\nif __name__ == \"__main__\":\n\tlogger.setLevel(logging.DEBUG)\n\tch = 
logging.StreamHandler()\n\tch.setLevel(logging.INFO)\n\tlogger.addHandler(ch)\n\t#\n\tparser=SeccionAParser(\"sampleFiles/pdfs/BORME-A-2017-86-01.pdf\");\n\tborme=parser.parse()\n\tprint(\"BORME Read: {}.{}\".format(borme.date,borme.provincia))\n\tactos=borme.actos;\n\tfor acto in actos: \n\t\tprint(acto.id,acto.empresa)\n\t\tfor k1 in acto.anuncios: \n\t\t\tvalue=acto.anuncios[k1]\n\t\t\tif value:\n\t\t\t\tif isinstance(value,dict):\n\t\t\t\t\tfor k2 in value:\n\t\t\t\t\t\tv2=value[k2]\n\t\t\t\t\t\tprint(\" {} ->{} ->{}\".format(k1,k2,v2))\n\t\t\t\telse:\n\t\t\t\t\tprint(\" {} ->{}\".format(k1,value))\n\t\t\telse:\n\t\t\t\tprint(\" {}\".format(k1))\n\ttime.sleep(30)\n","sub_path":"scripts/seccion_a_parser.py","file_name":"seccion_a_parser.py","file_ext":"py","file_size_in_byte":10823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"167584963","text":"import requests\nimport json\nimport re\nimport time\nfrom requests.cookies import RequestsCookieJar\nfrom cnki.CnkiSpider import verify\nfrom selenium import webdriver\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import quote\nimport csv\nimport setting as st\n\n\nclass CnkiFrom:\n\n def __init__(self):\n self.now_time = time.strftime(\"%m%d_%H%M\", time.localtime(time.time()))\n self.csv_file_name = self.now_time + \"_from.csv\"\n\n def get_data(self, keyword):\n '''\n 通过关键词搜索拿到列表数据\n '''\n all_data = []\n search = keyword\n driver = webdriver.Chrome()\n # 使用selenium打开知网网页进行搜索\n driver.get(\"http://kns.cnki.net/kns/brief/default_result.aspx\")\n driver.find_element_by_xpath('//*[@id=\"Ecp_top_login\"]/a/i').click()\n driver.find_element_by_id('Ecp_TextBoxUserName').send_keys(st.CNKI_USER)\n driver.find_element_by_id('Ecp_TextBoxPwd').send_keys(st.CNKI_PASSWD)\n driver.find_element_by_id('Ecp_Button1').click()\n\n # 大量js python无法执行大量js 所以获取浏览器的cookies 用于requests\n # 记得给每个请求加上cookies\n dcookies = driver.get_cookies()\n cookies = RequestsCookieJar()\n for cookie in dcookies:\n cookies.set(cookie['name'], cookie['value'])\n\n time.sleep(3)\n driver.find_element_by_xpath('//*[@id=\"txt_1_sel\"]/option[5]').click()\n driver.find_element_by_xpath('//*[@id=\"txt_1_value1\"]').send_keys(search)\n driver.find_element_by_xpath('//*[@id=\"btnSearch\"]').click()\n time.sleep(3)\n s = quote(search)\n # 请求头信息\n self.header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n 'Upgrade-Insecure-Requests': '1',\n 'Host': 'kns.cnki.net',\n 'Referer': 'http://kns.cnki.net/kns/brief/default_result.aspx',\n }\n self.data = {\n 'pagename': 'ASP.brief_default_result_aspx',\n 'dbPrefix': 'SCDB',\n 'dbCatalog': '中国学术文献网络出版总库',\n 'ConfigFile': 'SCDBINDEX.xml',\n 'research': 'off',\n 't': '1532051820667',\n 'keyValue': '{}'.format(search),\n 'S': '1'\n }\n proxyHost = st.proxyHost\n proxyPort = st.proxyPort\n\n # 代理隧道验证信息\n proxyUser = st.proxyUser\n proxyPass = st.proxyPass\n\n proxyMeta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\n \"host\": proxyHost,\n \"port\": proxyPort,\n \"user\": proxyUser,\n \"pass\": proxyPass,\n }\n\n proxies = {\n \"http\": proxyMeta,\n \"https\": proxyMeta,\n }\n self.contentUrl = 'http://kns.cnki.net/KCMS/detail/detail.aspx?dbcode={}&dbname={}&' \\\n 'filename={}&uid=WEEvREcwSlJHSldTTEYzU3EycEhmbWZBTkJwa0xJQ3dMWkdpWnd' \\\n 'MWWdDRT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&' \\\n 
'v=MTk1OTJUM3FUcldNMUZyQ1VSTEtmWU9kbUZ5cm5VNzNNSmlYVGJMRzRIOVhOclk5RlpvUjhlWDFMdXhZUzdEaDE='\n\n page_url = 'http://kns.cnki.net/kns/brief/brief.aspx?curpage={}&' \\\n 'RecordsPerPage=20&QueryID=1&ID=&turnpage=1&tpagemode=L&' \\\n 'dbPrefix=SCDB&Fields=&DisplayMode=listmode&PageName=ASP.brief_default_result_aspx'\n # 文章api\n api_url = 'http://kns.cnki.net/kns/brief/brief.aspx?pagename=ASP.brief_default_result_aspx&' \\\n 'dbPrefix=SCDB&dbCatalog=%e4%b8%ad%e5%9b%bd%e5%ad%a6%e6%9c%af%e6%96%87%e7%8c%ae%e' \\\n '7%bd%91%e7%bb%9c%e5%87%ba%e7%89%88%e6%80%bb%e5%ba%93&ConfigFile=SCDBINDEX.xml&' \\\n 'research=off&t=1532051820667&keyValue={}&S=1'\n\n start_urls = api_url.format(s)\n req = requests.get(start_urls, headers = self.header, data = self.data, cookies = cookies).text\n soup = BeautifulSoup(req, 'lxml')\n MaxPage = soup.select('.countPageMark')[0].get_text().split('/')[1] # 获取最大页数\n count = 0\n for i in range(1, int(MaxPage) + 1):\n print(\"正在抓取第%d页内容...\" % i)\n time.sleep(2)\n page_data = {\n 'curpage': '{}'.format(i),\n 'RecordsPerPage': '20',\n 'QueryID': '1',\n 'ID': '',\n 'turnpage': '1',\n 'tpagemode': 'L',\n 'dbPrefix': 'SCDB',\n 'Fields': '',\n 'DisplayMode': 'listmode',\n 'PageName': 'ASP.brief_default_result_aspx'\n }\n try:\n Get = requests.get(page_url.format(i), headers = self.header, data = page_data, cookies = cookies,\n proxies = proxies) # 请求翻页数据\n\n if Get.status_code in [400, 402, 404]:\n print(\"请检查ip隧道是否到期....\")\n\n # 破解验证码\n # if '请输入验证码' in Get.text:\n # print(\"正在识别验证码....\")\n # verify.getCkCode(self.header, cookies)\n # vc = verify.ydmInit()\n # if len(vc) is 5:\n # if count == 5:\n # time.sleep(10)\n # if count == 10:\n # print(\"知网已经爬虫封禁,将已下载内容保存,请检查ip隧道和打码平台是否都到期,然后重新尝试下载...\")\n # break\n # verify.botCheck(page_url.format(i)[19:], vc, self.header, cookies, i)\n # Get = requests.get(page_url.format(int(i)), headers = self.header, data = page_data,\n # cookies = cookies, proxies = proxies)\n except Exception as e:\n print(e)\n continue\n soup = BeautifulSoup(Get.text, 'lxml')\n table = soup.select('.GridTableContent tr')[1:21]\n if len(table) == 0:\n count += 1\n else:\n count = 0\n for i in table:\n try:\n filename = i.select('.fz14')[0]['href'].split('=')[4].replace('&DbName', '') # 文章页面所需要的三个参数\n dbname = i.select('.fz14')[0]['href'].split('=')[5].replace('&DbCode', '')\n dbcode = i.select('.fz14')[0]['href'].split('=')[6].replace('&yx', '')\n ContTitle = i.select('.fz14')[0].get_text() # 标题\n author = re.compile('(.*?)').findall(str(i)) # 作者\n datetime = i.select('td')[4].get_text().strip() # 时间\n datetime = self.time_exchange(datetime)\n contUrl = self.contentUrl.format(dbcode, dbname, filename) # 论文url\n dicts = self.getCont(contUrl, ContTitle, author, datetime, cookies)\n all_data.append(dicts)\n except Exception as e:\n print(e)\n continue\n\n print(\"%s关键词内容抓取完成...\" % keyword)\n driver.quit()\n\n self.save_from_data(keyword, all_data)\n file_name = self.now_time + \"_from_%s\" % keyword + \".txt\"\n with open(\"cnki/output/\" + file_name, \"w\") as f:\n f.write(json.dumps(all_data))\n\n # 抓取论文摘要数据\n def getCont(self, contUrl, ContTitle, author, datetime, cookies):\n reqs = requests.get(contUrl, headers = self.header, allow_redirects = False, cookies = cookies)\n soup = BeautifulSoup(reqs.text, 'lxml')\n try:\n abstract = soup.select('#ChDivSummary')[0].get_text() # 摘要\n except IndexError:\n abstract = None\n company = etree.HTML(reqs.text).xpath('//*[@id=\"mainArea\"]/div[3]/div[1]/div[2]/span/a/text()') # 单位\n mark = 
etree.HTML(reqs.text).xpath('//*[@id=\"mainArea\"]/div[3]/div[3]/div[2]/div[2]/p[1]/a/text()') # source\n        dicts = {\n            'ContTitle': ContTitle,\n            'ContUrl': contUrl,\n            'author': \",\".join(author),\n            'mark': \",\".join(mark),\n            'datetime': datetime,\n            'abstract': abstract,\n            'company': \",\".join(company),\n        }\n        print(dicts)\n        return dicts\n\n    def time_exchange(self, t):\n        try:\n            if \":\" in t:\n                time_ = time.strptime(t, \"%Y-%m-%d %H:%M\")\n            else:\n                time_ = time.strptime(t, \"%Y-%m-%d\")\n            time_ = time.strftime(\"%Y%m%d\", time_)\n        except (TypeError, ValueError):\n            time_ = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n        return time_\n\n    def time_in(self, time_):\n        start = st.START\n        end = st.END\n        if start == \"\" or end == \"\":\n            return True\n        start_time = time.strptime(start, \"%Y%m%d\")\n        end_time = time.strptime(end, \"%Y%m%d\")\n        c_time = time.strptime(time_, \"%Y%m%d\")\n        return start_time <= c_time and end_time >= c_time\n\n    def save_from_data(self, keyword, all_data):\n        with open(\"cnki/output/by_from/\" + self.csv_file_name, \"a\", encoding = 'utf-8', newline = '') as csv_file:\n            csv_writer = csv.writer(csv_file)\n            print(\"%s has %d articles\" % (keyword, len(all_data)))\n            for dicts in all_data:\n                journal = dicts[\"mark\"]\n                from_ = dicts[\"company\"]\n                title = dicts[\"ContTitle\"]\n                author = dicts[\"author\"]\n                time_ = dicts[\"datetime\"]\n                article_abstract = dicts[\"abstract\"]\n                if not self.time_in(time_):\n                    continue\n                content_link = dicts[\"ContUrl\"]\n                row = [keyword, journal, author, from_, time_, title, article_abstract, content_link]\n                csv_writer.writerow(row)\n\n    def crawl_cnki_by_from(self):\n        froms = st.FROMS\n        with open(\"cnki/output/by_from/\" + self.csv_file_name, \"w\", encoding = 'utf-8', newline = '') as csv_file:\n            csv_writer = csv.writer(csv_file)\n            header_row = [\"keyword\", \"journal\", \"author\", \"from\", \"time\", \"title\", \"article_abstract\", \"content_link\"]\n            csv_writer.writerow(header_row)\n\n        for f in froms:\n            self.get_data(f)\n","sub_path":"cnki/CnkiSpider/cnki_from.py","file_name":"cnki_from.py","file_ext":"py","file_size_in_byte":10566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"524807183","text":"import math\n\ndef calcula_gaussiana(x, mi, sigma):\n    # Gaussian PDF: (1/(sigma*sqrt(2*pi))) * e**(-0.5*((x-mi)/sigma)**2); note base e, not 10\n    neg_hf = -0.5\n    exp = neg_hf*((x-mi)/sigma)**2\n    return ( 1/(sigma*math.sqrt(2*math.pi)) )*math.exp(exp)\n\n
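# sanity checks: the first call should print 1/sqrt(2*pi), roughly 0.3989\n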
print(calcula_gaussiana(0, 0, 1))\nprint(calcula_gaussiana(0, 1, 1/(2*math.pi)))","sub_path":"gaussiana/ch3_2020_09_02_20_29_44_905708.py","file_name":"ch3_2020_09_02_20_29_44_905708.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"624010446","text":"\"\"\"\n2. Write a function is_year_leap that takes a single argument (a year) and returns True if the year is a\nleap year, and False otherwise.\n\n\"\"\"\n\n\ndef is_year_leap(year_now):\n    if not year_now % 400:\n        return True\n    if not year_now % 4:\n        if not year_now % 100:\n            return False\n        else:\n            return True\n    return False\n\n\nprint(is_year_leap(int(input('Enter a year: '))))\n","sub_path":"lesson_8/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"21462488","text":"import OpenGL.GL as gl\r\nimport OpenGL.GLU as glu\r\nimport OpenGL.GLUT as glut\r\n\r\nfrom src.Core import coreInstance\r\nfrom src.Core.AssetManage.Models import Models\r\nfrom src.Core.Device.Camera import Camera\r\n\r\n\r\ndef RenderEntity(entity):\r\n    position = entity.pos\r\n    modelName = entity.render.modelName\r\n    size = entity.render.size\r\n    dimensions = entity.render.dimensions\r\n    color = entity.render.color\r\n\r\n    #if hasattr(entity, 'name') and entity.name == 'Ball':\r\n    #    print(position)\r\n    gl.glLoadIdentity()\r\n    gl.glTranslate(position[0], position[1], position[2])\r\n    gl.glColor(color[0], color[1], color[2])\r\n    gl.glScalef(dimensions[0], dimensions[1], dimensions[2])\r\n    glut.glutSolidCube(size)\r\n\r\n\r\ndef Rendering(*x):\r\n    gl.glClear(gl.GL_COLOR_BUFFER_BIT)\r\n    gl.glLoadIdentity()\r\n\r\n    camPos = Camera.position\r\n\r\n    glu.gluLookAt(\r\n        camPos[0], camPos[1], camPos[2],\r\n        0, 0, 0,\r\n        0, 1, 0)\r\n    for modelname, value in Models.modelNameToModel.items():\r\n        # set model.shader\r\n        # set model.texture\r\n        for entityId in coreInstance.tagToEntityIdGroup[modelname]:\r\n            entity = coreInstance.entities[entityId]\r\n            RenderEntity(entity)\r\n\r\n    glut.glutSwapBuffers()\r\n","sub_path":"src/Core/ECS/System/sysRendering.py","file_name":"sysRendering.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"602122814","text":"from django.db.models.signals import pre_delete, post_delete\nfrom django.dispatch import receiver\nfrom .models import SourceFile, SourceTrack, StaticMix, DynamicMix\n\n@receiver(pre_delete,\n          sender=SourceFile,\n          dispatch_uid='delete_temp_file_signal')\ndef delete_temp_file(sender, instance, using, **kwargs):\n    \"\"\"Pre-delete signal to delete temporary uploaded file on disk before deleting instance.\"\"\"\n    if instance.file:\n        instance.file.delete()\n\n    if instance.youtube_fetch_task:\n        instance.youtube_fetch_task.delete()\n\n@receiver(post_delete,\n          sender=SourceTrack,\n          dispatch_uid='delete_source_track_signal')\ndef delete_source_track(sender, instance, using, **kwargs):\n    \"\"\"Post-delete signal to delete the source track file on disk before deleting instance.\"\"\"\n    if instance.source_file:\n        instance.source_file.delete()\n\n@receiver(pre_delete,\n          sender=StaticMix,\n          dispatch_uid='delete_static_mix_signal')\ndef delete_static_mix(sender, instance, using, **kwargs):\n    \"\"\"\n    Pre-delete signal to delete the static mix file on disk before deleting instance.\n\n    Cannot be post-delete or else submitting a separation task with 'overwrite' flag does\n    not work.\n    \"\"\"\n    if instance.file:\n        instance.file.delete()\n\n@receiver(pre_delete,\n          sender=DynamicMix,\n          dispatch_uid='delete_dynamic_mix_signal')\ndef delete_dynamic_mix(sender, instance, using, **kwargs):\n    if instance.vocals_file:\n        instance.vocals_file.delete()\n    if instance.other_file:\n        instance.other_file.delete()\n    if instance.bass_file:\n        
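# the bass and drums stems get the same on-disk cleanup as the vocals and other stems\n        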
instance.bass_file.delete()\n if instance.drums_file:\n instance.drums_file.delete()\n","sub_path":"api/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"37326270","text":"\"\"\"\n====================================\ncontour(X, Y, Z) / contourf(X, Y, Z)\n====================================\n\nSee `~matplotlib.axes.Axes.contour` / `~matplotlib.axes.Axes.contourf`.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use('mpl_plot_gallery')\n\n# make data\nX, Y = np.meshgrid(np.linspace(-3, 3, 256), np.linspace(-3, 3, 256))\nZ = (1 - X/2. + X**5 + Y**3) * np.exp(-X**2 - Y**2)\nZ = Z - Z.min()\nlevels = np.linspace(np.min(Z), np.max(Z), 7)\n\n# plot\nfig, ax = plt.subplots()\n\nax.contourf(X, Y, Z, levels=levels)\nax.contour(X, Y, Z, levels=levels, colors=\"white\", linewidths=0.5)\n\nplt.show()\n","sub_path":"plot_types/arrays/contourf.py","file_name":"contourf.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"388917132","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status as http_status\n\nfrom edu_api.libs.geetest import GeetestLib\nfrom user.utils import get_user_by_account\n\npc_geetest_id = \"1ea3ed8b35299a931b6a3883ec4a05be\"\npc_geetest_key = \"9a13879615c1ae2500e356417cd5bcf9\"\n\nclass CaptchaAPIView(APIView):\n \"\"\"\n 滑块验证码\n \"\"\"\n user_id = 0\n status = False\n\n # pc端获取验证码的方法\n def get(self,request,*args,**kwargs):\n username = request.query_params.get('username')\n user = get_user_by_account(username)\n\n if user is None:\n return Response({'message':'用户不存在'},status=http_status.HTTP_400_BAD_REQUEST)\n\n self.user_id = user.id\n\n # 验证码的实例化对象\n gt = GeetestLib(pc_geetest_id, pc_geetest_key)\n self.status = gt.pre_process(self.user_id)\n\n response_str = gt.get_response_str()\n return Response(response_str)\n\n # pc端基于前后端分离校验验证码\n def post(self, request, *args, **kwargs):\n \"\"\"验证验证码\"\"\"\n gt = GeetestLib(pc_geetest_id, pc_geetest_key)\n challenge = request.POST.get(gt.FN_CHALLENGE, '')\n validate = request.POST.get(gt.FN_VALIDATE, '')\n seccode = request.POST.get(gt.FN_SECCODE, '')\n if self.user_id:\n result = gt.success_validate(challenge, validate, seccode, self.user_id)\n else:\n result = gt.failback_validate(challenge, validate, seccode)\n result = {\"status\": \"success\"} if result else {\"status\": \"fail\"}\n return Response(result)","sub_path":"edu_api/edu_api/apps/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"100081426","text":"# coding: utf-8\n#########################################################################\n# 网站: 疯狂Java联盟 #\n# author yeeku.H.lee kongyeeku@163.com #\n# #\n# version 1.0 #\n# #\n# Copyright (C), 2001-2018, yeeku.H.Lee #\n# #\n# This program is protected by copyright laws. #\n# #\n# Program Name: #\n# #\n#
Date: #\n#########################################################################\nfrom tkinter import *\n# 导入ttk\nfrom tkinter import ttk\nclass App:\n def __init__(self, master):\n self.master = master\n self.initWidgets()\n def initWidgets(self):\n # 创建Labelframe容器\n self.lf = ttk.Labelframe(self.master, padding=20)\n self.lf.pack(fill=BOTH, expand=YES, padx=10, pady=10)\n # 创建一个显示图片的Label\n bm = PhotoImage(file='images/z.png')\n lb = Label(self.lf, image=bm)\n lb.bm = bm\n # 将Labelframe的标题设为显示图片的Label\n self.lf['labelwidget'] = lb\n # 定义代表Labelframe的标题位置的12个常量\n self.books = ['e', 's', 'w', 'n', 'es', 'ws', 'en', 'wn',\n 'ne', 'nw', 'se', 'sw']\n i = 0\n self.intVar = IntVar()\n # 使用循环创建多个Radiobutton,并放入Labelframe中\n for book in self.books:\n Radiobutton(self.lf, text= book,\n value=i,\n command=self.change,\n variable=self.intVar).pack(side=LEFT)\n i += 1\n self.intVar.set(9)\n def change(self):\n # 通过labelanchor选项改变Labelframe的标题的位置\n self.lf['labelanchor'] = self.books[self.intVar.get()]\nroot = Tk()\nroot.title(\"Labelframe测试\")\n# 改变窗口图标\nroot.iconbitmap('images/fklogo.ico')\nApp(root)\nroot.mainloop()\n","sub_path":"官方配套代码/11/11.5/Labelframe_test2.py","file_name":"Labelframe_test2.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"203963000","text":"import itertools\nfrom jinja2 import Environment, FileSystemLoader\nimport os\nimport shutil\nimport sys\nfrom helper import *\n\nDIST = \"dist\"\nREPO = \"webplates/php\"\n\nVERSIONS = [\"5.5.38\", \"5.6.24\", \"7.0.9\"]\nBUILDS = [\"fpm\"]\nDISTROS = [\"alpine\"]\n\nEXCLUSIONS = set(itertools.product(VERSIONS, [\"apache\"], [\"alpine\"]))\n\nMATRIX = set(itertools.filterfalse(lambda x: x in EXCLUSIONS, itertools.chain(\n itertools.product(VERSIONS, BUILDS, [None]),\n itertools.product(VERSIONS, [None], DISTROS),\n itertools.product(VERSIONS, [None], [None]),\n itertools.product(VERSIONS, BUILDS, DISTROS)\n)))\n\nNODE = [\"6.3.1\"]\n\n# Prepare Jinja\nenv = Environment(loader=FileSystemLoader(os.path.dirname(os.path.realpath(__file__))))\n\n# Clear the dist folder\nif os.path.isdir(DIST):\n shutil.rmtree(DIST, ignore_errors=True)\nos.mkdir(DIST)\n\npaths = []\ntags = []\n\n# Build node containers\ntemplate = env.get_template('Dockerfile-node.template')\n\nfor element in MATRIX:\n for version in NODE:\n docker = template.render(parent=matrix_join(element, \"-\"), version=version, distro=element[2])\n path = DIST + \"/\" + matrix_join((minorize(element[0]),) + element[1:] + (\"node\", minorize(version)), \"/\")\n dockerfile = path + \"/Dockerfile\"\n os.makedirs(path, exist_ok=True)\n with open(dockerfile, \"w\") as f:\n f.write(docker)\n paths.append(path)\n tags.append(set(get_tags(element, itertools.product([\"node\"], [majorize(version), minorize(version), version]))))\n\nwith open(\".auth\", \"r\") as f:\n token = f.readline().rstrip()\n\ndelete_builds(REPO, token)\nadd_builds(REPO, token, paths, tags)\n\nFORMAT = \"%-35s %s\"\nprint (FORMAT % (\"PATH\", \"TAG\"))\n\nfor c1, c2 in zip(paths, tags):\n for tag in c2:\n print (\"%-35s %s\" % (c1, tag))\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"232711904","text":"\"\"\"\nProvides the moliorweb websocket message handler.\n\"\"\"\n\nimport logging\nimport asyncio\nimport json\nfrom pathlib import Path\n\nfrom .app import app\nfrom 
.messagetypes import Subject, Event, Action\nfrom .livelogger import LiveLogger\n\nBUILD_OUT_PATH = Path(\"/var/lib/molior/buildout\")\nlogger = logging.getLogger(\"molior-web\") # pylint: disable=invalid-name\n\n\nasync def start_livelogger(websocket, data):\n \"\"\"\n Starts the livelogger for the given\n websocket client.\n\n Args:\n websocket: The websocket instance.\n data (dict): The received data.\n \"\"\"\n if \"build_id\" not in data:\n return False\n\n path = BUILD_OUT_PATH / str(data.get(\"build_id\")) / \"build.log\"\n llogger = LiveLogger(websocket.send_str, path)\n\n if hasattr(websocket, \"logger\") and websocket.logger:\n await stop_livelogger(websocket, data)\n\n websocket.logger = llogger\n loop = asyncio.get_event_loop()\n loop.create_task(llogger.start())\n logger.debug(\"new logger task created for '%s'\", str(path))\n\n\nasync def stop_livelogger(websocket, _):\n \"\"\"\n Stops the livelogger.\n \"\"\"\n if hasattr(websocket, \"logger\") and websocket.logger:\n await websocket.logger.stop()\n\n\nasync def dispatch(websocket, message):\n \"\"\"\n Dispatches websocket requests to different\n handler functions.\n\n Args:\n websocket: The websocket instance.\n message (dict): The received message dict.\n\n Returns:\n bool: True if successful, False otherwise.\n \"\"\"\n handlers = {\n Subject.livelog.value: {\n Action.start.value: start_livelogger,\n Action.stop.value: stop_livelogger,\n }\n }\n\n if \"subject\" not in message or \"action\" not in message:\n return False\n\n handler = handlers.get(message.get(\"subject\")).get(message.get(\"action\"))\n await handler(websocket, message.get(\"data\"))\n return True\n\n\n@app.websocket_connect()\nasync def websocket_connected(websocket):\n \"\"\"\n Sends a `success` message to the websocket client\n on connect.\n \"\"\"\n if asyncio.iscoroutinefunction(websocket.send_str):\n await websocket.send_str(json.dumps({\"subject\": Subject.websocket.value, \"event\": Event.connected.value}))\n else:\n websocket.send_str(json.dumps({\"subject\": Subject.websocket.value, \"event\": Event.connected.value}))\n\n logger.info(\"new authenticated connection, user: %s\", websocket.cirrina.web_session.get(\"username\"))\n\n\n@app.websocket_message(\"/api/websocket\")\nasync def websocket_message(websocket, msg):\n \"\"\"\n On websocket message handler.\n \"\"\"\n logger.debug(\"message received from user '%s'\", websocket.cirrina.web_session.get(\"username\"))\n try:\n data = json.loads(msg)\n logger.debug(\"received data %s\", str(data))\n except json.decoder.JSONDecodeError:\n logger.error(\n \"cannot parse websocket message from user '%s'\",\n websocket.cirrina.web_session.get(\"username\"),\n )\n # malformed payload: `data` is never bound, so bail out instead of dispatching\n return\n\n await dispatch(websocket, data)\n\n\n@app.websocket_disconnect()\nasync def websocket_closed(_):\n \"\"\"\n On websocket disconnect handler.\n \"\"\"\n logger.debug(\"websocket connection closed\")\n","sub_path":"molior/api/websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"91010552","text":"from json import loads, dumps\nfrom winload import Winload\nfrom linload import Linload\nfrom network import Network\nfrom sys import platform\n\n\nclass LocalController:\n def __init__(self):\n self.platform = ''\n self.data = {}\n self.dataGetter = None\n self.networkWorker = None\n self.config = {}\n\n def setup(self):\n if platform.startswith('linux'):\n self.platform = 'linux'\n self.dataGetter = Linload()\n elif 
platform.startswith('win'):\n self.platform = 'win'\n self.dataGetter = Winload()\n\n self.config = self.loadJsonFile('./data/config.json')\n self.networkWorker = Network(self.config)\n\n def loadJsonFile(self, filename):\n with open(filename) as json_file:\n data = loads(json_file.read())\n return data\n\n def saveJsonFile(self, filename, variable):\n with open(filename, \"w\") as json_file:\n data = json_file.write(dumps(variable, indent=4))\n return data\n\n def main(self):\n self.setup()\n self.networkWorker.startHeartBeat()\n self.data = self.dataGetter.main()\n self.saveJsonFile('./data/data.json', self.data)\n self.networkWorker.sendFile()\n\n\nif __name__ == '__main__':\n LocalController().main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"206590275","text":"\"\"\"\nRetrieve a list of reviewers for given locales and timeframe with a number of\nsuggestions they approved or rejected.\nSelf-reviewed translations are excluded from calculation.\n\nOutput is formatted as CSV with the following columns:\n* Locale\n* User\n* Number of Approved Suggestions\n* Number of Rejected Suggestions\n\nRun the script in Pontoon's Django shell, e.g.:\nheroku run --app mozilla-pontoon ./manage.py shell\n\"\"\"\n\n# Configuration\n# Use empty list for all locales\nLOCALES = [\n 'it', 'ja', 'pl', 'ru', 'zh-CN',\n]\nSTART_DATE = '18/12/2018' # DD/MM/YYYY\nEND_DATE = '18/12/2019' # DD/MM/YYYY\n\n\n# Script\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\nfrom django.db.models import F\nfrom django.utils.timezone import get_current_timezone\nfrom pontoon.base.models import Locale, Translation\n\nlocales = Locale.objects.all()\nif LOCALES:\n locales = Locale.objects.filter(code__in=LOCALES)\n\ntz = get_current_timezone()\nstart_date = tz.localize(datetime.strptime(START_DATE, '%d/%m/%Y'))\nend_date = tz.localize(datetime.strptime(END_DATE, '%d/%m/%Y'))\n\noutput = []\noutput.append(\n 'Locale,User,Role,Number of Approved Suggestions,Number of Rejected Suggestions')\n\nfor locale in locales:\n users = {}\n # Translations submitted in Pontoon for given locale and timeframe\n translations = Translation.objects.filter(\n locale=locale,\n date__gte=start_date,\n date__lte=end_date,\n )\n # Above translations that have been approved, but not self-approved\n approved = (\n translations\n .filter(approved_user__isnull=False)\n .exclude(user=F('approved_user'))\n )\n approved_users = User.objects.filter(\n pk__in=approved.values_list('approved_user', flat=True).distinct()\n )\n for user in approved_users:\n users[user.email] = {\n 'role': user.role(),\n 'approved': approved.filter(approved_user=user).count(),\n 'rejected': 0,\n }\n # Above translations that have been rejected, but not self-rejected\n rejected = (\n translations\n .filter(rejected_user__isnull=False)\n .exclude(user=F('rejected_user'))\n )\n rejected_users = User.objects.filter(\n pk__in=rejected.values_list('rejected_user', flat=True).distinct()\n )\n for user in rejected_users:\n if user.email in users:\n users[user.email]['rejected'] = rejected.filter(\n rejected_user=user).count()\n else:\n users[user.email] = {\n 'role': user.role(),\n 'approved': 0,\n 'rejected': rejected.filter(rejected_user=user).count(),\n }\n for email, stats in users.items():\n output.append('{},{},{},{},{}'.format(\n locale.code,\n email,\n stats['role'],\n stats['approved'],\n stats['rejected'],\n 
))\n\nprint('\\n'.join(output))\n","sub_path":"pontoon/list_reviewers.py","file_name":"list_reviewers.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"107974716","text":"\"\"\"\n Software: Reader\n Description:\n Author: Team Cronos\n Data: 27/10/2019\n Version: 0.0.1\n\"\"\"\n\nimport sys\nimport os\nimport subprocess\nfrom subprocess import Popen, PIPE\nimport threading\n\n\nclass Reader(object):\n\n def __init__(self):\n pass\n\n def run(self):\n\n args = [\n \"/home/fasr/Developer/Cronos/checkpoint/app/collector/bin/impinj.jar\",\n \"192.168.0.99\",\n ]\n\n process = subprocess.Popen(\n [\"java\", \"-jar\"] + list(args),\n encoding=\"utf-8\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n def writeall(process):\n while True:\n # print(\"read data: \")\n data = process.stdout.readline()\n if data:\n print(data)\n process.stdout.flush()\n\n writer = threading.Thread(target=writeall, args=(process,))\n writer.start()\n\n try:\n while True:\n d = input()\n if not d:\n break\n self._write(process, d)\n\n except EOFError:\n pass\n\n def _write(self, process, message):\n process.stdin.write(message + \"\\n\")\n process.stdin.flush()\n\n\nshell = Reader()\nshell.run()\n","sub_path":"app/tests/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"404194904","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport json\nimport argparse\nimport tempfile\nimport shutil\nimport os\n\nimport random\nimport hashlib\nimport datetime\nimport docker\nimport logging\nimport tarfile\nimport cStringIO\n\n# Module level docker connection\n# TODO: This could be made configurable later\nDOCKER_CLIENT = docker.Client(\n base_url='unix://var/run/docker.sock',\n timeout=240)\n\n# Module level logger\nLOG = logging.getLogger()\n_HANDLER = logging.StreamHandler()\n_FORMATTER = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n_HANDLER.setFormatter(_FORMATTER)\nLOG.addHandler(_HANDLER)\nLOG.setLevel(logging.DEBUG)\n\nclass Chdir:\n \"\"\" Context manager for changing the current working directory \"\"\"\n def __init__(self, newPath):\n self.newPath = os.path.expanduser(newPath)\n\n def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)\n\n def __exit__(self, etype, value, traceback):\n os.chdir(self.savedPath)\n\ndef _read_layers(layers, image_id):\n \"\"\" Reads the JSON metadata for specified layer / image id \"\"\"\n\n layer = DOCKER_CLIENT.inspect_image(image_id)\n layers.append(layer['Id'])\n\n if 'Parent' in layer and layer['Parent']:\n _read_layers(layers, layer['Parent'])\n\ndef _save_image(image_id, tar_file):\n \"\"\" Saves the image as a tar archive under specified name \"\"\"\n\n LOG.debug(\"Saving image %s to %s file...\" % (image_id, tar_file))\n\n image = DOCKER_CLIENT.get_image(image_id)\n\n with open(tar_file, 'w') as f:\n f.write(image.data)\n\n LOG.debug(\"Image saved!\")\n\ndef _unpack(tar_file, directory):\n \"\"\" Unpacks tar archive to selected directory \"\"\"\n\n LOG.debug(\"Unpacking %s tar file to %s directory\" % (tar_file, directory))\n\n with tarfile.open(tar_file, 'r') as tar:\n tar.extractall(path=directory)\n\n LOG.debug(\"Archive unpacked!\")\n\ndef _move_unmodified_layers(layers, squash_id, src, dest):\n \"\"\"\n This moves all the layers that should be copied as-is.\n In other 
words - all layers that are not meant to be squashed will be\n moved from the old image to the new image untouched.\n \"\"\"\n for layer in layers:\n LOG.debug(\"Moving umnodified layer %s...\" % layer)\n shutil.move(os.path.join(src, layer), dest)\n if layer == squash_id:\n # Stop if we are at the first layer that was squashed\n return\n\ndef _files_to_skip(to_squash, old_image_dir):\n to_skip = []\n\n LOG.debug(\"Searching for marker files...\")\n\n for layer_id in to_squash:\n layer_tar = os.path.join(old_image_dir, layer_id, \"layer.tar\")\n\n LOG.debug(\"Searching for marker files in '%s' archive...\" % layer_tar)\n\n with tarfile.open(layer_tar, 'r') as tar:\n for member in tar.getmembers():\n if '.wh.' in member.name:\n LOG.debug(\"Found '%s' marker file\" % member.name)\n to_skip.append(member.name)\n to_skip.append(member.name.replace('.wh.', ''))\n\n if to_skip:\n LOG.debug(\"Following files were found: %s\" % \" \".join(to_skip))\n\n return to_skip\n\ndef _generate_target_json(old_image_id, new_image_id, squash_id, squashed_dir):\n json_file = os.path.join(squashed_dir, \"json\")\n squashed_tar = os.path.join(squashed_dir, \"layer.tar\")\n # Read the original metadata\n metadata = DOCKER_CLIENT.inspect_image(old_image_id)\n\n # Update the fields\n metadata['Id'] = new_image_id\n metadata['Parent'] = squash_id\n metadata['Config']['Image'] = squash_id\n metadata['Created'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n metadata['Size'] = os.path.getsize(squashed_tar)\n\n # Remove unnecessary fields\n del metadata['ContainerConfig']\n del metadata['Container']\n del metadata['Config']['Hostname']\n\n with open(json_file, 'w') as f:\n json.dump(metadata, f)\n\ndef _generate_repositories_json(repositories_file, new_image_id, tag):\n if ':' in tag:\n name, tag = tag.split(':')\n else:\n name = tag\n tag = \"latest\"\n\n repos = {}\n repos[name] = {}\n repos[name][tag] = new_image_id\n\n with open(repositories_file, 'w') as f:\n json.dump(repos, f)\n\ndef _load_image(directory):\n c = cStringIO.StringIO()\n\n with tarfile.open(mode='w', fileobj=c) as tar:\n LOG.debug(\"Generating tar archive for the squashed image...\")\n with Chdir(directory):\n tar.add(\".\")\n LOG.debug(\"Archive generated\")\n\n LOG.debug(\"Uploading image...\")\n DOCKER_CLIENT.load_image(c.getvalue())\n LOG.debug(\"Image uploaded\")\n\n c.close()\n\ndef _layers_to_squash(layers, from_layer):\n \"\"\" Prepares a list of layer IDs that should be squashed \"\"\"\n to_squash = []\n\n for l in reversed(layers):\n if l == from_layer:\n break\n\n to_squash.append(l)\n\n to_squash.reverse()\n\n return to_squash\n\ndef _prepare_tmp_directory(provided_tmp_dir):\n \"\"\" Creates temporary directory that is used to work on layers \"\"\"\n if provided_tmp_dir:\n if os.path.exists(provided_tmp_dir):\n LOG.error(\"The '%s' directory already exists, please remove it before you proceed, aborting.\" % provided_tmp_dir)\n sys.exit(1)\n os.makedirs(provided_tmp_dir)\n return provided_tmp_dir\n else:\n return tempfile.mkdtemp(prefix=\"tmp-docker-squash-\")\n\ndef _squash_layers(layers_to_squash, squashed_tar_file, old_image_dir):\n\n # Find all files that should be skipped\n #\n # TODO: we probably should do it for current layer and\n # apply only on the previous layer\n to_skip = _files_to_skip(layers_to_squash, old_image_dir)\n\n LOG.debug(\"Starting squashing...\")\n\n with tarfile.open(squashed_tar_file, 'w') as squashed_tar:\n\n for layer_id in layers_to_squash:\n layer_tar_file = os.path.join(old_image_dir, 
layer_id, \"layer.tar\")\n\n LOG.debug(\"Squashing layer %s...\" % layer_id)\n\n # Open the exiting layer to squash\n with tarfile.open(layer_tar_file, 'r') as layer_tar:\n\n # Copy all the files to the new tar\n for member in layer_tar.getmembers():\n if not member.name in to_skip:\n # Special case: symlinks\n if member.issym():\n squashed_tar.addfile(member)\n else:\n squashed_tar.addfile(member, layer_tar.extractfile(member))\n else:\n LOG.debug(\"Skipping '%s' file because it's on the list to skip files\" % member.name)\n\n LOG.debug(\"Squashing done!\")\n\ndef main(args):\n\n # The image id or name of the image to be squashed\n try:\n old_image_id = DOCKER_CLIENT.inspect_image(args.image)['Id']\n except:\n LOG.error(\"Could not get the image ID to squash, please check provided 'image' argument: %s\" % args.image)\n sys.exit(1)\n\n # The id or name of the layer/image that the squashing should begin from\n # This layer WILL NOT be squashed, but all next layers will\n try:\n squash_id = DOCKER_CLIENT.inspect_image(args.layer)['Id']\n except:\n LOG.error(\"Could not get the layer ID to squash, please check provided 'layer' argument: %s\" % args.layer)\n sys.exit(1)\n\n old_layers = []\n\n # Read all layers in the image\n _read_layers(old_layers, old_image_id)\n\n old_layers.reverse()\n\n LOG.info(\"Old image has %s layers\", len(old_layers))\n LOG.debug(\"Old layers: %s\", old_layers)\n\n if not squash_id in old_layers:\n LOG.error(\"Couldn't find the provided layer (%s) in the %s image\" % (args.layer, args.image))\n sys.exit(1)\n\n # Find the layers to squash\n layers_to_squash = _layers_to_squash(old_layers, squash_id)\n\n LOG.info(\"We have %s layers to squash\", len(layers_to_squash))\n LOG.debug(\"Layers to squash: %s\", layers_to_squash)\n\n if len(layers_to_squash) == 0:\n LOG.error(\"There are no layers to squash, aborting\")\n sys.exit(1)\n\n # Prepare temporary directory where all the work will be executed\n tmp_dir = _prepare_tmp_directory(args.tmp_dir)\n\n # Location of the tar with the old image\n old_image_tar = os.path.join(tmp_dir, \"image.tar\")\n\n # Save the image in tar format in the tepmorary directory\n _save_image(old_image_id, old_image_tar)\n\n # Directory where the old layers will be unpacked\n old_image_dir = os.path.join(tmp_dir, \"old\")\n os.makedirs(old_image_dir)\n\n # Unpack the image\n LOG.info(\"Unpacking exported tar (%s)...\" % old_image_tar)\n _unpack(old_image_tar, old_image_dir)\n\n # Remove the tar file early to save some space\n LOG.info(\"Removing exported tar (%s)...\" % old_image_tar)\n os.remove(old_image_tar)\n\n # Directory where the new layers will be unpacked in prepareation to\n # import it to Docker\n new_image_dir = os.path.join(tmp_dir, \"new\")\n os.makedirs(new_image_dir)\n\n # Generate a new image id for the squashed layer\n new_image_id = hashlib.sha256(str(random.getrandbits(128))).hexdigest()\n\n LOG.info(\"New layer ID for squashed content will be: %s\" % new_image_id)\n\n # Prepare a directory for squashed layer content\n squashed_dir = os.path.join(new_image_dir, new_image_id)\n os.makedirs(squashed_dir)\n\n # Location of the tar archive with the squashed layers\n squashed_tar = os.path.join(squashed_dir, \"layer.tar\")\n\n # Append all the layers on each other\n _squash_layers(layers_to_squash, squashed_tar, old_image_dir)\n\n # Move all the layers that should be untouched\n _move_unmodified_layers(old_layers, squash_id, old_image_dir, new_image_dir)\n\n # Generate the metadata JSON based on the original one\n 
_generate_target_json(old_image_id, new_image_id, squash_id, squashed_dir)\n\n # Generate the metadata JSON with information about the images\n _generate_repositories_json(os.path.join(new_image_dir, \"repositories\"), new_image_id, args.tag)\n\n # And finally tar everything up and load into Docker\n _load_image(new_image_dir)\n\n # Cleanup the temporary directory\n shutil.rmtree(tmp_dir)\n\nif __name__ == \"__main__\":\n PARSER = argparse.ArgumentParser(description='Squashes all layers in the image from the layer specified as \"layer\" argument.')\n PARSER.add_argument('image', help='Image to be squashed')\n PARSER.add_argument('layer', help='ID of the layer or image ID or image name')\n PARSER.add_argument('tag', help='Specify the tag to be used for the new image')\n PARSER.add_argument('-t', '--tmp-dir', help='Temporary directory to be used')\n ARGS = PARSER.parse_args()\n\n main(ARGS)\n","sub_path":"squash/squash.py","file_name":"squash.py","file_ext":"py","file_size_in_byte":10002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"632459135","text":"import torch\nimport copy\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport networkFiles as NF\nimport numpy as np\nimport logging\n\n# Module ToDo List\n#\tDo we want to add weight decay as a hyperparameter?\n#\tWhat is a sensible value for the weight_decay\n#\tAdd function to generate different types of masks\n\n\ndef generateDictionary_Hyperopt(N_models, model_type, layers, input_size, hidden_size, image_size, loss_fn, dtype):\n\t\n\t# This is the parameters for the distribution of path lengths passed to generate samples\n\t# n is the longest path we want to generate uniformly over\n\n\t# Set up model dictionaries with meta entires that stores key properties of the model\n\tmodelBlock = {\"Meta\": {\"Model_Type\": model_type, \"Loss_Function\": loss_fn, \"Layers\": layers, \n\t\t\"Epochs_Trained\": 0, \"Type\": dtype, \"N\": image_size}}\n\tresultBlock = {}\n\n\t# Generate a vector of hyperparameters for the number of models\n\tlr_vec = np.power(10, (np.random.uniform(-2.5, -5, N_models)))\n\t#weight_decay_vec = np.power(10, (np.random.uniform(-2.5, -5, N_models)))\n\tbatch_size_vec = np.around(np.random.uniform(32, 256, N_models))\n\tbatch_size_vec = batch_size_vec.astype(int)\n\n\tfor i in range(N_models):\n\t\tmodelBlock[i] = {}\n\t\tmodelInit(modelBlock, model_type, i, input_size, hidden_size, layers, image_size)\n\t\tmodelBlock[i][\"Model\"].type(dtype)\n\t\tmodelBlock[i][\"Learning\"] = lr_vec[i]\n\t\tmodelBlock[i][\"Batch\"] = np.asscalar(batch_size_vec[i])\n\t\tmodelBlock[i][\"Weight_Decay\"] = 0.0 #np.asscalar(weight_decay_vec[i])\n\t\tmodelBlock[i][\"Optimizer\"] = optim.Adam(modelBlock[i][\"Model\"].parameters(), \n\t\t\tlr = modelBlock[i][\"Learning\"], weight_decay = modelBlock[i][\"Weight_Decay\"])\n\t\tmodelBlock[i][\"Loss\"] = 100.0\n\t\tmodelBlock[i][\"Accuracy\"] = 1.0\n\n\t\tresultBlock[\"Meta\"] = {\"Total_Epochs\": 0}\n\t\tresultBlock[i] = {\"Hyperparameter\":{}}\n\t\tresultBlock[i][\"Hyperparameter\"][\"Learning\"] = lr_vec[i]\n\t\tresultBlock[i][\"Hyperparameter\"][\"Batch\"] = np.asscalar(batch_size_vec[i])\n\t\tresultBlock[i][\"Hyperparameter\"][\"Weight_Decay\"] = np.asscalar(batch_size_vec[i])\n\t\tresultBlock[i][\"Hyperparameter\"][\"Max_Epoch\"] = 0\n\n\n\treturn modelBlock, resultBlock\n\n\ndef generateDictionary_Exp(N_models, model_type, layers, input_size, hidden_size, image_size, loss_fn, dtype, 
hyperparameter):\n\t\n\tmodelBlock = {}\n\tresultBlock = {}\n\n\t# This is the parameters for the distribution of path lengths passed to generate samples\n\t# n is the longest path we want to generate uniformly over\n\tn = 25\n\tdistribution = np.ones(n)/n\n\n\tmodelBlock[\"Meta\"] = {\"Model_Type\": model_type, \"Epochs_Trained\": 0, \n\t\t\"Type\": dtype, \"N\": image_size, \"Distribution\": distribution, \"Layers\": layers, \"Loss_Function\": loss_fn,\n\t\t\"Input\": input_size, \"Hidden\": hidden_size}\n\n\n\n\tlr = hyperparameter[model_type][layers][\"Learning\"]\n\tbatch_size = hyperparameter[model_type][layers][\"Batch\"]\n\tweight_decay = hyperparameter[model_type][layers][\"Weight_Decay\"]\n\n\tmodelBlock[\"Meta\"][\"Learning\"] = lr\n\tmodelBlock[\"Meta\"][\"Batch\"] = batch_size\n\tmodelBlock[\"Meta\"][\"Weight_Decay\"] = weight_decay\n\n\tfor i in range(N_models):\n\t\tmodelBlock[i] = {}\n\t\t# Note that here we need to pass i in rather than layers\n\t\tmodelInit(modelBlock, model_type, i, input_size, hidden_size, layers, image_size)\n\t\tmodelBlock[i][\"Model\"].type(dtype)\n\t\tmodelBlock[i][\"Learning\"] = lr\n\t\tmodelBlock[i][\"Batch\"] = batch_size\n\t\tmodelBlock[i][\"Optimizer\"] = optim.Adam(modelBlock[i][\"Model\"].parameters(), lr = lr, weight_decay = weight_decay)\n\t\tmodelBlock[i][\"Loss\"] = 100.0\n\t\tmodelBlock[i][\"Accuracy\"] = 1.0\n\n\t\tresultBlock[i] = {}\n\n\t# Then in the actual code, results get saved as resultBlock[layer][model id][epoch] = {Dictionary of results}\n\n\treturn modelBlock, resultBlock\n\n\ndef modelInit(modelBlock, model_type, key, input_size, hidden_size, layers, image_size):\n\tlogger = logging.getLogger(__name__)\n\tif (model_type == \"FixedWeights\"):\n\t\tnum_nodes = image_size**2\n\t\tmodelBlock[key][\"Model\"] = NF.PropagationOnly_FixedWeights(num_nodes, layers, num_nodes*5, image_size)\n\telif(model_type == \"SharedPixel\"):\n\t\tnum_nodes = image_size**2\n\t\tmodelBlock[key][\"Model\"] = NF.PropagationOnly_SharedPixel(num_nodes, layers, num_nodes*5, image_size)\n\telse:\n\t\tlogger.warning('Model type not recognized')\n\n\ndef convertStateDict(modelBlock):\n\n\t# The deep copy is important here. 
If not done, we end up modifying the original modelBlock\n\tmodelBlock_State = copy.deepcopy(modelBlock)\n\n\tfor key, val in modelBlock.items():\n\t\tif (key != \"Meta\"):\n\t\t\tmodel = modelBlock[key][\"Model\"].state_dict()\n\t\t\toptimizer = modelBlock[key][\"Optimizer\"].state_dict()\n\t\t\tmodelBlock_State[key][\"Model\"] = model\n\t\t\tmodelBlock_State[key][\"Optimizer\"] = optimizer\n\n\treturn modelBlock_State\n\n\ndef loadStateDict(modelBlock_State):\n\n\tmodelBlock = copy.deepcopy(modelBlock_State)\n\tmodel_type = modelBlock['Meta']['Model_Type']\n\tinput_size = modelBlock['Meta']['Input']\n\thidden_size = modelBlock['Meta']['Hidden']\n\tlayers = modelBlock['Meta']['Layers']\n\timage_size = modelBlock['Meta']['N']\n\n\t#lr = modelBlock['Meta']['Learning']\n\t#weight_decay = modelBlock['Meta']['Weight_Decay']\n\n\tfor key, val in modelBlock.items():\n\t\tif (key != \"Meta\"):\n\t\t\tmodelInit(modelBlock, model_type, key, input_size, hidden_size, layers, image_size)\n\t\t\t#modelBlock[key][\"Optimizer\"] = optim.Adam(modelBlock[key][\"Model\"].parameters(), lr = lr, weight_decay = weight_decay)\n\t\t\tmodelBlock[key][\"Model\"].load_state_dict(modelBlock_State[key][\"Model\"])\n\t\t\t#modelBlock[key][\"Optimizer\"].load_state_dict(modelBlock_State[key][\"Optimizer\"])\n\n\treturn modelBlock\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Propagation/generateDictionary.py","file_name":"generateDictionary.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"463085974","text":"import numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# first five motivational quotes, next 5 benifits of eating fruits, next 5 views on dogs\ntext = [\"Each time we face our fear, we gain strength,courage, and confidence in the doing.\",\n \"If you hear a voice within you say, you cannot paint, then by all means paint, and what voice will be silenced\",\n \"Don't be satisfied with stories, how things have gone with others, unfold your own myth\",\n \"you wouldn't worry so much about what others think of you if you realized how seldom they do\",\n \"It is confidence in our bodies, minds, and spirits that allows us to keep looking for new adventures\",\n \"Eating a diet rich in vegetables and fruits as part of an overall healthy diet may reduce risk of heart disease.\",\n \"A fruit cointaining eating pattern is part of overall healthy diet and may protect aganist certain cancers.\",\n \"Eating a diet rich in fruit may reduce risk for stroke, other cardiovascular diseases and type-2 diabetes.\",\n \"Vitamin C in clementine prevents premature aging by fighting against free radicals and promoting collagen production\",\n \"Two phytonutrients in honeydew melon, lutein, and zeaxanthin, are essential for maintaining eye health as we get older.\",\n \" dogs one of the cutest animals on earth (even the bitey ones) and one of the most beautiful.\",\n \"Dogs don’t just fill your heart; they actually make it stronger.\",\n \"Research has repeatedly found that daily dog walks help you lose weight, since they force you to into moderate physical activity for 10, 20, and even 30 minutes at a time.\",\n \"There’s a reason therapy dogs are so effective\",\n \"If you’re over 65 and own a pet, odds are you seek medical help about 30 percent less often than people who don’t have a pet\",\n ]\n\nvectorizer = CountVectorizer()\n\n# tokenize and build the vocabulary\nvectorizer.fit(text)\n\n\n# encode the document\nvector 
= vectorizer.transform(text)\n\ncountVector = vector.toarray()\n\ndef kmeans(x):\n n = countVector.shape[1]\n # initialize centroids randomly\n centroid_1 = np.random.rand(1,n)\n centroid_2 = np.random.rand(1, n)\n centroid_3 = np.random.rand(1, n)\n\n labels = None\n\n i = 0\n\n while i<100:\n #compute the distance of each pt from the centroids\n distance_1 = np.sum(np.square(x-centroid_1), keepdims=True, axis=1)\n distance_2 = np.sum(np.square(x - centroid_2), keepdims=True, axis=1)\n distance_3 = np.sum(np.square(x - centroid_3), keepdims=True, axis=1)\n\n labels = np.argmin(np.hstack([distance_1, distance_2, distance_3]), axis=1)\n\n class_1 = x[labels==0, :]\n class_2 = x[labels==1, :]\n class_3 = x[labels==2, :]\n\n #update centroids\n if class_1.shape[0]>0:\n centroid_1 = np.mean(class_1, axis=0)\n\n if class_2.shape[0]>0:\n centroid_2 = np.mean(class_2, axis=0)\n\n if class_3.shape[0]>0:\n centroid_3 = np.mean(class_3, axis=0)\n\n i += 1\n\n return labels\n\n\nprint(kmeans(countVector))\n\n\n\n\n\n\n","sub_path":"preprocessing/countvectorizer.py","file_name":"countvectorizer.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"368168012","text":"# 크기가 N X N\n# 도시는 1 X 1\n# 치킨거리는 집과 가장 가까운 치킨집 사이의 거리\n# 도시의 치킨거리는 모든 집의 치킨 거리의 합\n# d = |r1 - r2| + |c1 - c2|\n# 0 : 빈칸, 1 : 집, 2 : 치킨집\n\nn, m = map(int, input().split())\nmap = [list(map(int, input().split())) for _ in range(n)]\n\ncities = []\nchickens = []\n\n\n# 도시와 치킨집을 따로 추출\nfor i in range(n):\n for j in range(n):\n if map[i][j] == 0:\n continue\n elif map[i][j] == 1:\n cities.append([i, j])\n elif map[i][j] == 2:\n chickens.append([i, j])\n\n# print(\"cities\", cities)\n# print(\"chickens\", chickens)\n\n# 치킨거리 추출\ndistances = []\nfor chicken in chickens:\n for city in cities:\n tmp = abs(chicken[0] - city[0]) + abs(chicken[1] - city[1]) # 치킨거리\n distances.append([chicken, tmp])\n\n# 치킨가게별 치킨거리 추출\nchickenSum = 0\ntotalSum = []\nfor i in range(1, len(distances)):\n if distances[i-1][0] != distances[i][0]:\n totalSum.append([distances[i-1][0], chickenSum])\n chickenSum = 0\n continue\n chickenSum += distances[i-1][1]\n\nchickenSum += distances[i][1]\ntotalSum.append([distances[i-1][0], chickenSum])\n\nanswer = int(n*n)\nfor i in range(len(totalSum)):\n if answer > totalSum[i][1]:\n answer = totalSum[i][1]\n\nprint(answer)\n","sub_path":"Python-for-coding-test/4-Implementation/Q13-치킨배달.py","file_name":"Q13-치킨배달.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"111188164","text":"# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom yacs.config import CfgNode as CN\n\n_C = CN()\n_C.data = CN(\n dict(\n batch_size=32, # batch size\n valid_size=64, # the first N examples are reserved for validation\n sample_rate=22050, # Hz, sample rate\n n_fft=1024, # fft frame size\n win_length=1024, # window size\n hop_length=256, # hop size between adjacent frames\n fmax=8000, # Hz, max frequency when converting to mel\n fmin=0, # Hz, min frequency when converting to mel\n d_mels=80, # mel bands\n padding_idx=0, # text embedding's padding index\n ))\n\n_C.model = CN(\n dict(\n vocab_size=70,\n n_tones=10,\n reduction_factor=1, # reduction factor\n d_encoder=512, # embedding & encoder's internal size\n encoder_conv_layers=3, # number of conv layers in tacotron2 encoder\n encoder_kernel_size=5, # kernel size of conv layers in tacotron2 encoder\n d_prenet=256, # hidden size of decoder prenet\n # hidden size of the first rnn layer in tacotron2 decoder\n d_attention_rnn=1024,\n # hidden size of the second rnn layer in tacotron2 decoder\n d_decoder_rnn=1024,\n d_attention=128, # hidden size of decoder location linear layer\n attention_filters=32, # number of filters in decoder location conv layer\n attention_kernel_size=31, # kernel size of decoder location conv layer\n d_postnet=512, # hidden size of decoder postnet\n postnet_kernel_size=5, # kernel size of conv layers in postnet\n postnet_conv_layers=5, # number of conv layers in decoder postnet\n p_encoder_dropout=0.5, # dropout probability in encoder\n p_prenet_dropout=0.5, # dropout probability in decoder prenet\n\n # dropout probability of first rnn layer in decoder\n p_attention_dropout=0.1,\n # dropout probability of second rnn layer in decoder\n p_decoder_dropout=0.1,\n p_postnet_dropout=0.5, # dropout probability in decoder postnet\n guided_attention_loss_sigma=0.2,\n d_global_condition=256,\n\n # whether to use a classifier to predict stop probability\n use_stop_token=False,\n # whether to use guided attention loss in training\n use_guided_attention_loss=True, ))\n\n_C.training = CN(\n dict(\n lr=1e-3, # learning rate\n weight_decay=1e-6, # the coeff of weight decay\n grad_clip_thresh=1.0, # the clip norm of grad clip.\n valid_interval=1000, # validation\n save_interval=1000, # checkpoint\n max_iteration=500000, # max iteration to train\n ))\n\n\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()\n","sub_path":"examples/tacotron2_aishell3/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"489546404","text":"from collections import OrderedDict\nimport copy\nimport matplotlib.pyplot as plt\nimport numpy\nfrom numpy import array\nfrom numpy import mean\nimport pandas\nfrom pandas import DataFrame\nfrom pandas import Series\nimport scipy\nimport sklearn\nimport 
sklearn.cross_validation\nfrom sklearn.cross_validation import ShuffleSplit\nimport sklearn.feature_selection\nimport sklearn.linear_model\nimport sklearn.pipeline\n\nimport MaclearnUtilities\nfrom MaclearnUtilities import bhfdr\nfrom MaclearnUtilities import colcor\n\nimport RestrictedData\nxs = RestrictedData.xs\nxnorms = RestrictedData.xnorms\nannots = RestrictedData.annots\nys = RestrictedData.ys\nynums = RestrictedData.ynums\n\ncvSchedules = {k : ShuffleSplit(len(ys[k]),\n n_iter = 5,\n test_size = 0.2,\n random_state = 123)\n for k in xnorms}\n\n\ndef pandaize(f):\n def pandaized(estimator, X, y, **kwargs):\n return f(estimator, array(X), y, **kwargs)\n return pandaized\n\n@pandaize\ndef cross_val_score_pd(estimator, X, y, **kwargs):\n return sklearn.cross_validation.cross_val_score(\n estimator, X, y, **kwargs)\n\ndef fitModelWithNFeat(fitter, n, setname, cv=None):\n if cv is None:\n cv = cvSchedules[setname]\n if n > xnorms[setname].shape[1]:\n return None\n fsFitter = sklearn.pipeline.Pipeline([\n ('featsel', sklearn.feature_selection.SelectKBest(\n sklearn.feature_selection.f_regression, k=n)),\n ('classifier', fitter)\n ])\n return mean(cross_val_score_pd(estimator = fsFitter,\n X = xnorms[setname],\n y = ynums[setname],\n cv = cv))\n\ndef accPlot(accsByNFeats):\n ax = plt.subplot(111)\n for s in accsByNFeats:\n plotdata = pandas.concat([DataFrame({\"p\" : p,\n \"acc\" : accsByNFeats[s][p]},\n index = [str(p)])\n for p in accsByNFeats[s]],\n axis = 0)\n plotdata.plot(x = \"p\",\n y = \"acc\",\n ax = ax,\n logx = True,\n label = s)\n\n\nnFeatures = [2, 5, 10, 20, 50, 100, 200, 500,\n 1000, 2000, 5000, 10000]\n\n\n## -----------------------------------------------------------------\n## no (err...very little) regularization\n## -----------------------------------------------------------------\ndef fitLogisticWithNFeat(**kwargs):\n fitter = sklearn.linear_model.LogisticRegression(\n penalty=\"l2\", C=1e10)\n return fitModelWithNFeat(fitter=fitter, **kwargs)\n\nnFeatNoReg = [2, 5, 10, 20, 50, 100, 200]\naccsByNFeats = OrderedDict([(s,\n OrderedDict([(\n n,\n fitLogisticWithNFeat(n=n, setname=s))\n for n in nFeatNoReg]))\n for s in xnorms])\nfor s in accsByNFeats:\n for n in accsByNFeats[s]:\n if n > xnorms[s].shape[0]:\n accsByNFeats[s][n] = None\n\nplt.clf()\naccPlot(accsByNFeats)\n\n\n## -----------------------------------------------------------------\n## L2 regularization\n## -----------------------------------------------------------------\ndef fitL2LogisticWithNFeat(**kwargs):\n fitter = sklearn.linear_model.LogisticRegression(\n penalty=\"l2\", C=1)\n return fitModelWithNFeat(fitter=fitter, **kwargs)\n\naccsByNFeatsL2 = OrderedDict([(s,\n OrderedDict([(\n n,\n fitL2LogisticWithNFeat(n=n, setname=s))\n for n in nFeatures]))\n for s in xnorms])\n\nplt.clf()\naccPlot(accsByNFeatsL2)\n\n\n\n## -----------------------------------------------------------------\n## L1 regularization\n## -----------------------------------------------------------------\ndef fitL1LogisticWithNFeat(**kwargs):\n fitter = sklearn.linear_model.LogisticRegression(\n penalty=\"l1\", C=1)\n return fitModelWithNFeat(fitter=fitter, **kwargs)\n\naccsByNFeatsL1 = OrderedDict([(s,\n OrderedDict([(\n n,\n fitL1LogisticWithNFeat(n=n, setname=s))\n for n in nFeatures]))\n for s in 
xnorms])\n\nplt.clf()\naccPlot(accsByNFeatsL1)\n","sub_path":"LogisticReal.py","file_name":"LogisticReal.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"643376584","text":"import logging\nimport pymc3 as pm\nimport theano.tensor as tt\nfrom theano.compile.ops import as_op\nimport numpy as np\nfrom scipy import stats\n\nlogger = logging.getLogger('root')\n\n\n@as_op(itypes=[tt.dvector, tt.dvector, tt.dvector], otypes=[tt.dmatrix])\ndef outcome_probabilities(theta, mu, sigma):\n out = np.empty((theta.shape[0], mu.shape[0]), dtype=np.float)\n # out = np.empty((n_y_levels, hierarchical_model.n_groups), dtype=np.float)\n normal_dist = stats.norm(loc=mu, scale=sigma)\n out[0, :] = normal_dist.cdf(theta[0])\n for i in range(1, theta.shape[0] - 1):\n out[i, :] = np.max([[0, 0], normal_dist.cdf(theta[i]) - normal_dist.cdf(theta[i - 1])], axis=0)\n out[-1, :] = 1 - normal_dist.cdf(theta[-2])\n return out\n\n\ndef add_ordinal_model(hierarchical_model, min_level=0, max_level=2):\n \"\"\"\n Adding a model that estimates decisions on \"ordinal\" data. In particular, \"ordinal\" data\n is a categorical where the variables have *ordered categories*, however the distances\n between the categories is not known. Each of the categories are represented by \"levels\"\n (the range of [min_level, max_level].)\n Credits of the implementation of this model in pymc3 belongs to\n http://nbviewer.jupyter.org/github/JWarmenhoven/DBDA-python/blob/master/Notebooks/Chapter%2023.ipynb\n For a discussion on this model and implementation on R refer to Chapter 23 in the book \n 'Doing Bayesian Data Analysis: A Tutorial with R, JAGS, and Stan', Second Edition, by John Kruschke (2015).\n \"\"\"\n mean_y = np.mean([hierarchical_model.stats_y[i].mean for i in range(hierarchical_model.n_groups)])\n sd_y = np.mean([hierarchical_model.stats_y[i].variance for i in range(hierarchical_model.n_groups)]) ** (0.5)\n logger.debug(f\"sd_y={sd_y}\")\n logger.debug(f\"mean_y={mean_y}\")\n n_y_levels = max_level - min_level + 1\n\n thresh = np.arange(n_y_levels, dtype=np.float) + min_level + 0.5\n thresh_obs = np.ma.asarray(thresh)\n thresh_obs[1:-1] = np.ma.masked\n\n\n with pm.Model() as hierarchical_model.pymc_model:\n theta = pm.Normal('theta', mu=thresh, tau=np.repeat(.5 ** 2, len(thresh)),\n shape=len(thresh), observed=thresh_obs)\n mu = pm.Normal('mu', mu=n_y_levels / 2.0, tau=1.0 / (n_y_levels ** 2), shape=hierarchical_model.n_groups)\n sigma = pm.Uniform('sigma', n_y_levels / 1000.0, n_y_levels * 10.0, shape=hierarchical_model.n_groups)\n logger.debug((mu.shape[0], n_y_levels, theta.shape[0]))\n levelProbs = pm.Deterministic(\"levelProbs\", outcome_probabilities(theta, mu, sigma))\n\n observations = []\n hierarchical_model.mu_parameter = \"mu\"\n hierarchical_model.sigma_parameter = \"sigma\"\n\n def add_observations():\n with hierarchical_model.pymc_model:\n for i in range(hierarchical_model.n_groups):\n observations.append(pm.Categorical(f'y_{i}', levelProbs[:, i], observed=hierarchical_model.y[i]))\n\n hierarchical_model.add_observations_function = add_observations\n","sub_path":"HyBayes/models/ordinal_model.py","file_name":"ordinal_model.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"583192205","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# 
[GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/coils/core/vcard/parse_vcard.py\n# Compiled at: 2012-10-12 07:02:39\nimport datetime, re, vobject\nfrom utility import determine_ogo_tel_type_from_caldav_type\n\ndef take_integer_value(values, key, name, vcard, default=None):\n key = key.replace('-', '_')\n if hasattr(vcard, key):\n try:\n values[name] = int(getattr(vcard, key).value)\n except:\n values[name] = default\n\n\ndef take_string_value(values, key, name, vcard, default=None):\n key = key.replace('-', '_')\n if hasattr(vcard, key):\n try:\n values[name] = str(getattr(vcard, key).value)\n except:\n values[name] = default\n\n\ndef determine_adr_type(attributes, **params):\n entity_name = params.get('entity_name', 'Contact')\n if 'X-COILS-ADDRESS-TYPE' in attributes:\n return attributes['X-COILS-ADDRESS-TYPE'][0]\n else:\n if 'TYPE' in attributes:\n if entity_name == 'Contact':\n if 'home' in attributes:\n return 'private'\n else:\n if 'work' in attributes:\n return 'mailing'\n return 'location'\n elif entity_name == 'Enterprise':\n raise NotImplementedError()\n else:\n if entity_name == 'Team':\n return\n raise CoilsException('Unknown vCard to entity correspondence')\n else:\n raise CoilsException('Cannot parse vCard; address with no type')\n return\n\n\ndef parse_vcard(card, ctx, log, **params):\n entity_name = params.get('entity_name', 'Contact')\n if entity_name not in ('Contact', 'Enterprise'):\n raise CoilsException('Parsing to this kind of entity not supported.')\n values = {}\n emails = []\n for line in card.lines():\n if line.name == 'UID':\n if line.value[:8] == 'coils://':\n if entity_name == 'Contact' and line.value[:16] == 'coils://Contact/' and line.value[16:].isdigit():\n values['objectId'] = int(line.value[16:])\n elif entity_name == 'Enterprise' and line.value[:19] == 'coils://Enterprise/' and line.value[19:].isdigit():\n values['objectId'] = int(line.value[19:])\n elif entity_name == 'Team' and line.value[:13] == 'coils://Team/' and line.value[13:].isdigit():\n values['objectId'] = int(line.value[13:])\n else:\n log.warn(('Corrupted COILS UID String: {0}').format(line.value))\n else:\n log.debug(('vCard UID not a COILS id: {0}').format(line.value))\n elif line.name == 'ADR':\n kind = determine_adr_type(line.params, **params)\n if kind is not None:\n if '_ADDRESSES' not in values:\n values['_ADDRESSES'] = []\n address = {'type': kind}\n address['name1'] = line.value.extended\n address['city'] = line.value.city\n address['postalCode'] = line.value.code\n address['country'] = line.value.country\n address['state'] = line.value.region\n address['street'] = line.value.street\n values['_ADDRESSES'].append(address)\n elif line.name == 'X-JABBER':\n values['imAddress'] = line.value\n elif line.name == 'TITLE':\n if '_COMPANYVALUES' not in values:\n values['_COMPANYVALUES'] = []\n values['_COMPANYVALUES'].append({'attribute': 'job_title', 'value': line.value})\n elif line.name == 'TEL':\n if '_PHONES' not in values:\n values['_PHONES'] = []\n telephone = {'type': None}\n if 'TYPE' in line.params:\n telephone['caldav_types'] = [ x.upper() for x in line.params['TYPE'] ]\n if 'X-COILS-TEL-TYPE' in line.params:\n telephone['type'] = line.params['X-COILS-TEL-TYPE'][0]\n elif 'caldav_types' in telephone:\n telephone['type'] = determine_ogo_tel_type_from_caldav_type(telephone)\n if not telephone['type']:\n raise CoilsException('Cannot parse vCard; telephone with no type')\n telephone['number'] = line.value\n values['_PHONES'].append(telephone)\n elif line.name == 'N':\n 
values['lastName'] = line.value.family\n values['firstName'] = line.value.given\n elif line.name == 'NICKNAME':\n values['descripion'] = line.value\n elif line.name == 'X-EVOLUTION-FILE-AS':\n values['fileAs'] = line.value\n elif line.name == 'X-EVOLUTION-MANAGER':\n values['managersname'] = line.value\n elif line.name == 'X-EVOLUTION-ASSISTANT':\n values['assistantName'] = line.value\n elif line.name == 'X-EVOLUTION-SPOUSE':\n pass\n elif line.name == 'X-EVOLUTION-ANNIVERSARY':\n pass\n elif line.name == 'ROLE':\n values['occupation'] = line.value\n elif line.name == 'BDAY':\n pass\n elif line.name == 'CALURL':\n pass\n elif line.name == 'FBURL':\n values['comment'] = line.value\n elif line.name == 'NOTE':\n pass\n elif line.name == 'CATEGORIES':\n pass\n elif line.name == 'CLASS':\n pass\n elif line.name == 'ORG':\n values['associatedcompany'] = line.value[0]\n if len(line.value) > 1:\n values['department'] = line.value[1]\n if len(line.value) > 2:\n values['office'] = line.value[2]\n elif line.name == 'EMAIL':\n emails.append({'value': line.value, 'slot': int(line.params.get('X-EVOLUTION-UI-SLOT', [0])[0]), \n 'types': line.params.get('TYPE', [])})\n elif line.name == 'FN':\n pass\n elif line.name[:22] == 'X-COILS-COMPANY-VALUE-':\n attribute = line.name[22:].lower().replace('-', '_')\n if len(attribute) > 0:\n if '_COMPANYVALUES' not in values:\n values['_COMPANYVALUES'] = []\n values['_COMPANYVALUES'].append({'attribute': attribute, 'value': line.value})\n else:\n log.debug(('unprocessed vcard attribute {0}').format(line.name))\n\n if len(emails) > 0:\n if '_COMPANYVALUES' not in values:\n values['_COMPANYVALUES'] = []\n count = 1\n for email in emails:\n values['_COMPANYVALUES'].append({'attribute': ('email{0}').format(count), 'value': email['value'], \n 'xattr': ('1:{0}:{1}:').format(email['slot'], (',').join(email['types']))})\n count += 1\n if count == 4:\n break\n\n if 'objectId' not in values:\n if len(emails) == 0:\n log.debug('No e-mail address provided in vCard, cannot attempt identification via e-mail search')\n else:\n for email in emails:\n x = ctx.run_command('contact::search', criteria=[\n {'key': 'email1', 'value': email['value']}])\n if len(x) == 0:\n log.debug('Unable to identify contact via e-mail search: no candidates')\n elif len(x) == 1:\n object_id = x[0].object_id\n log.debug(('Identified vCard via e-mail search result; objectId = {0}').format(object_id))\n values['objectId'] = object_id\n break\n else:\n log.debug('Unable to identify contact via e-mail search: too many candidates')\n else:\n log.debug('Identification of vCard via e-mail search failed.')\n return values","sub_path":"pycfiles/OpenGroupware-0.1.48-py2.6/parse_vcard.py","file_name":"parse_vcard.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"502936251","text":"# MIT License\n#\n# Copyright (c) 2019 Jason Brackman\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the 
Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\ndef generate_codes(num_start: int = 20151125):\n while True:\n yield num_start\n num_next = num_start * 252533\n num_start = num_next % 33554393\n\n\ndef infinite_grid(max_row: int, max_col: int):\n codes = generate_codes()\n items = dict()\n row = 1\n col = 1\n new_row = row\n while True:\n\n items[(new_row, col)] = next(codes)\n while col != row:\n new_row -= 1\n col += 1\n items[(new_row, col)] = next(codes)\n if new_row == max_row and col == max_col:\n return items\n\n row += 1\n new_row = row\n col = 1\n\n\ndef main():\n \"\"\"\n To continue, please consult the code grid in the manual.\n Enter the code at row 3010, column 3019.\n \"\"\"\n results = infinite_grid(3010, 3019)\n assert results[(3010, 3019)] == 8997277\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/day_25.py","file_name":"day_25.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"273277101","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT=int(input())\nfor tc in range(1, T+1):\n words = list(map(str, input()))\n N = int(input())\n temp = ['-']\n res=''\n num = list(map(int, input().split()))\n \n for n in num:\n if n!=len(words):\n words[n] = '-' + words[n]\n else:\n words[-1] = words[-1] + '-'\n\n res = ''.join(words)\n print('#{} {}'.format(tc, res))","sub_path":"SWexpert/D3/늘어지는소리.py","file_name":"늘어지는소리.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"38184675","text":"# coding: utf-8\r\nimport sys, os, glob, re, subprocess\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nimport matplotlib.colors as colors\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport csv\r\n\r\n\r\ndef predict2d(X, Y, Z, lx, ly, lz):\r\n fig = plt.figure(figsize=(8, 7))\r\n plt.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95, wspace=0.2, hspace=0.2)\r\n # ax = Axes3D(fig)\r\n plt.xlabel(lx, size=14)\r\n plt.ylabel(ly, size=14)\r\n plt.tick_params(labelsize=14)\r\n\r\n im = plt.pcolor(X, Y, Z, cmap='bwr', linewidth=0)\r\n cbar = fig.colorbar(im)\r\n cbar.set_label(lz, size=14)\r\n # plt.contour(X, Y, Z, zdir='z')\r\n plt.show()\r\n\r\ndef predict3d(X, Y, Z, lx, ly, lz):\r\n fig = plt.figure()\r\n fig.subplots_adjust(left=0.15, bottom=0.1, right=0.7, top=0.95, wspace=0.1, hspace=0.2)\r\n ax = Axes3D(fig)\r\n ax.set_xlabel(lx, size=14)\r\n ax.set_ylabel(ly, size=14)\r\n ax.set_zlabel(lz, labelpad=30, size=14)\r\n ax.tick_params(labelsize=14)\r\n ax.tick_params(axis='z', pad=20)\r\n\r\n # plt.gca().zaxis.set_tick_params(which='both', direction='in',bottom=True, top=True, left=True, right=True)\r\n # ax.set_aspect(0.2)\r\n\r\n ax.plot_surface(X, Y, Z, cmap='bwr', linewidth=0)\r\n ax.contour(X, Y, Z, zdir='z', offset=np.min(Z))\r\n plt.show()\r\n # ax.imwrite(\"out.png\")\r\n\r\ndef predict3d2(X, Y, Z, P, Q, R, lx, ly, lz):\r\n fig = plt.figure()\r\n 
fig.subplots_adjust(left=0.15, bottom=0.1, right=0.7, top=0.95, wspace=0.1, hspace=0.2)\r\n ax = Axes3D(fig)\r\n ax.set_xlabel(lx, size=14)\r\n ax.set_ylabel(ly, size=14)\r\n ax.set_zlabel(lz, labelpad=30, size=14)\r\n ax.tick_params(labelsize=14)\r\n ax.tick_params(axis='z', pad=20)\r\n\r\n # plt.gca().zaxis.set_tick_params(which='both', direction='in',bottom=True, top=True, left=True, right=True)\r\n # ax.set_aspect(0.2)\r\n\r\n ax.plot_surface(X, Y, Z, cmap='bwr', linewidth=0)\r\n ax.plot(P, Q, R, \"o\")\r\n ax.contour(X, Y, Z, zdir='z', offset=np.min(Z))\r\n plt.show()\r\n # ax.imwrite(\"out.png\")\r\n\r\ndef main(argv):\r\n if len(argv) == 0:\r\n print(\"ファイル番号を指定してください: { 1, 2, 3 }\")\r\n no = sys.stdin.readline().rstrip(\"\\n\")\r\n print(\"表示形式を指定してください: 2d=>2, 3d=>3\")\r\n dim = sys.stdin.readline()[0]\r\n elif len(argv) == 1:\r\n no = argv[1]\r\n dim = \"2\"\r\n else:\r\n no = argv[0]\r\n dim = argv[1][0]\r\n\r\n file = \"test\" + no + \".csv\"\r\n if not os.path.exists(file):\r\n print(\"入力ファイルがありません: \" + file)\r\n return\r\n\r\n labels = [\"$\\\\alpha, deg$\", \"$elv, deg$\", \"$Ma$\", \"$x_1$\", \"$x_2$\"]\r\n x_label = labels[3]\r\n y_label = labels[4]\r\n z_label = [\"$c_x$\", \"$c_m$\", \"$c_z$\", \"$z$\"][int(no) - 1]\r\n\r\n with open(file, \"r\") as f:\r\n X, Y, Z = (np.array(x, dtype=np.float).reshape((101, -1)) for x in list(zip(*csv.reader(f)))[0:3])\r\n {\"2\": predict2d, \"3\": predict3d}[dim](X, Y, Z, x_label, y_label, z_label)\r\n # with open(\"table_\"+[\"xl\", \"ym\", \"zn\"][int(no) - 1]+\".csv\", \"r\") as f:\r\n # P, Q, R = (np.array(x, dtype=np.float) for x in list(zip(*csv.reader(f)))[0:3])\r\n # predict3d2(X, Y, Z, P, Q, R, x_label, y_label, z_label)\r\n\r\nmain(sys.argv[1:])\r\n","sub_path":"plot_krig.py","file_name":"plot_krig.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"340070767","text":"import numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\nfilename = \"bitalino_Wilson_0.5m.txt\"\n\ndef start(filename,plot):\n f = open(filename, \"r\")\n \n #space\n f.readline()\n time_bip_end = f.readline()\n f.readline()\n \n lines = f.readlines()\n f.close()\n \n fs = 1000\n time_start = lines[0::4]\n time_end = lines[4::4]\n amplitude = lines[1::4]\n \n time_start_list = []\n time_end_list = []\n amplitude_list = []\n \n for row in time_start:\n time_start_list.append(np.fromstring(row, dtype=float, sep=\" \"))\n \n for row in time_end:\n time_end_list.append(np.fromstring(row, dtype=float, sep=\" \"))\n \n for row in amplitude:\n amplitude_list.append(np.fromstring(row, dtype=float, sep=\" \")) \n \n amplitude_list = np.asarray(amplitude_list)\n \n \n time_start_list = np.asarray(time_start_list)\n #time_start_list = time_start_list - time_start_list[0]\n \n time_end_list = np.asarray(time_end_list)\n #time_end_list = time_end_list - time_end_list[0]\n \n ecg_signal = []\n \n for line in amplitude_list:\n ecg_signal.extend(line[5::6])\n \n if plot == 1:\n plt.plot(ecg_signal)\n plt.title(\"ECG test\")\n plt.show()\n \n t = np.linspace(0, int(len(ecg_signal)/fs), num = len(ecg_signal))\n \n return ecg_signal, t, fs\n","sub_path":"Processamento Prova de Conceito/bitalino_process.py","file_name":"bitalino_process.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"488869049","text":"__author__ = 'pretymoon'\nimport 
itertools\n\n#________________________________________#\ndef table_on_fly2(cur_row, prev_pat): # n-1 , best_path\n while cur_row > 0:\n grid.insert(0, list(legal_row_patterns_list[prev_pat]))\n prev_pat = sum_table[cur_row - 1][prev_pat][1]\n cur_row -= 1\n\n#________________________________________#\ndef table_on_fly(cur_row, prev_pat): # , prev_pat\n while cur_row > 0:\n grid.insert(1, list(legal_row_patterns_list[prev_pat]))\n prev_pat = sum_table[cur_row - 1][prev_pat][1]\n cur_row -= 1\n\n#________________________________________#\ndef is_compatible(my_table, my_row):\n legal_col_patterns = legal_row_patterns(len(my_table)+1)\n left_diag = []\n right_diag = []\n curr_col = []\n for k in range(len(my_row)): # col - my row\n left_diag = [my_row[k]]\n right_diag = [my_row[k]]\n curr_col = [my_row[k]]\n for i in range(len(my_table[0])): # col - table's cols\n for j in range(len(my_table)): # row - table's rows\n if tuple(my_table[j]) not in legal_patterns:\n return False\n if i == k: # same col\n curr_col.append(my_table[j][i])\n if i+j == k+len(my_table): # right diag\n right_diag.append(my_table[j][i])\n if i-j == k-len(my_table):\n left_diag.append(my_table[j][i])\n\n if i == k and tuple(curr_col) not in legal_col_patterns:\n return False\n if right_diag.count(\"o\") + right_diag.count(\"+\") > 1 or left_diag.count(\"o\") + left_diag.count(\"+\") > 1:\n return False\n return True\n\n#________________________________________#\ndef is_lpads(row_id, pattern): # check for legal patterns applicable, considering designer selection for this row\n\n tmp = [choice for choice in director_choices if choice[0] == row_id]\n # selected = [\".\" for i in range(len(pattern))]\n # for choice in tmp:\n # selected[choice[1]] = choice[2]\n # print(\"selected: \", selected)\n # print(\" pattern: \", pattern)\n\n for choice in tmp:\n if pattern[choice[1]] == \".\":\n # print(\" False1\")\n return False\n elif pattern[choice[1]] != choice[2] and pattern[choice[1]] != \"o\":\n # print(\" False2\")\n return False\n # print(\" True\")\n return True\n\n#________________________________________#\ndef legal_row_patterns(girth):\n symbols = {\"o\": 2, \"+\": 1, \"x\": 1}\n tmp = [\".\" for j in range(girth)]\n legal_patterns = {tuple(tmp): 0}\n\n for i in range(girth): # single symbol in a row\n for symbol in symbols:\n tmp = [\".\" for j in range(girth)]\n tmp[i] = symbol\n # print(tuple(tmp))\n legal_patterns[tuple(tmp)] = symbols[symbol]\n\n for cnt in range(2, girth+1): # only \"+\"'s in a row\n myString = \"+\"*cnt\n myString += \".\"*(girth-cnt)\n tmp = [tuple(c) for c in itertools.permutations(myString, girth)]\n for tpl in tmp:\n legal_patterns[tpl] = cnt\n\n for cnt in range(1, girth): # one or more \"+\"'s plus one more symbol\n myString = \"+\"*cnt\n myString += \".\"*(girth-cnt-1)\n for s in {\"o\", \"x\"}:\n this_string = myString\n this_string += s\n tmp = [tuple(c) for c in itertools.permutations(this_string, girth)]\n for tpl in tmp:\n # print(tpl, cnt, symbols[s])\n legal_patterns[tpl] = cnt + symbols[s]\n return legal_patterns\n\n###############################################\n# ff = open(\"\\\\Mahnaz\\\\PycharmProjects\\\\codeJam_2017_qualification\\\\problem_3\\\\C-small.in\", \"r\")\n# numOfCases = int(ff.readline())\n# for test_case in range(1, numOfCases+1):\n# print(\"Case #{}: \".format(test_case), end='')\n# strLine = ff.readline()\n# a = strLine.split(\" \")\n# n, m = [int(x) for x in a]\n# director_choices = []\n# select_rows = set([])\n# for choice_row in range(m):\n# strLine = 
ff.readline()\n#         a = strLine.split(\" \")\n#         symbol = a[0]\n#         row = int(a[1]) - 1\n#         col = int(a[2]) - 1\n#         director_choices.append([row, col, symbol])\n#         select_rows.add(row)\n###############################################\nnumOfCases = int(input())\nfor test_case in range(1, numOfCases+1):\n    print(\"Case #{}: \".format(test_case), end='')\n    strLine = input()\n    a = strLine.split(\" \")\n    n, m = [int(x) for x in a]\n    director_choices = []\n    select_rows = set([])\n    for choice_row in range(m):\n        strLine = input()\n        a = strLine.split(\" \")\n        symbol = a[0]\n        row = int(a[1]) - 1\n        col = int(a[2]) - 1\n        director_choices.append([row, col, symbol])\n        select_rows.add(row)\n###############################################\n\n    legal_patterns = legal_row_patterns(n)\n    legal_row_patterns_list = list(legal_patterns)\n\n    # print(\"legal_patterns \\n\", legal_patterns)\n    # print(\"legal_row_patterns_list \\n\", legal_row_patterns_list)\n\n    sum_table = [[[0, -1] for i in range(len(legal_row_patterns_list))] for j in range(n)]\n    grid = []\n\n    # initialize first row with value of each pattern\n    for i in range(len(legal_row_patterns_list)):\n        if 0 in select_rows and not is_lpads(0, legal_row_patterns_list[i]):\n            sum_table[0][i][0] = - float(\"inf\")\n        else:\n            sum_table[0][i][0] = legal_patterns[legal_row_patterns_list[i]]\n\n    # print(\"sum_table0 \\n\", sum_table )\n\n    # fill the sum table\n    for u in range(1, n): # row in stage\n        for i in range(len(legal_row_patterns_list)): # curr_row's column - one col per each legal patterns\n            for p in range(len(legal_row_patterns_list)): # prev_row's column col - one col per each legal patterns\n                # is the curr pattern compatible (with designer selection of models) for this row\n\n                if (u in select_rows and not is_lpads(u, legal_row_patterns_list[i])) or \\\n                        (u == 1 and 0 in select_rows and not is_lpads(0, legal_row_patterns_list[p])):\n                    continue\n                else:\n                    # is the pattern compatible col-wise and diag-wise\n                    grid = []\n                    table_on_fly2(u, p) # simulate the grid so far\n\n                    # print(\"grid---\")\n                    # for i in range(len(grid)):\n                    #     print(grid[i])\n                    if is_compatible(grid, list(legal_row_patterns_list[i])):\n                        tmp_sum = sum_table[u-1][p][0] + legal_patterns[legal_row_patterns_list[i]]\n                        if sum_table[u][i][0] < tmp_sum:\n                            sum_table[u][i][0] = tmp_sum\n                            sum_table[u][i][1] = p\n\n    #\n    # print(\"sum_table1\")\n    # for i in range(len(sum_table)):\n    #     print(sum_table[i])\n    #\n\n    # find the total max sum possible\n    # keep track of the col results the max sum possible\n    max_sum = 0\n    best_pat = -1\n    col = -1\n    for i in range(len(sum_table[0])):\n        if sum_table[-1][i][0] > max_sum:\n            max_sum = sum_table[-1][i][0]\n            best_pat = sum_table[-1][i][1]\n            col = i\n\n    #\n    # print(\"col: \", col)\n    #\n\n    grid = []\n\n    table_on_fly2(n-1, best_pat)\n    #\n    # print(grid)\n    #\n    grid.append(list(legal_row_patterns_list[col]))\n\n    #\n    # for i in range(len(grid)):\n    #     print(grid[i])\n    #\n\n    count = 0\n    for i in range(len(grid)): # row\n        for j in range(len(grid)): # col\n            if grid[i][j] != \".\":\n                count += 1\n        if i in select_rows:\n            tmp = [choice for choice in director_choices if choice[0] == i]\n            for choice in tmp:\n                if choice[2] == grid[i][choice[1]]:\n                    count -= 1\n\n    print(max_sum, count)\n\n    for i in range(len(grid)): # row\n        for j in range(len(grid)): # col\n            if grid[i][j] != \".\" and [i, j, grid[i][j]] not in director_choices:\n                print(grid[i][j], i+1, 
j+1)\n\n\n\n\n\n","sub_path":"codeJam_2017_qualification/problem_3/p3_code_small.py","file_name":"p3_code_small.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"329832227","text":"\"\"\"Compare speed of different models with batch size 12\"\"\"\nimport torch\nimport torchvision.models as models\nimport platform,psutil\nimport torch.nn as nn\nimport time,os\nfrom os import path\n\nimport json\n\nimport argparse\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom tqdm import tqdm\n\nfrom torch.cuda.amp import autocast\n\ntorch.backends.cudnn.benchmark = True\n# https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936\n# This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware. \n# If you check it using the profile tool, the cnn method such as winograd, fft, etc. is used for the first iteration and the best operation is selected for the device.\n\n\nMODEL_LIST = {\n\n # models.mnasnet:models.mnasnet.__all__[1:],\n models.resnet: models.resnet.__all__[1:],\n # models.densenet: models.densenet.__all__[1:],\n # models.squeezenet: models.squeezenet.__all__[1:],\n # models.vgg: models.vgg.__all__[1:],\n # models.mobilenet:models.mobilenet.__all__[1:],\n # models.shufflenetv2:models.shufflenetv2.__all__[1:]\n}\n\nprecisions=[\"auto\", \"float\", \"half\"]\n\n# For post-voltaic architectures, there is a possibility to use tensor-core at half precision.\n# Due to the gradient overflow problem, apex is recommended for practical use.\ndevice_name=str(torch.cuda.get_device_name(0))\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Benchmarking')\nparser.add_argument('--WARM_UP','-w', type=int,default=4, required=False, help=\"Num of warm up\")\nparser.add_argument('--NUM_TEST','-n', type=int,default=50,required=False, help=\"Num of Test\")\nparser.add_argument('--BATCH_SIZE','-b', type=int, default=16, required=False, help='Num of batch size')\nparser.add_argument('--NUM_CLASSES','-c', type=int, default=1000, required=False, help='Num of class')\nparser.add_argument('--NUM_GPU','-g', type=int, default=1, required=False, help='Num of gpus')\nparser.add_argument('--folder','-f', type=str, default='result', required=False, help='folder to save results')\nargs = parser.parse_args()\n\n\n\n\nclass RandomDataset(Dataset):\n\n def __init__(self, length):\n self.len = length\n self.data = torch.randn(length, 3, 224, 224, dtype=torch.float16)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\nclass Autocast(nn.Module):\n def __init__(self, model):\n super().__init__()\n self.m = model\n\n @autocast()\n def forward(self, input):\n return self.m.forward(input)\n\n\n\n\n \n\ndef prepare_model(model, precision):\n\n types = dict(auto = torch.float16, float = torch.float32, half = torch.float16)\n input_type = types[precision]\n if precision == \"auto\":\n model = Autocast(model)\n else:\n model = model.to(input_type)\n\n model = nn.DataParallel(model, device_ids=range(args.NUM_GPU))\n model = model.to('cuda')\n\n return model, input_type\n\ndef train(model, input_type, loader):\n target = torch.LongTensor(loader.batch_size).random_(args.NUM_CLASSES).cuda()\n criterion = nn.CrossEntropyLoss()\n\n model.train()\n\n for step, img in enumerate(loader):\n img = img.to(input_type)\n\n model.zero_grad()\n prediction = model(img)\n loss = criterion(prediction, 
target)\n loss.backward() \n\ndef test(model, input_type, loader):\n model.eval()\n for step, img in enumerate(loader):\n model(img.to(input_type))\n\ndef gmean(input_x, dim=0):\n log_x = torch.log(input_x)\n return torch.exp(torch.mean(log_x, dim=dim))\n\nbatch_size = args.BATCH_SIZE * args.NUM_GPU \n\nrand_loader = DataLoader(dataset=RandomDataset(batch_size * args.NUM_TEST), \n batch_size=batch_size, shuffle=False,num_workers=0)\n\nwarmup_loader = DataLoader(dataset=RandomDataset(batch_size * args.WARM_UP), \n batch_size=batch_size, shuffle=False,num_workers=0)\n\n\ndef benchmark_models(name, task, precision=\"auto\", output=\"results\"):\n\n benchmark = {}\n for model_type in MODEL_LIST.keys():\n for model_name in MODEL_LIST[model_type]:\n model = getattr(model_type, model_name)(pretrained=False)\n model, input_type = prepare_model(model, precision)\n\n task(model, input_type, warmup_loader)\n\n torch.cuda.synchronize()\n start = time.time()\n\n task(model, input_type, rand_loader)\n\n torch.cuda.synchronize()\n end = time.time()\n\n rate = len(rand_loader) * batch_size / (end - start)\n print(model_name, precision, rate, 'images/sec')\n del model\n\n benchmark[model_name] = rate\n\n total = torch.tensor(list(benchmark.values()), dtype=torch.float32)\n mean = gmean(total)\n\n print(\"------------------------------------------------\")\n print(\"geometric mean ({}, precision={}): {:.4f}\".format(name, precision, mean))\n\n benchmark['gmean'] = mean.item()\n\n filename = path.join(output, \"{}_{}.json\".format(name, precision))\n with open(filename, 'w') as outfile:\n json.dump(benchmark, outfile)\n\n\nclass no_op():\n def __enter__(self):\n return None\n def __exit__(self, exc_type, exc_value, traceback):\n return False\n\n\n\nif __name__ == '__main__':\n folder_name= \"{}_batch{}_gpus{}\".format(args.folder, args.BATCH_SIZE, args.NUM_GPU)\n\n device_name=\"\".join((device_name, '_',str(args.NUM_GPU),'_gpus_'))\n system_configs=str(platform.uname())\n system_configs='\\n'.join((system_configs,str(psutil.cpu_freq()),'cpu_count: '+str(psutil.cpu_count()),'memory_available: '+str(psutil.virtual_memory().available)))\n gpu_configs=[torch.cuda.device_count(),torch.version.cuda,torch.backends.cudnn.version(),torch.cuda.get_device_name(0)]\n gpu_configs=list(map(str,gpu_configs))\n temp=['Number of GPUs on current device : ','CUDA Version : ','Cudnn Version : ','Device Name : ']\n\n os.makedirs(folder_name, exist_ok=True)\n now = time.localtime()\n start_time=str(\"%04d/%02d/%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec))\n \n print('benchmark start : ',start_time)\n\n for idx,value in enumerate(zip(temp,gpu_configs)):\n gpu_configs[idx]=''.join(value)\n print(gpu_configs[idx])\n\n print(system_configs)\n\n with open(os.path.join(folder_name,\"system_info.txt\"), \"w\") as f:\n f.writelines('benchmark start : '+start_time+'\\n')\n f.writelines('system_configs\\n\\n')\n f.writelines(system_configs)\n f.writelines('\\ngpu_configs\\n\\n')\n f.writelines(s + '\\n' for s in gpu_configs )\n\n for precision in precisions:\n benchmark_models(\"train\", train, precision, folder_name)\n benchmark_models(\"test\", test, precision, folder_name)\n\n\n now = time.localtime()\n end_time=str(\"%04d/%02d/%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec))\n print('benchmark end : ',end_time)\n with open(os.path.join(folder_name,\"system_info.txt\"), \"a\") as f:\n f.writelines('benchmark end : 
'+end_time+'\\n')\n\n\n","sub_path":"benchmark_models.py","file_name":"benchmark_models.py","file_ext":"py","file_size_in_byte":7094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"35426512","text":"\n\nfrom xai.brain.wordbase.nouns._scholar import _SCHOLAR\n\n#calss header\nclass _SCHOLARS(_SCHOLAR, ):\n\tdef __init__(self,): \n\t\t_SCHOLAR.__init__(self)\n\t\tself.name = \"SCHOLARS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"scholar\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_scholars.py","file_name":"_scholars.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"483746958","text":"\"\"\"CPE202\nJohn Wright\nLab 5\n\"\"\"\nimport unittest\nimport random\nfrom comparison_sort import insertion_sort, merge_sort, merge_sort_helper\nimport time\n\nclass MyTest(unittest.TestCase):\n\n def test_insertion_sort(self):\n random.seed(1)\n list = random.sample(range(1000),1000)\n listsorted = []\n list2sorted = []\n for i in range(1000):\n listsorted.append(i)\n for i in range(100000):\n list2sorted.append(i)\n self.assertEqual(insertion_sort([5,4,3,2,1]), (15))\n self.assertEqual(insertion_sort(list), (251393))\n self.assertEqual(insertion_sort(listsorted), 1000)\n self.assertEqual(insertion_sort(list2sorted), 100000)\n\n def test_merge_sort(self):\n random.seed(1)\n list = random.sample(range(1000), 1000)\n list2 = random.sample(range(8000), 8000)\n listsorted = []\n list2sorted = []\n for i in range(1000):\n listsorted.append(i)\n for i in range(100000):\n list2sorted.append(i)\n testsort = merge_sort_helper(list)\n self.assertEqual(testsort[0], (listsorted))\n self.assertEqual(merge_sort(list), 9976)\n self.assertEqual(merge_sort(list2), 103808)\n self.assertEqual(merge_sort(list2sorted), 1668928)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Lab 6/comparison_sort_test.py","file_name":"comparison_sort_test.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"510640860","text":"import matplotlib.pyplot as plt\nfrom math import sqrt, fabs, ceil\n\n\ngate_w_x = 5\ngate_w_y = 15\nmodule_coor_x = [12,1,8,22,3,17,22,4,32,6,11,30,6,24,12]\nmodule_coor_y = [4,10,1,22,29,3,16,1,7,12,32,7,3,6,23]\nrobot_path = [[15,16,19,22,25,27,32],[12,24,32,32,24,17,12]]\nmodule_dist = {}\nready_module = [[gate_w_x, gate_w_y]]\n\ndistance = 3\n\n\n# make dots on robot's path and add their coors to module_coor\nrobot_coor = []\nfor coor in zip(robot_path[0], robot_path[1]):\n if robot_coor:\n dist = sqrt((fabs(robot_coor[0] - coor[0])) ** 2 + (fabs(robot_coor[1] - coor[1])) ** 2)\n module_num = ceil(dist / distance)\n angle1 = (robot_coor[1] - coor[1]) / dist\n angle0 = (robot_coor[0] - coor[0]) / dist\n\n for num in range(1, module_num + 1):\n new_mod_coor_x = robot_coor[0] - angle0 * (dist / module_num) * num\n new_mod_coor_y = robot_coor[1] - angle1 * (dist / module_num) * num\n #ready_module.append([new_mod_coor_x, new_mod_coor_y])\n module_coor_x.append(new_mod_coor_x)\n module_coor_y.append(new_mod_coor_y)\n\n robot_coor = coor\n else:\n robot_coor = coor\n\n\n# make list of distance between modules to gate_way\nfor coor in range(len(module_coor_x)):\n dist = sqrt((fabs(gate_w_x-module_coor_x[coor]))**2 + (fabs(gate_w_y-module_coor_y[coor]))**2)\n module_dist[dist] = [module_coor_x[coor], module_coor_y[coor]]\n\n# 
module_dist_sort = dict(sorted(module_dist.items()))\n# print(module_dist_sort)\n\nfor module in dict(sorted(module_dist.items())).values():\n    dist_to_mod = []\n\n    # find the nearest module in ready_module for connection to it\n    for ready_m in ready_module:\n        dist = sqrt((fabs(ready_m[0] - module[0])) ** 2 + (fabs(ready_m[1] - module[1])) ** 2)\n        angle1 = (ready_m[1]-module[1])/dist\n        angle0 = (ready_m[0] - module[0]) / dist\n\n        if dist_to_mod == []:\n            dist_to_mod.extend([dist, angle0, angle1, module, ready_m])\n        elif dist_to_mod[0] > dist:\n            dist_to_mod = [dist, angle0, angle1, module, ready_m]\n        else:\n            continue\n\n    # find the coors of additional points between module and nearest ready module\n    module_num = ceil(dist_to_mod[0]/distance)\n    for num in range(1, module_num+1):\n        # new_mod_coor = ready_m[x or y] - angle * (dist/module_num)*num\n        new_mod_coor_x = dist_to_mod[4][0] - dist_to_mod[1] * (dist_to_mod[0]/module_num)*num\n        new_mod_coor_y = dist_to_mod[4][1] - dist_to_mod[2] * (dist_to_mod[0]/module_num)*num\n        ready_module.append([new_mod_coor_x, new_mod_coor_y])\n\n        # from gateway to every point\n        # new_mod_coor_x = num * (dist_to_mod[4][0] - dist_to_mod[1] * dist_to_mod[0]) / module_num\n        # new_mod_coor_y = num * (dist_to_mod[4][1] - dist_to_mod[2] * dist_to_mod[0]) / module_num\n        # ready_module.append([new_mod_coor_x, new_mod_coor_y])\n\nprint('ready_module:', ready_module)\n\n\nfor coor in ready_module:\n    plt.plot(coor[0], coor[1], 'yo')\nplt.plot(gate_w_x, gate_w_y, 'ro')\nplt.plot(module_coor_x, module_coor_y, 'bo')\nplt.plot(*robot_path)\nplt.show()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"303576577","text":"from PIL import Image\nfrom io import BytesIO\nimport pickle\nimport json\nimport numpy as np\nfrom pykafka import KafkaClient\nfrom pykafka.common import OffsetType\n\n\ndef gen_client(hosts=\"127.0.0.1:9092\", topic_name='people-detection'):\n    client = KafkaClient(hosts=hosts)\n    topic = client.topics[topic_name]\n    return client, topic\n\n\ndef decode(msg):\n    msg = pickle.loads(msg)\n    img = Image.open(BytesIO(msg['img_bytes']))\n    msg['img_arr'] = np.array(img)\n    del(msg['img_bytes'])\n    return msg\n\n\ndef model(msg):\n    \"\"\"Call the model from here maybe\"\"\"\n    print('Request Time:', msg['req_time'],\n          'Image Dimensions:', msg['img_arr'].shape)\n\n\nif __name__ == \"__main__\":\n    client, topic = gen_client(\n        hosts=\"127.0.0.1:9092\", topic_name='people-detection')\n    consumer = topic.get_simple_consumer()\n    for msg in consumer:\n        if msg is not None:\n            msg = decode(msg.value)\n            model(msg)\n\n\n# To dos:\n# Dockerize the application\n# Improve the consumer - currently it is a simple consumer\n","sub_path":"consumer/consumer_shakeshack.py","file_name":"consumer_shakeshack.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"13981581","text":"#!/usr/bin/env python3.7\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport os\n\nif os.path.exists('OrkShop.csv'):\n    os.remove('OrkShop.csv')\n\nwith open('OrkShop.csv', 'w') as file:\n    writer = csv.writer(file)\n    writer.writerow(('side', 'race', 'title', 'price', 'status', 'url'))\n\nstart_url = 'https://goodork.ru/categories/adeptus-astartes-blood-angels?page=1'\nbase = 'https://goodork.ru/categories/adeptus-astartes-blood-angels?page='\nname = 'Blood Angels'\nside1 = 'Imperium'\n\ndef 
get_html(url):\n r = requests.get(url)\n return r.text\n\ndef get_total_pages(html):\n soup = BeautifulSoup(html, 'html5lib')\n pages = soup.find('div', class_='pagenumberer')\n if pages is None:\n total_pages = '1'\n else:\n pages = \\\n soup.find('div', class_='pagenumberer').find_all('a', class_='pagenumberer-item pagenumberer-item-link')[-1].get('href')\n total_pages = pages.split('=')[1]\n\n return int(total_pages)\n\ndef write_csv(data):\n with open('OrkShop.csv', 'a') as f:\n writer = csv.writer(f)\n writer.writerow((data['side'],\n data['race'],\n data['title'],\n data['price'],\n data['status'],\n data['url']))\n\ndef get_page_data(html):\n soup = BeautifulSoup(html, 'html5lib')\n\n ads = soup.find('div', class_='row products-view products-view-tile productview-wow').find_all('div', class_='products-view-block')\n\n for ad in ads:\n\n try:\n title = ad.find('div', class_='products-view-name products-view-name-default').find('a').get('title')\n except:\n title = ''\n try:\n price = ad.find('div', class_='price').find('div', class_='price-number').text.strip()\n except:\n price = ''\n try:\n status = ad.find('div', class_='products-view-buttons').find('a', class_='btn btn-big btn-buy products-view-buy').text.strip()\n if status == 'В корзину':\n status = 'В наличии'\n else:\n status = 'Нет в наличии'\n except:\n status = 'Нет в наличии'\n try:\n url = ad.find('figure', class_='products-view-pictures').find('a', class_='products-view-picture-link products-view-shadow-hover').get('href')\n except:\n url = ''\n race = name\n side = side1\n data = {'side': side,\n 'race': race,\n 'title': title,\n 'price': price,\n 'status': status,\n 'url': url}\n write_csv(data)\n\ndef main():\n url = start_url\n base_url = base\n\n total_pages = get_total_pages(get_html(url))\n\n for i in range(1, total_pages+1):\n url_gen = base_url + str(i)\n html = get_html(url_gen)\n get_page_data(html)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Blood Angels.py","file_name":"Blood Angels.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"141417735","text":"#!/usr/bin/env python\n\n\"\"\"Tests for ImagesDataset.\"\"\"\n\nimport random\nimport tempfile\nimport unittest\nfrom pathlib import Path\nimport numpy as np\nimport nibabel as nib\nimport torchio\nfrom torchio import INTENSITY, LABEL\n\n\nclass TestRandomElasticDeformation(unittest.TestCase):\n \"\"\"Tests for `RandomElasticDeformation`.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n self.dir = Path(tempfile.gettempdir()) / '.torchio_tests'\n self.dir.mkdir(exist_ok=True)\n random.seed(42)\n np.random.seed(42)\n\n subject_a = {\n 't1': dict(path=self.get_image_path('t1_a'), type=INTENSITY),\n }\n subject_b = {\n 't1': dict(path=self.get_image_path('t1_b'), type=INTENSITY),\n 'label': dict(path=self.get_image_path('label_b'), type=LABEL),\n }\n subject_c = {\n 'label': dict(path=self.get_image_path('label_c'), type=LABEL),\n }\n subject_d = {\n 't1': dict(path=self.get_image_path('t1_d'), type=INTENSITY),\n 't2': dict(path=self.get_image_path('t2_d'), type=INTENSITY),\n 'label': dict(path=self.get_image_path('label_d'), type=LABEL),\n }\n self.paths_list = [\n subject_a,\n subject_b,\n subject_c,\n subject_d,\n ]\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n import shutil\n shutil.rmtree(self.dir)\n\n def get_image_path(self, stem):\n data = np.random.rand(10, 20, 30)\n affine = np.eye(4)\n suffix = 
random.choice(('.nii.gz', '.nii'))\n path = self.dir / f'{stem}{suffix}'\n nib.Nifti1Image(data, affine).to_filename(str(path))\n path = str(path) if np.random.rand() > 0.5 else path\n return path\n\n def test_images(self):\n self.iterate_dataset(self.paths_list)\n\n def test_wrong_paths_list(self):\n with self.assertRaises(ValueError):\n self.iterate_dataset([])\n with self.assertRaises(ValueError):\n self.iterate_dataset(())\n with self.assertRaises(TypeError):\n self.iterate_dataset(0)\n with self.assertRaises(TypeError):\n self.iterate_dataset([0])\n with self.assertRaises(ValueError):\n self.iterate_dataset([{}])\n with self.assertRaises(ValueError):\n self.iterate_dataset([{}, {}])\n with self.assertRaises(FileNotFoundError):\n self.iterate_dataset([{'t1':dict(path='nopath', type=INTENSITY)}])\n with self.assertRaises(TypeError):\n self.iterate_dataset([{'t1': dict(path=5, type=INTENSITY)}])\n with self.assertRaises(ValueError):\n path = self.dir / 'test.txt'\n path.touch()\n self.iterate_dataset([{'t1': dict(path=path, type=INTENSITY)}])\n with self.assertRaises(KeyError):\n self.iterate_dataset([{'t1': dict(path='nopath')}])\n with self.assertRaises(KeyError):\n self.iterate_dataset([{'t1': dict(type=INTENSITY)}])\n with self.assertRaises(KeyError):\n self.iterate_dataset([{'t1': dict(test='', type=INTENSITY)}])\n with self.assertRaises(TypeError):\n self.iterate_dataset([{'t1': 6}])\n\n def test_others(self):\n dataset = torchio.ImagesDataset(\n self.paths_list, verbose=True, transform=lambda x: x)\n _ = len(dataset) # for coverage\n sample = dataset[0]\n output_path = self.dir / 'test.nii.gz'\n paths_dict = {'t1': output_path}\n dataset.save_sample(sample, paths_dict)\n nii = nib.load(str(output_path))\n ndims_output = len(nii.shape)\n ndims_sample = len(sample['t1']['data'].shape)\n assert ndims_sample == ndims_output + 1\n\n def iterate_dataset(self, paths_list):\n dataset = torchio.ImagesDataset(paths_list)\n for _ in dataset:\n pass\n","sub_path":"tests/test_images_dataset.py","file_name":"test_images_dataset.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"404939641","text":"import re\nfrom advent_of_code import advent_input_reader\nimport numpy as np\nimport pandas as pd\npd.set_option('display.width', 1200)\n\n\ndef rotate_array(a, rotate_by):\n a = list(a)\n n = abs(rotate_by) % len(a)\n return a[-n:] + a[:-n] if rotate_by > 0 else a[n:] + a[:n]\n\n'''\nrotate column x=0 by 1\nrect 3x1\nrotate row y=0 by 6\nrotate column x=0 by 1\nrect 4x1\nrotate column x=10 by 1\nrotate row y=2 by 16\nrotate row y=0 by 8\nrotate column x=5 by 1\nrotate column x=0 by 1\n'''\n\nscreen = np.zeros((6, 50), dtype=int)\n\n\ndef parse_and_execute(instruction):\n rotatey = re.search(r'rotate row y=(\\d+) by (\\d+)', instruction)\n rotatex = re.search(r'rotate column x=(\\d+) by (\\d+)', instruction)\n rect = re.search(r'rect (\\d+)x(\\d+)', instruction)\n\n if rect is not None:\n x, y = rect.group(1, 2)\n x = int(x)\n y = int(y)\n screen[:y, :x] = np.ones((y, x), dtype=int)\n #print('rect', x, y)\n\n if rotatex is not None:\n column, down_shift = rotatex.group(1, 2)\n column = int(column)\n down_shift = int(down_shift)\n screen[:, column] = rotate_array(screen[:, column], down_shift)\n #print('rotatex', column, down_shift)\n\n if rotatey is not None:\n row, right_shift = rotatey.group(1, 2)\n row = int(row)\n right_shift = int(right_shift)\n screen[row, :] = rotate_array(screen[row, :], 
right_shift)\r\n    #print('rotatey', row, right_shift)\r\n\r\n\r\n#parse_and_execute('rect 4x3')\r\n#parse_and_execute('rotate row y=2 by 40')\r\n#parse_and_execute('rotate column x=3 by 3')\r\n\r\ninstructions = advent_input_reader.get_input(8)\r\n#print(len(instructions))\r\n\r\nfor instruction in instructions:\r\n    parse_and_execute(instruction.strip())\r\n\r\nprint(np.sum(screen))\r\n\r\nscreen_df = pd.DataFrame(screen).applymap(lambda x: '' if int(x) == 0 else x)\r\n\r\nfor i in range(0, 5):\r\n    print(screen_df.loc[:, 10*i : 10*i + 9])\r\n","sub_path":"advent_of_code/q8.py","file_name":"q8.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"521975323","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\ndef has_site_user(self):\n    try:\n        self.site_user\n    except Exception as e:\n        return False\n    return True\n\n\nUser.add_to_class(\"has_site_user\", has_site_user)\n\n\nclass Skill(models.Model):\n    # TODO: enums for skills and Domain Skill\n    name = models.TextField(null=True, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\n# Create your models here.\nclass Group(models.Model):\n    name = models.CharField(max_length=250, null=True, blank=True)\n    group_size = models.IntegerField(null=True, blank=True)\n    estimated_work_duration = models.IntegerField(null=True, blank=True)\n    skill = models.ForeignKey(Skill, on_delete=models.DO_NOTHING, null=False)\n    owner = models.ForeignKey(User, related_name=\"groupings\"\n                              , on_delete=models.CASCADE, null=False)\n\n    def __str__(self):\n        return self.name\n\n\nclass UserSkill(models.Model):\n    Beginner = 'BG'\n    Intermediate = 'IT'\n    Expert = 'EX'\n    Master = 'MS'\n    LEVELS = (\n        ('BG', 'Beginner'),\n        ('IT', 'Intermediate'),\n        ('EX', 'Expert'),\n        ('MS', 'Master'),\n    )\n    user = models.ForeignKey(User, on_delete=models.CASCADE, null=False)\n    skill = models.ForeignKey(Skill, on_delete=models.CASCADE, null=False)\n    # TODO: enums for level\n    level = models.CharField(max_length=2,\n                             choices=LEVELS,\n                             default=Beginner, )\n    years_of_experience = models.IntegerField()\n\n\nclass Notification(models.Model):\n    isVideo = models.BooleanField(default=False)\n    isMessage = models.BooleanField(default=True)\n    message = models.TextField(null=True, blank=True)\n    # TODO: enums for colors\n    background_color = models.CharField(null=False, blank=False, max_length=250)\n    # TODO: add the video for notification\n\n\nclass UserGroup(models.Model): # Members table\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n    group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"groupings\")\n    isTeacher = models.BooleanField(default=False)\n    isLearner = models.BooleanField(default=True)\n    start_at = models.DateField(null=True)\n\n    class Meta:\n        unique_together = ((\"user\", \"group\"),)\n\n    def __str__(self):\n        return self.user.username + \" assigned to \" + self.group.name\n\n\nclass GroupNotification(models.Model):\n    LOW = 'LW'\n    MEDIUM = 'MD'\n    HIGH = 'HG'\n    PRIORITIES = (\n        ('LW', 'LOW'),\n        ('MD', 'MEDIUM'),\n        ('HG', 'HIGH'),\n    )\n    group = models.ForeignKey(Group, on_delete=models.CASCADE)\n    message = models.TextField()\n    color = models.TextField()\n    created_at = models.DateTimeField(null=True)\n    priority = models.CharField(max_length=2,\n                                choices=PRIORITIES,\n                                default=LOW, )\n\n    def __str__(self):\n        return self.group.name + \" -- \" + self.message\n\n\nclass Survey(models.Model):\n    name = models.CharField(max_length=250, null=True, blank=True)\n    description = 
models.TextField(null=True, blank=True)\n aim_for_survey = models.TextField(null=True, blank=True)\n\n\nclass EvaluationSession(models.Model):\n start_date = models.DateField()\n end_date = models.DateField()\n name = models.CharField(max_length=250, null=True, blank=True)\n group = models.ForeignKey(Group, on_delete=models.CASCADE)\n survey = models.ForeignKey(Survey, on_delete=models.DO_NOTHING)\n\n\nclass SurveyQuestion(models.Model):\n # TODO:make enum for type\n type = models.CharField(max_length=250, null=True, blank=True)\n points = models.IntegerField()\n text = models.TextField()\n survey = models.ForeignKey(Survey, on_delete=models.CASCADE)\n\n\nclass SurveyAnswer(models.Model):\n isValid = models.BooleanField(default=False)\n text = models.TextField()\n points_get = models.IntegerField()\n survey_question = models.ForeignKey(SurveyQuestion, on_delete=models.CASCADE)\n\n\nclass UserMessage(models.Model):\n owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='from_user')\n to_user_msg = models.ForeignKey(User, on_delete=models.CASCADE, related_name='to_user')\n time = models.DateTimeField(null=True, blank=True)\n text = models.TextField(null=True, blank=True)\n\n\nclass Site_User(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n # TODO: enums for Location ,maybe county added\n location = models.TextField(null=True, blank=True)\n first_name = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True, default=\" Eager to teach things\")\n\n # TODO: add image file\n\n def __str__(self):\n return self.user.username\n\n\nclass RequestToGroup(models.Model):\n ACCEPTED = 'AC'\n PENDING = 'PG'\n CLOSED = 'CL'\n PRIORITIES = (\n ('AC', 'ACCEPT'),\n ('PG', 'PENDING'),\n ('CL', 'CLOSED'),\n )\n status = models.CharField(max_length=2,\n choices=PRIORITIES,\n default=ACCEPTED, null=False)\n request_from = models.ForeignKey(User, on_delete=models.CASCADE, related_name='from_user_request')\n request_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name='to_user_request')\n time = models.DateTimeField(null=True, blank=True)\n group = models.ForeignKey(Group, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"From \" + self.request_from.username + \" to: \" + self.request_to.username + \" for group \" + self.group.name\n","sub_path":"monitor/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"388867367","text":"import random\nfrom Card import Card\nfrom player import Player\nfrom Goal import Goal\nfrom Objective import Objective\n\nclass PlayerTurn():\n def __init__(self, numPlayers, mapInstance):\n self.endGame = False\n self.num = 0\n self.numPlayers = numPlayers\n self.turnList = list(range(1, numPlayers + 1))\n random.shuffle(self.turnList)\n self.numTerritories = mapInstance.numTerritories\n self.players = []\n\n for k in range(0, numPlayers):\n self.players.append(Player(k + 1, mapInstance, self))\n\n # assigns player goals\n self.goal = Goal(mapInstance, self)\n for k in range(0, numPlayers):\n self.players[k].obj = Objective(self.goal, self.players[k])\n self.id_turnList = 0\n self.map = mapInstance\n self.list_phase = [\"Placement\", \"Attack\", \"Movement\"]\n self.phase = 0\n self._player_ = self.turnList[self.id_turnList]\n\n def playerName(self):\n return self.players[self.turnCount -1].name\n\n def next(self):\n if self.players[self.turnCount - 
1].num_troops > 0:\n raise ValueError(\"Need to deploy\", self.players[self.turnCount - 1].num_troops)\n\n if self.num == 0: # Placement phase\n self.id_turnList = (self.id_turnList + 1) % len(self.turnList)\n if self.id_turnList == 0:\n self.num += 1\n self.phase = (self.phase + 1) % len(self.list_phase)\n\n elif self.num == 1: # Attack phase\n self.phase = (self.phase + 1) % len(self.list_phase)\n if self.phase == 0:\n self.phase += 1\n self.id_turnList = (self.id_turnList + 1) % len(self.turnList)\n\n # Update territory info if captured\n self.players[self.turnCount - 1].attack_success = False\n if self.id_turnList == 0:\n self.num += 1\n self.phase = 0\n\n # Updates reinforcement troops for start of player turn\n self.players[self.turnCount - 1].num_troops += self.players[self.turnCount - 1].troopsPerTurn\n else:\n self.phase = (self.phase + 1) % len(self.list_phase)\n if self.phase == 0:\n self.id_turnList = (self.id_turnList + 1) % len(self.turnList)\n\n # Update territory captured boolean\n self.players[self.turnCount - 1].attack_success = False\n\n # Updates reinforcement troops for start of player turn\n self.players[self.turnCount - 1].num_troops += self.players[self.turnCount - 1].troopsPerTurn\n if self.id_turnList == 0:\n self.num += 1\n\n print(\"Turn Number :\", self.num, \"order\", self.turnList, \"player turn\", self.turnList[self.id_turnList])\n print(self.list_phase[self.phase])\n\n # Next player turn\n def next_player(self):\n if self.num == 0: # Initial placement phase\n self.id_turnList = (self.id_turnList + 1) % len(self.turnList)\n if self.id_turnList == 0:\n self.num += 1\n self.phase = (self.phase + 1) % len(self.list_phase)\n\n elif self.num == 1: # Skip placement\n self.phase = 1\n self.id_turnList = (self.id_turnList + 1) % len(self.turnList)\n self.players[self.turnCount - 1].attack_success = False\n if self.id_turnList == 0:\n self.num += 1\n self.phase = 0\n self.players[self.turnCount - 1].num_troops += self.players[self.turnCount - 1].troopsPerTurn\n\n else:\n # Move to next player turn\n self.id_turnList = (self.id_turnList + 1) % len(self.turnList)\n self.phase = 0\n self.players[self.turnCount - 1].attack_success = False\n self.players[self.turnCount - 1].num_troops += self.players[self.turnCount - 1].troopsPerTurn\n if self.id_turnList == 0:\n self.num += 1\n\n # Method allocates starting troops for each player\n def initialTroops(self):\n if self.numPlayers == 2:\n num_troops = 50\n elif self.numPlayers == 3:\n num_troops = 40\n elif self.numPlayers == 4:\n num_troops = 30\n elif self.numPlayers == 5:\n num_troops = 20\n elif self.numPlayers == 6:\n num_troops = 15\n else:\n print(\"Troop allocation error! 
Please restart game!\")\n num_troops = 0\n for p in self.players:\n p.num_troops = num_troops\n\n # Distributes territories as evenly as possible among players\n def distributeTerritories(self, territories):\n listTerritoryID = []\n for k in territories:\n listTerritoryID.append(k.id)\n random.shuffle(listTerritoryID)\n n = self.numTerritories // self.numPlayers\n for idx, i in enumerate(range(0, len(listTerritoryID), n)):\n if idx < self.numPlayers:\n self.players[idx].territories = listTerritoryID[i:i + n]\n else:\n for pays_restant in listTerritoryID[i:i + n]: # After distribution, remaing countrys randomly assigned\n self.players[random.randint(0, self.numPlayers - 1)].territories.append(pays_restant)\n for p in self.players:\n for territories in p.territories:\n self.map.territories[territories - 1].id_player = p.id\n self.map.territories[territories - 1].num_troops = 1 # Min 1 troop per territory\n p.num_troops -= 1\n return listTerritoryID\n\n # Get dice roll results\n def rollDice(self, attack, defense):\n d_a = []\n d_b = []\n losses = [0, 0, d_a, d_b] # Attacker deaths, defender deaths\n\n for k in range(0, attack):\n d_a.append(random.randint(1, 6))\n d_a.sort(reverse=True)\n\n for k in range(0, defense):\n d_b.append(random.randint(1, 6))\n d_b.sort(reverse=True)\n\n for k in range(0, min(attack, defense)):\n if d_b[k] < d_a[k]: # On attacker win\n losses[1] = losses[1] + 1\n else:\n losses[0] = losses[0] + 1\n return losses\n\n # Tests attack vs defense forces\n def attack(self, attacker, defender, attackingTroops):\n diceResults = []\n\n while (True):\n if attackingTroops > 2:\n dice_atck = 3\n elif attackingTroops > 1:\n dice_atck = 2\n elif attackingTroops > 0:\n dice_atck = 1\n else:\n raise ValueError(\"not enough troops:\", attackingTroops)\n if defender.num_troops > 1:\n dice_def = 2\n elif defender.num_troops > 0:\n dice_def = 1\n\n res = self.rollDice(dice_atck, dice_def)\n print(res)\n\n diceResults.append(res)\n attacker.num_troops -= res[0]\n attackingTroops -= res[0]\n defender.num_troops -= res[1]\n\n if attackingTroops == 0: # Attack failed\n return False, diceResults\n\n elif defender.num_troops == 0: # Territory captured\n\n # Update list of territories for players\n self.players[attacker.id_player - 1].territories.append(defender.id)\n self.players[defender.id_player - 1].territories.remove(defender.id)\n\n # Update captured territor id\n defender.id_player = attacker.id_player\n\n # Moves surviving troops to new territory, remove from old\n self.troopMovement(attacker, defender, dice_atck)\n\n # Attacker gets a card if it is the first captured territory this turn\n if self.players[attacker.id_player - 1].attack_success == False:\n self.players[attacker.id_player - 1].attack_success = True\n\n # If player has 5+ cards, card is discarded\n if len(self.players[attacker.id_player - 1].cards) > 4:\n self.players[attacker.id_player - 1].del_card(0)\n\n self.players[attacker.id_player - 1].cards.append(Card())\n return True, diceResults\n\n # Remove troops from old territory, add survivors to new\n def troopMovement(self, origin, destination, num_troops):\n if num_troops < origin.num_troops:\n origin.num_troops -= num_troops\n destination.num_troops += num_troops\n else:\n print(\"trying to move too many troops\")\n\n # Troop assigner during placement\n def placeTroops(self, territories, num_troops):\n player = next((p for p in self.players if p.id == territories.id_player), None)\n if (player.num_troops - num_troops <= 0):\n territories.num_troops += 
player.num_troops\n            player.num_troops -= player.num_troops\n            self.next()\n        else:\n            player.num_troops -= num_troops\n            territories.num_troops += num_troops\n\n    # getter for turn\n    @property\n    def turnCount(self):\n        return self.turnList[self.id_turnList]\n\n    # # for debug\n    # def print_pays(self):\n    #     for territories in self.territories:\n    #         territories.print_carac()\n\n    # Checks if path is valid\n    def chemin_exist(self, playerTerritories, territoryA, territoryB):\n        validNeighbors = []\n        if territoryA.id in playerTerritories:\n            validNeighbors.append(territoryA.id)\n            self.pathDepth(territoryA, playerTerritories, validNeighbors)\n            if territoryB.id in validNeighbors:\n                print(\"A path exists\")\n                return True\n            else:\n                print(\"no valid path\")\n                return False\n        else:\n            print(\"Player cannot select this territory\")\n            return False\n\n    def pathDepth(self, territory, playerTerritories, validNeighbors):\n        for p_id in territory.neighbors:\n            if p_id in playerTerritories and p_id not in validNeighbors:\n                validNeighbors.append(p_id)\n                self.pathDepth(self.map.territories[p_id - 1], playerTerritories, validNeighbors)","sub_path":"Duncan's Side Branch/playerTurn.py","file_name":"playerTurn.py","file_ext":"py","file_size_in_byte":10230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"534118639","text":"import math\nimport torch\nfrom torch.nn import MaxPool2d, ReLU, Sequential, Linear, CrossEntropyLoss\nfrom torch.nn.modules.batchnorm import BatchNorm2d\nfrom torch.nn.modules.conv import Conv2d\nfrom torch.nn.modules.dropout import Dropout\nfrom torch.nn.modules.module import Module\nfrom torch.optim.adam import Adam\nfrom torch.utils.data.dataloader import DataLoader\nfrom torchvision.datasets import CIFAR10\nfrom torchvision.transforms import Compose, ToTensor, Normalize, RandomCrop, RandomHorizontalFlip\n\n\nepochs = 20\ndata_download = False\nbatch_size_train = 100\nbatch_size_test = 100\nlr = 0.01\ndropout = 0.2\nconfig = {\n    'vgg11': [64, 'm', 128, 'm', 256, 256, 'm', 512, 512, 'm', 512, 512, 'm'],\n    'vgg13': [64, 64, 'm', 128, 128, 'm', 256, 256, 'm', 512, 512, 'm', 512, 512, 'm'],\n    'vgg16': [64, 64, 'm', 128, 128, 'm', 256, 256, 256, 'm', 512, 512, 512, 'm', 512, 512, 512, 'm'],\n    'vgg19': [64, 64, 'm', 128, 128, 'm', 256, 256, 256, 256, 'm', 512, 512, 512, 512, 'm', 512, 512, 512, 512, 'm']\n}\n\n\nclass VggNet(Module):\n    def __init__(self, vgg_name):\n        super(VggNet, self).__init__()\n        self.conv = self.make_layers(config[vgg_name])\n        # self.classifier = Sequential(\n        #     Dropout(dropout),\n        #     Linear(in_features=512, out_features=512),\n        #     ReLU(inplace=True),\n        #     Dropout(dropout),\n        #     Linear(in_features=512, out_features=512),\n        #     ReLU(inplace=True),\n        #     Linear(in_features=512, out_features=10)\n        # )\n        self.classifier = Linear(in_features=512, out_features=10)\n        # for m in self.modules():\n        #     if isinstance(m, Conv2d):\n        #         n = m.kernel_size[0]*m.kernel_size[1]*m.out_channels\n        #         m.weight.data.uniform_(0, math.sqrt(2./n))\n        #         m.bias.data.zero_()\n        # print('Build Model: Initialization Finished')\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = x.view(x.size(0), -1)\n        x = self.classifier(x)\n        return x\n\n    def make_layers(self, cfg):\n        layers = []\n        in_channels = 3\n        for x in cfg:\n            if x == 'm':\n                layers += [MaxPool2d(kernel_size=2, stride=2)]\n            else:\n                layers += [Conv2d(in_channels=in_channels, out_channels=x, kernel_size=3, padding=1), BatchNorm2d(x), ReLU(inplace=True)]\n                in_channels = x\n        return Sequential(*layers)\n\n\nprint('Preparing 
Data')\ntransform_train = Compose([RandomCrop(32, padding=4), RandomHorizontalFlip(), ToTensor(), Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\ntransform_test = Compose([ToTensor(), Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\ncifar10_train = CIFAR10(root='/home/qiangde/Data', train=True, download=data_download, transform=transform_train)\ncifar10_test = CIFAR10(root='/home/qiangde/Data', train=False, download=data_download, transform=transform_test)\ntrainloader = DataLoader(dataset=cifar10_train, batch_size=batch_size_train, shuffle=True, num_workers=2)\ntestloader = DataLoader(dataset=cifar10_test, batch_size=batch_size_test, shuffle=False, num_workers=2)\nprint('Building Model')\nmodel = VggNet('vgg13')\noptimizer = Adam(model.parameters(), lr=lr)\ncriterion = CrossEntropyLoss()\nprint('Starting Training')\nfor epoch in range(epochs):\n for i, (images, label) in enumerate(trainloader):\n optimizer.zero_grad()\n out = model(images)\n loss = criterion(out, label)\n loss.backward()\n optimizer.step()\n total_train = label.size(0)\n predict_train = torch.max(out.data, 1)[1]\n correct_train = (predict_train == label).sum().item()\n accuracy_train = correct_train / total_train\n if (i + 1) % 50 == 0:\n print('Epoch ', epoch + 1, ' step ', i + 1, ' Training Accuracy: ', accuracy_train)\nprint('Start Testing')\nmodel.eval()\ntotal_test = 0\ncorrect_test = 0\nfor i, (images, label) in enumerate(testloader):\n output = model(images)\n predict_test = torch.max(output, 1)[1]\n total_test += label.size(0)\n correct_test += (predict_test==label).sum().item()\nprint('Testing Accuracy: ', correct_test/total_test)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Pytorch_VggNet_Cifar10.py","file_name":"Pytorch_VggNet_Cifar10.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"297096186","text":"import columns\nfrom component import *\nfrom xml.etree import ElementTree\nfrom xml.dom import minidom\nfrom preferences import BomPref\n\n\"\"\"\nWrite BoM out to an XML file\nfilename = path to output file (must be a .xml)\ngroups = [list of ComponentGroup groups]\nnet = netlist object\nheadings = [list of headings to display in the BoM file]\nprefs = BomPref object\n\"\"\"\n \ndef WriteXML(filename, groups, net, headings, prefs):\n \n if not filename.endswith(\".xml\"):\n return False\n \n xml = ElementTree.Element('KiCAD_BOM', attrib = {\n 'Schematic_Source' : net.getSource(),\n 'Schematic_Version' : net.getVersion(),\n 'Schematic_Date' : net.getSheetDate(),\n 'BOM_Date' : net.getDate(),\n 'KiCad_Version' : net.getTool(),\n 'groups' : str(len(groups)),\n 'components' : str(sum([group.getCount() for group in groups]))\n })\n \n for group in groups:\n if prefs.ignoreDNF and not group.isFitted():\n continue\n row = group.getRow(headings)\n \n attrib = {}\n \n for i,h in enumerate(headings):\n h = h.replace(' ','_') #replace spaces, xml no likey\n h = h.replace('\"','')\n h = h.replace(\"'\",'')\n \n attrib[h] = row[i]\n \n sub = ElementTree.SubElement(xml, \"group\", attrib=attrib)\n \n with open(filename,\"w\") as output:\n out = ElementTree.tostring(xml, 'utf-8')\n \n output.write(minidom.parseString(out).toprettyxml(indent=\"\\t\"))\n \n return True\n \n","sub_path":"KiBOM/xml_writer.py","file_name":"xml_writer.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"131769391","text":"class FilterSocket:\n\n def __init__(self, data, swearwords=['shit', 'fuck', 'damn', 'bitch',\n 'crap', 'piss', 'dick', 'darn',\n 'cock', 'pussy', 'asshole', 'fag',\n 'bastard', 'slut', 'douche']):\n self.data = data\n self.swearwords = swearwords\n\n def filter(self):\n split_message = [word for word in self.data.decode('utf-8').strip().split()]\n for i, word in enumerate(split_message):\n if word in self.swearwords:\n split_message[i] = '*' * len(word)\n self.data = bytes(' '.join(split_message), 'utf-8')\n return self.data\n","sub_path":"decorator/filtersocket.py","file_name":"filtersocket.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"275271982","text":"import json\nfrom random import choice\nfrom django.test import TestCase, Client\nfrom django import test\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import reverse\nfrom stocks.models import *\nfrom stocks.views import join_floor\nfrom stocks.forms import TradeForm, LoginForm, RegistrationForm, EditFloorForm\nimport json\nimport time\nfrom functools import reduce\nimport urllib\n\nclass TradeTestCase(StaticLiveServerTestCase):\n fixtures = [\"fixture.json\"]\n def get_trade(self):\n floor = Floor.objects.all()[0]\n available_players = [p for p in Player.objects.filter(floor=floor) if not p.isFloor()]\n sender = available_players[0]\n recipient = available_players[1]\n sender_stocks = list(sender.stocks.all())\n recipient_stocks = list(recipient.stocks.all())\n trade = Trade.objects.create(floor=floor, sender=sender, recipient=recipient)\n trade.senderStocks = sender.stocks.all()\n trade.recipientStocks = recipient.stocks.all()\n return trade\n def test_self_trade(self):\n trade = self.get_trade()\n recipient = trade.recipient\n sender = trade.sender\n floor = trade.floor\n senderStocks = list(trade.senderStocks.all())\n recipientStocks = list(trade.recipientStocks.all())\n recipient = sender\n for i in Stock.objects.all():\n if not i in floor.stocks.all():\n new_stock = i\n break\n floor.stocks.add(new_stock)\n sender.stocks.add(new_stock)\n floor.save()\n sender.save()\n recipientStocks = [recipient.stocks.all().exclude(symbol=new_stock.symbol)[0]]\n senderStocks = [new_stock]\n trade.delete()\n form = TradeForm({\"other_user\": recipient.user.username,\n \"user_stocks\": \",\".join(s.symbol for s in senderStocks),\n \"other_stocks\": \",\".join(s.symbol for s in recipientStocks)})\n self.assertFalse(form.is_valid(pkFloor=floor.pk, user=sender.user))\n self.assertIn(\"You can't send a trade to yourself\", repr(form.errors))\n def test_trade_simple(self):\n trade = self.get_trade()\n recipient = trade.recipient\n sender = trade.sender\n senderStocks = list(trade.senderStocks.all())\n recipientStocks = list(trade.recipientStocks.all())\n trade.accept()\n trade.save()\n self.assertEqual(list(recipient.stocks.all()), senderStocks)\n self.assertEqual(list(sender.stocks.all()), recipientStocks)\n def test_stock_cap_simple(self):\n SMALL_NUMBER = 2\n floor = Floor.objects.all()[0]\n floor.num_stocks = SMALL_NUMBER\n floor.save()\n player = Player.objects.all()[0]\n for i in range(SMALL_NUMBER):\n player.stocks.add(Stock.objects.all()[i])\n player.save()\n with self.assertRaises(TradeError):\n trade = Trade.objects.create(sender=player, floor=floor, recipient=floor.floorPlayer)\n 
trade.recipientStocks = [Stock.objects.all()[SMALL_NUMBER + 1]]\n trade.verify()\n def test_private_floor(self):\n floor = Floor.objects.all()[0]\n floor.public = False\n floor.save()\n client = Client()\n user = User.objects.create_user(\"privateFloorUser\", \"privateer@mailmail.mail\", \"thePasswordIs\")\n client.force_login(user)\n response = client.get(reverse(\"floorsJson\"))\n # This has to be number 2 because there are three templates: base (0), loggedIn (1), and joinFloor (2). \n self.assertListEqual([Floor.objects.get(name=i[\"name\"]) for i in json.loads(response.content.decode(\"UTF-8\"))], [])\n floor.public = True\n floor.save()\n response = client.get(reverse(\"floorsJson\"))\n self.assertListEqual([Floor.objects.get(name=i[\"name\"]) for i in json.loads(response.content.decode(\"UTF-8\"))], [floor])\n def test_trade_counter(self):\n trade = self.get_trade()\n old_recipientStocks = list(trade.recipientStocks.all())\n old_senderStocks = list(trade.senderStocks.all())\n client = Client()\n client.force_login(trade.recipient.user)\n response = client.get(reverse(\"receivedTrade\", kwargs={\"pkTrade\": trade.pk}))\n self.assertEqual(trade.toFormDict(), response.context[-1][\"form\"].data)\n self.assertContains(response, reverse(\"counterTrade\", kwargs={\"pkTrade\": trade.pk, \"pkFloor\": trade.floor.pk}))\n response = client.get(reverse(\"counterTrade\", kwargs={\"pkTrade\": trade.pk, \"pkFloor\": trade.floor.pk}))\n response = client.post(reverse(\"trade\", kwargs={\"pkCountering\": trade.pk, \"pkFloor\": trade.floor.pk}), {\"other_user\": trade.sender.user.username, \"user_stocks\": \",\".join(i.symbol for i in trade.recipientStocks.all()), \"other_stocks\": \",\".join(i.symbol for i in trade.senderStocks.all())})\n self.assertRedirects(response, reverse(\"dashboard\"))\n self.assertQuerysetEqual(Trade.objects.filter(pk=trade.pk), [])\n newTrade = Trade.objects.all()[0]\n self.assertEqual(list(newTrade.senderStocks.all()), old_recipientStocks)\n self.assertEqual(list(newTrade.recipientStocks.all()), old_senderStocks)\n def test_add_stock(self):\n floor = Floor.objects.all()[0]\n for s in Stock.objects.all():\n if not s in floor.stocks.all():\n new_stock = s.symbol\n break\n self.assertNotEqual(new_stock, \"\")\n new_stocks = \",\".join([s.symbol for s in floor.stocks.all()] + [new_stock])\n form = EditFloorForm({\"name\": floor.name, \"privacy\": not floor.public, \"number_of_stocks\": floor.num_stocks, \"stocks\": new_stocks, \"permissiveness\": floor.permissiveness})\n self.assertTrue(form.is_valid())\n self.assertTrue(form.cleaned_data['stocks'])\n form.apply(floor)\n stock = Stock.objects.get(symbol=new_stock)\n self.assertTrue(stock)\n self.assertIn(stock, floor.floorPlayer.stocks.all())\n def test_delete_stock(self):\n floor = Floor.objects.all()[0]\n stock_to_delete = choice(floor.stocks.all())\n new_stocks = ','.join([s.symbol for s in floor.stocks.all() if not s.symbol == stock_to_delete.symbol])\n form = EditFloorForm({\"name\": floor.name, \"privacy\": not floor.public, \"number_of_stocks\": floor.num_stocks, \"stocks\": new_stocks, \"permissiveness\": floor.permissiveness})\n self.assertIn(stock_to_delete, floor.stocks.all())\n self.assertTrue([p for p in Player.objects.filter(floor=floor) if stock_to_delete in p.stocks.all()])\n self.assertTrue(form.is_valid())\n self.assertTrue(form.cleaned_data['stocks'])\n form.apply(floor)\n self.assertNotIn(stock_to_delete, floor.stocks.all())\n self.assertFalse([p for p in Player.objects.filter(floor=floor) if 
stock_to_delete in p.stocks.all()])\n        self.assertTrue(form.is_valid())\n        self.assertTrue(form.cleaned_data['stocks'])\n        form.apply(floor)\n        self.assertNotIn(stock_to_delete, floor.stocks.all())\n        self.assertFalse([p for p in Player.objects.filter(floor=floor) if stock_to_delete in p.stocks.all()])\n\nclass PlayerTestCase(StaticLiveServerTestCase):\n    fixtures = [\"fixture.json\"]\n    def test_join_empty_floor(self):\n        floor = Floor.objects.all()[0]\n        user = User.objects.all().exclude(username=\"Floor\")[0]\n        player = Player.objects.get(user=user, floor=floor)\n        client = Client()\n        client.force_login(user)\n        for i in [f for f in Floor.objects.all() if not f in [p for p in Player.objects.filter(user=user)]]:\n            client.get(reverse(\"join\", args=[i.pk]))\n\n        response = client.get(reverse(\"joinFloor\"))\n        self.assertFalse(response.context[-1][\"floors_exist\"])\n        Player.objects.filter(user=user).delete()\n        response = client.get(reverse(\"joinFloor\"))\n        self.assertTrue(response.context[-1][\"floors_exist\"])\n    def test_scoring(self):\n        start = time.perf_counter()\n        floor = Floor.objects.all()[0]\n        DEFAULT_PRICE = 5\n        for s in floor.stocks.all():\n            s.price = DEFAULT_PRICE\n            s.update()\n        for p in Player.objects.filter(floor=floor):\n            if not p.isFloor():\n                self.assertAlmostEqual(p.points, reduce(lambda x, y: x + y, [s.get_score() for s in p.stocks.all()]), delta=1)\n        print(\"Finished! Took {} seconds!\".format(time.perf_counter() - start))\nclass SuggestionTestCase(StaticLiveServerTestCase):\n    fixtures = [\"fixture.json\"]\n    def setUp(self):\n        self.floor = Floor.objects.all()[0]\n        self.floor.permissiveness = \"permissive\"\n        self.floor.save()\n        self.player = [p for p in Player.objects.filter(floor=self.floor) if not p.isFloor()][0]\n        self.user = self.player.user\n        for i in Stock.objects.all():\n            if not i in self.floor.stocks.all():\n                self.new_stock = i\n                break\n        self.form = TradeForm({\"other_user\": self.floor.floorPlayer.user.username, \"other_stocks\": self.new_stock.symbol, \"user_stocks\": \"\"})\n        if self.form.is_valid(pkFloor=self.floor.pk, user=self.user):\n            self.trade = self.form.to_trade(pkFloor=self.floor.pk, user=self.user)\n        else:\n            raise RuntimeError(\"There was an error in validation. {}\".format(self.form.errors))\n    def test_suggestions(self):\n        # If this fails, the trade isn't getting automatically accepted by the floor. 
\n        self.assertQuerysetEqual(Trade.objects.all(), [])\n        self.assertNotIn(self.new_stock, self.player.stocks.all())\n        self.assertNotIn(self.new_stock, self.floor.stocks.all())\n        self.assertTrue(StockSuggestion.objects.exists())\n        StockSuggestion.objects.filter(stock=self.new_stock)[0].accept()\n        self.assertIn(self.new_stock, self.player.stocks.all())\n        self.assertIn(self.new_stock, self.floor.stocks.all())\n    def test_capped_suggestion(self):\n        SMALL_NUMBER = 2\n        self.floor.num_stocks = SMALL_NUMBER\n        self.floor.save()\n        suggestion = StockSuggestion.objects.all()[0]\n        for i in Player.objects.all():\n            if i != self.player and not i.isFloor():\n                otherPlayer = i\n                break\n        newTrade = Trade.objects.create(recipient=otherPlayer, floor=self.floor, sender=self.player)\n        newStock = otherPlayer.stocks.all()[0]\n        newTrade.recipientStocks.add(newStock)\n        newTrade.save()\n        newTrade.verify()\n        newTrade.accept()\n        self.assertEqual(list(otherPlayer.stocks.all()), [])\n        self.assertIn(newStock, self.player.stocks.all())\n        self.assertEqual(self.player.stocks.count(), SMALL_NUMBER)\n        suggestion.accept()\n        self.assertNotIn(self.new_stock, self.player.stocks.all())\n        self.assertIn(self.new_stock, self.floor.stocks.all())\n        self.assertIn(self.new_stock, self.floor.floorPlayer.stocks.all())\n\nclass UserTestCase(StaticLiveServerTestCase):\n    fixtures = [\"fixture.json\"]\n    def test_login_page(self):\n        user = User.objects.all().exclude(username=\"Floor\")[0]\n        client = Client()\n        origResponse = client.get(reverse(\"dashboard\"), follow=True)\n        self.assertTemplateUsed(origResponse, \"index.html\")\n        self.assertTrue(origResponse.context[-1][\"registrationForm\"])\n        self.assertTrue(origResponse.context[-1][\"loginForm\"])\n        response = client.post(origResponse.redirect_chain[-1][0], {\"username\": \"notAusername\", \"password\": \"certainlynotapassword\", \"nextPage\": reverse(\"dashboard\")})\n        self.assertFormError(response, \"loginForm\", None, \"That username does not exist\") \n\n        response = client.get(reverse(\"dashboard\"), follow=True)\n        response = client.post(origResponse.redirect_chain[-1][0], {\"username\": user.username, \"password\": \"certainlynotapassword\", \"nextPage\": reverse(\"dashboard\")})\n        self.assertFormError(response, \"loginForm\", None, \"That is the wrong password\") \n\n        response = client.get(reverse(\"dashboard\"), follow=True)\n        # See the code I used to create the fixture for how I'm getting the password\n        response = client.post(origResponse.redirect_chain[-1][0], {\"username\": user.username, \"password\": \"thePasswordIs{}\".format(user.username.split(\"_\")[1]), \"nextPage\": reverse(\"dashboard\")}, follow=True)\n        self.assertEqual(response.redirect_chain[-1][0], reverse(\"dashboard\")) \n\n\n\n\n\n# This is stuff I don't need anymore. If I need to change the standard database\n# setup, I might though. 
\n# class OldTests(StaticLiveServerTestCase):\n# def test_serialize(self):\n# from django.core.serializers import serialize\n# l = []\n# for i in [Stock, User, Group, Floor, Player]:\n# l += list(i.objects.all())\n# with open(\"fixture\", \"w\") as w:\n# w.write(serialize(\"json\", l))\n# def setUp(self):\n# NUMBER_OF_USERS = 100\n# with urllib.request.urlopen(self.live_server_url + static(\"stocks.json\")) as f:\n# available_stocks = [Stock.objects.create(symbol=i[\"symbol\"]) for i in json.loads(f.read().decode(\"UTF-8\"))]\n# floor_user = User.objects.create_user(\"Floor\", \"floor@floors.net\", \"flooring\")\n# floor_group = Group.objects.create(name=\"Floor\")\n# floor_group.save()\n# floor_user.groups.add(floor_group)\n# floor_user.save()\n# floor = Floor.objects.create(name=\"TestingFloor\", permissiveness=\"open\")\n# floor_player = Player.objects.create(user=floor_user, points=0, floor=floor)\n# floor.floorPlayer = floor_player\n# floor_player.save()\n# floor.save()\n# print(\"start creating users\")\n# for i in range(NUMBER_OF_USERS):\n# user = User.objects.create_user(\"user_{}\".format(i), \"user_{}@mailmail.mail\".format(i), \"thePasswordIs{}\".format(i))\n# player = Player.objects.create(user=user, floor=floor)\n# stock = available_stocks.pop()\n# player.stocks.add(stock)\n# user.save()\n# player.save()\n# floor.stocks.add(stock)\n# floor.save()\n# print(\"done creating users\")\n","sub_path":"fantasyStocks/stocks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":14027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"499322081","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n if not l1: return l2\n if not l2: return l1\n curr = l1\n while curr != None:\n if l2 != None: curr.val = curr.val + l2.val\n if curr.val >= 10:\n if curr.next == None:\n curr.next = ListNode(1)\n else:\n curr.next.val += 1\n curr.val -= 10\n if curr.next == None and l2 != None: \n curr.next = l2.next\n break\n curr = curr.next\n if l2 != None: l2 = l2.next\n return l1","sub_path":"Top_Interview_Questions/LinkedList/AddTwoNumbers.py","file_name":"AddTwoNumbers.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"572446381","text":"from Map.Maze import maze as maze\nfrom Map.Player import man as man\nfrom Map.Core import core as core\nfrom Learning.Lesson1 import lessonOne as lessonOne\n\nimport pygame\nimport time\nimport sys\n\n\nmydict = {0: \"level_0\", 1: \"level_1\", 2: \"level_2\",\n 3: \"level_3\", 4: \"level_4\", 5: \"level_5\", 6: \"level_6\"}\n\n\ndef main_function():\n global my_mode\n if not (man.right or man.left or man.down or man.up):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_RIGHT]:\n man.right = True\n print(man.place)\n elif keys[pygame.K_LEFT]:\n man.left = True\n print(man.place)\n elif keys[pygame.K_UP]:\n man.up = True\n print(man.place)\n elif keys[pygame.K_DOWN]:\n man.down = True\n print(man.place)\n elif keys[pygame.K_SPACE]:\n my_mode = mydict[man.place]\n time.sleep(0.2)\n print(my_mode)\n\n if man.can_move_right(maze):\n if maze.levels[man.movement][0] > man.x:\n man.x += man.vel\n else:\n man.right = False\n man.place = man.movement\n man.movement = -100\n\n elif man.can_move_left(maze):\n if 
maze.levels[man.movement][0] < man.x:\n man.x -= man.vel\n else:\n man.left = False\n man.place = man.movement\n man.movement = -100\n\n elif man.can_move_down(maze):\n if maze.levels[man.movement][1] > man.y:\n man.y += man.vel\n else:\n man.down = False\n man.place = man.movement\n man.movement = -100\n\n elif man.can_move_up(maze):\n if maze.levels[man.movement][1] < man.y:\n man.y -= man.vel\n else:\n man.up = False\n man.place = man.movement\n man.movement = -100\n else:\n man.walkCount = 0\n man.right = False\n man.left = False\n man.up = False\n man.down = False\n\n core.redrawGameWindow(man, maze)\n\n\ndef one_function():\n global my_mode\n lessonOne.renderlevel()\n my_mode = 'main'\n time.sleep(0.2)\n\n\nrun = True\nmy_mode = 'main'\nwhile run:\n core.clock.tick(27)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if my_mode == 'main':\n main_function()\n else:\n one_function()\n\npygame.quit()\n","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"319219674","text":"from flask import request\nfrom flask_restful import Resource\nfrom pymongo import ReturnDocument\nimport jwt\nimport os\n\nfrom app.db.user import users\nfrom app.controllers.jwt_validator import validate_jwt\nfrom app.validators.create_app import create_app_validator\n\n\nclass CreateApp(Resource):\n def post(self):\n decoded = None\n try:\n decoded = validate_jwt(request.headers['Authorization'])\n except:\n return {'error': True, 'errorMessage': 'Invalid access_token'}, 400\n\n if not decoded:\n return {'error': True, 'errorMessage': 'Invalid access_token'}, 400\n\n data = request.get_json()\n\n if not create_app_validator.validate({'data': data}):\n return {'error': True, 'errorMessage': create_app_validator.errors['data'][0]}, 400\n\n db_data = users.find_one({'email': decoded['email']})\n\n if db_data['account_type'] == 'free' and len(db_data['applications']) == 3:\n return {'error': True, 'errorMessage': {'applications': ['You have reached maximum limit of applications you can create. Subscribe to premium and create unlimited applications.']}}\n\n for app in db_data['applications']:\n if app['name'] == data['name']:\n return {'error': True, 'errorMessage': {'name': ['Application with the similar name already exist.']}}\n\n data['secret_token'] = jwt.encode(\n {'email': db_data['email'],\n 'allowed_apis': data['allowed_apis'],\n 'app_name': data['name']\n },\n os.getenv('JWT_SECRET'), algorithm='HS256').decode()\n\n results = users.find_one_and_update({'email': db_data['email']}, {\n '$push': {'applications': data}}, return_document=ReturnDocument.AFTER)\n\n if not results:\n print(results)\n return {'error': True, 'errorMessage': 'Something went wrong from our side. 
Sorry for the inconvenience.'}, 500\n\n        return {'error': False, 'results': results['applications'], 'updated': data}, 200\n","sub_path":"app/routes/create_app.py","file_name":"create_app.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"487184656","text":"\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom PIL import Image\nfrom pybrain.datasets import SupervisedDataSet\nfrom pybrain.supervised.trainers import RPropMinusTrainer\nimport numpy as np\nimport os\n\n\nfiles = os.listdir(\"img/\")\nfiles1 = os.listdir(\"test_img/\")\nds = SupervisedDataSet(200*60*3, 1)\n\nfor i in range(len(files)):\n    img = Image.open(\"img/\"+files[i])\n    data = np.array(img)\n    data = data.reshape(-1)\n    ds.addSample((data), (files[i][0:6]))\n\n\n\n\nimg1 = Image.open(\"test_img/152830.png\")\n\ndata1 = np.array(img1)\ndata1 = data1.reshape(-1)\n\nnet = buildNetwork(200*60*3, 1)\n\ntrainer = RPropMinusTrainer(net)\ntrainer.setData(ds)\n\ntrainer.trainEpochs(100)\n\n\ndef calculation(a,b):\n    i=0\n    if((int(a) // 100000)%10 == (int(b) // 100000)%10):\n        i=i+1\n    if ((int(a) // 10000) % 10 == (int(b) // 10000) % 10):\n        i = i + 1\n    if((int(a) // 1000)%10 == (int(b) // 1000)%10):\n        i=i+1\n    if((int(a) // 100)%10 == (int(b) // 100)%10):\n        i=i+1\n    if((int(a) // 10)%10 == (int(b) // 10)%10):\n        i=i+1\n    if((int(a) // 1)%10 == (int(b) // 1)%10):\n        i=i+1\n    return i/6*100\nsum = 0\nfor fname in files1:\n    img3 = Image.open(\"test_img/%s\" % fname)\n    data6 = np.array(img3)\n    data6 = data6.reshape(-1)\n    print(fname, \"Recognition result is\", np.round(net.activate(data6)))\n    sum += calculation(np.round(net.activate(data6)), fname.split('.')[0])\n\nprint('%s' % (sum / 3))\n\n\n\n\n\n","sub_path":"сеть.py","file_name":"сеть.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"460732661","text":"class BST:\n    def __init__(self):\n        self.data = None\n        self.left = None\n        self.right = None\n\n    def insert(self, value):\n        if not self.data:\n            self.data = value\n        elif self.data < value:\n            if not self.right:\n                self.right = BST()\n                self.right.data = value\n            else:\n                self.right.insert(value)\n        else:\n            if not self.left:\n                self.left = BST()\n                self.left.data = value\n            else:\n                self.left.insert(value)\n\n\n    def print_tree_inorder(self):\n        if self.left:\n            self.left.print_tree_inorder()\n        print(self.data)\n        if self.right:\n            self.right.print_tree_inorder()\n\n\n\nbst = BST()\nbst.insert(5)\nbst.insert(3)\nbst.insert(4)\nbst.insert(2)\nbst.insert(7)\nbst.insert(6)\nbst.insert(1)\nbst.print_tree_inorder()","sub_path":"Binary_Search_Tree.py","file_name":"Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"206886051","text":"from django.shortcuts import render, reverse\n\n\nninja_turtles_list = {'red':\"raphael.jpg\", 'blue':\"leonardo.jpg\", 'purple':\"donatello.jpg\", 'orange':\"Michelangelo.jpeg\"}\napril = 'april.jpg'\nall_turtles = \"tmnt.png\"\n# Create your views here.\ndef index(request):\n    return render(request,\n                  'disappearing_ninja/index.html')\n\ndef show_turtles(request, color):\n    if len(color) < 1:\n        image=all_turtles\n    else:\n        if color in ninja_turtles_list.keys():\n            image = ninja_turtles_list[color]\n        else:\n            image = april\n    data = {\"turtle\":image}\n    return render(request,\n                  'disappearing_ninja/display_turtles.html', 
data)\n","sub_path":"django/integration_project/apps/disappearing_ninja/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"264768317","text":"def commaToAdd(list_name):\n spamString=\"'\"\n if len(list_name) == 1:\n #print(str(list_name)) This prints ['apples']\n print(spamString + list_name[0] + spamString)\n else:\n for i in range(len(list_name)-2):\n spamString = spamString + list_name[i] + ', '\n spamString = spamString + list_name[-2] + ' and ' + list_name[-1] + \"'\"\n #list_name = spamString You dont need this as you have already built spamString\n #print(list_name)\n print(spamString)\n\nspam = ['apples']\ncommaToAdd(spam)\n# 'apples'\n\nspam2 = ['apples', 'oranges']\ncommaToAdd(spam2)\n#'apples and oranges'\n\nspam3 = ['apples', 'oranges', 'bananas', 'tofu', 'cat']\ncommaToAdd(spam3)\n#'apples, oranges, bananas, tofu and cat'","sub_path":"commacode.py","file_name":"commacode.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"441746551","text":"# 问题一:字符串排序\n\ns = \"hello world\"\n\n# 请编写代码,将 s 以 [a-z] 顺序输出\n\n\n# 先将传入的字符串转换成对应的ascII码,在比较大小按顺序打印\n# 但是题目中有两种意思,不清楚是整体按照打印,还是按照每个字母中的字母顺序打印,姑且写了如下两种\n\n\n# 整体按照顺序打印\ndef output_in_order(item):\n list1 = []\n for item in s:\n list1.append(ord(item))\n list1 = sorted(list1)\n for i in map(chr, list1):\n print(i, end=' ')\n\n\nif __name__ == '__main__':\n s = 'hello world'\n output_in_order(s)\n\n\n# 按照每一个单词字符中的顺序打印\ndef change(items):\n list1 = []\n for item in items:\n list1.append(ord(item))\n list1 = sorted(list1)\n return list1\n\n\ndef output_in_order(item):\n i = 0\n list1 = []\n list2 = []\n new_item = item.split(' ')\n while i < len(new_item):\n list1 = change(new_item[i])\n i += 1\n for items in list1:\n list2.append(chr(items))\n list2.append(' ')\n for j in list2:\n print(j,end='')\n\n\nif __name__ == '__main__':\n s = 'hello world'\n output_in_order(s)\n\n\n# 问题二:数值比较\n\nn = [9,15,23,89,33,26,2,76]\n\n# 请编写代码,找出数组中的最大数与最小数\n\n\ndef find_max_and_min(item: list):\n print(max(item))\n print(min(item))\n\n\nif __name__ == '__main__':\n n = [9, 15, 23, 89, 33, 26, 2, 76]\n find_max_and_min(n)\n\n\n# 问题三:替换\n\na = \"i,am,a,student,in,chengdu\"\n\n# 请编写代码,将 “student” 和 “chengdu” 变为可基于参数输入配置的输出\n# 通过参数输入打印出完整的句子\nstudent = input('please input student')\nposition = input('please input position')\na = \"i,am,a,%s,in,%s\" % (student, position)\nprint(a.replace(',', ' ').capitalize())","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"256595623","text":"import logging\nlogger = logging.getLogger('pac4cli')\n\nimport platform\nimport configparser\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\n\nif 'Linux' == platform.system():\n import txdbus.client\n # work around txdbus assuming python 2\n txdbus.client.basestring = str\n\n\n# TODO: move this to a more appropriate module\n@inlineCallbacks\ndef install_network_state_changed_callback(reactor, callback):\n dbus = yield txdbus.client.connect(reactor, 'system')\n nm = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n '/org/freedesktop/NetworkManager')\n nm.notifyOnSignal('StateChanged', callback)\n\nclass WPAD:\n def __init__(self, reactor, config_file):\n self.reactor = 
reactor\n self.config_file = config_file\n\n @inlineCallbacks\n def get_dhcp_domains(self):\n res = []\n if 'Linux' != platform.system():\n logger.info(\"No NetworkManager available.\")\n return res\n\n dbus = yield txdbus.client.connect(self.reactor, 'system')\n nm = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n '/org/freedesktop/NetworkManager')\n active_connection_paths = yield nm.callRemote('Get',\n 'org.freedesktop.NetworkManager', 'ActiveConnections')\n\n for path in active_connection_paths:\n logger.debug(\"Inspecting connection %s\", path)\n try:\n conn = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n path)\n config_path = yield conn.callRemote('Get',\n 'org.freedesktop.NetworkManager.Connection.Active', 'Ip4Config')\n logger.debug(\"Its IP4 configuration is %s\", config_path)\n # this is what networkmanager returns in case there is no associated\n # configuration, e.g. vpns and tunnels\n if config_path != \"/\":\n config = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n config_path)\n domains = yield config.callRemote('Get',\n 'org.freedesktop.NetworkManager.IP4Config', 'Domains')\n logger.debug(\"Its domains are %s\", domains)\n res.extend(domains)\n else:\n logger.debug(\"Skipping /\")\n except Exception as e:\n logger.warning(\"Problem getting domain for connection %s\", path, exc_info=True)\n\n return res\n\n @inlineCallbacks\n def get_wpad_url(self):\n if 'Linux' != platform.system():\n logger.info(\"No NetworkManager available.\")\n return None\n\n dbus = yield txdbus.client.connect(self.reactor, 'system')\n nm = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n '/org/freedesktop/NetworkManager')\n active_connection_paths = yield nm.callRemote('Get',\n 'org.freedesktop.NetworkManager', 'ActiveConnections')\n\n for path in active_connection_paths:\n logger.debug(\"Inspecting connection %s\", path)\n try:\n conn = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n path)\n config_path = yield conn.callRemote('Get',\n 'org.freedesktop.NetworkManager.Connection.Active', 'Dhcp4Config')\n logger.debug(\"Its Dhcp4 configuration is %s\", config_path)\n\n # this is what networkmanager returns in case there is no associated\n # configuration, e.g. 
vpns and tunnels\n if config_path != \"/\":\n config = yield dbus.getRemoteObject('org.freedesktop.NetworkManager',\n config_path)\n options = yield config.callRemote('Get',\n 'org.freedesktop.NetworkManager.DHCP4Config', 'Options')\n logger.debug(\"Its options are %s\", options)\n\n if 'wpad' in options:\n return options['wpad']\n else:\n logger.debug(\"Skipping /\")\n except Exception as e:\n logger.warning(\"Problem getting wpad option for connection %s\", path, exc_info=True)\n\n return None\n\n def get_config_wpad_url(self, config_file):\n logger.info(\"Trying to read config file '%s'\", config_file)\n config = configparser.SafeConfigParser()\n config.read(config_file)\n try:\n url = config.get('wpad', 'url')\n logger.info(\"Read wpad url: %s\", url)\n return url\n except configparser.NoOptionError:\n logger.info(\"No wpad url specified\")\n return None\n\n @inlineCallbacks\n def getUrls(self):\n if self.config_file:\n try:\n wpad_url = self.get_config_wpad_url(self.config_file)\n if wpad_url is not None:\n return [ wpad_url ]\n except Exception as e:\n logger.warning(\"Problem reading configuration file %s\", self.config_file, exc_info=True)\n else:\n logger.debug(\"No configuration file specified\")\n\n logger.info(\"Trying to get wpad url from NetworkManager DHCP...\")\n wpad_url = yield self.get_wpad_url()\n if wpad_url is not None:\n return [ wpad_url ]\n else:\n logger.info(\"Trying to get wpad url from NetworkManager domains...\")\n domains = yield self.get_dhcp_domains()\n return [\n \"http://wpad.{}/wpad.dat\".format(domain)\n for domain in domains\n ]\n","sub_path":"pac4cli/wpad.py","file_name":"wpad.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"103399257","text":"import base64\n\nfrom . import pdfparser\nfrom . 
import kaitai\nfrom .logger import getStatusLogger\nfrom .polyfile import Match, Matcher, Submatch, submatcher\n\nlog = getStatusLogger(\"PDF\")\n\n\ndef token_length(tok):\n if hasattr(tok, 'token'):\n return len(tok.token)\n else:\n return len(tok[1])\n\n\ndef content_length(content):\n return content[-1].offset.offset - content[0].offset.offset + token_length(content[-1])\n\n\ndef _emit_dict(parsed, parent, pdf_offset):\n dict_obj = Submatch(\n \"PDFDictionary\",\n '',\n relative_offset=parsed.start.offset.offset - parent.offset + pdf_offset,\n length=parsed.end.offset.offset - parsed.start.offset.offset + len(parsed.end.token),\n parent=parent\n )\n yield dict_obj\n for key, value in parsed:\n if isinstance(value, pdfparser.ParsedDictionary):\n value_end = value.end.offset.offset + len(value.end.token)\n else:\n value_end = value[-1].offset.offset + len(value[-1].token)\n pair_offset = key.offset.offset - dict_obj.offset\n pair = Submatch(\n \"KeyValuePair\",\n '',\n relative_offset=pair_offset + pdf_offset,\n length=value_end - key.offset.offset,\n parent=dict_obj\n )\n yield pair\n yield Submatch(\n \"Key\",\n key.token,\n relative_offset=0,\n length=len(key.token),\n parent=pair\n )\n if isinstance(value, pdfparser.ParsedDictionary):\n yield from _emit_dict(value, pair, pdf_offset)\n else:\n value_length = value[-1].offset.offset + len(value[-1].token) - value[0].offset.offset\n yield Submatch(\n \"Value\",\n ''.join(v.token for v in value),\n relative_offset=value[0].offset.offset - key.offset.offset,\n length=value_length,\n parent=pair\n )\n\n\ndef ast_to_matches(ast: kaitai.AST, parent: Submatch):\n stack = [(parent, ast)]\n while stack:\n parent, node = stack.pop()\n if not hasattr(node.obj, 'uid'):\n continue\n if len(node.children) == 1 and not hasattr(node.children[0], 'uid'):\n match = node.children[0].obj\n else:\n match = ''\n new_node = Submatch(\n name=node.obj.uid,\n match_obj=match,\n relative_offset=node.relative_offset,\n length=node.length,\n parent=parent\n )\n yield new_node\n stack.extend(reversed([(new_node, c) for c in node.children]))\n\n\ndef parse_object(file_stream, object, matcher: Matcher, parent=None):\n log.status('Parsing PDF obj %d %d' % (object.id, object.version))\n objtoken, objid, objversion, endobj = object.objtokens\n pdf_length=endobj.offset.offset - object.content[0].offset.offset + 1 + len(endobj.token)\n if parent is None or isinstance(parent, PDF):\n parent_offset = 0\n else:\n parent_offset = parent.offset\n obj = Submatch(\n name=\"PDFObject\",\n display_name=f\"PDFObject{object.id}.{object.version}\",\n match_obj=(object.id, object.version),\n relative_offset=objid.offset.offset - parent_offset,\n length=pdf_length + object.content[0].offset.offset - objid.offset.offset,\n parent=parent\n )\n yield obj\n yield Submatch(\n \"PDFObjectID\",\n object.id,\n relative_offset=0,\n length=len(objid.token),\n parent=obj\n )\n yield Submatch(\n \"PDFObjectVersion\",\n object.version,\n relative_offset=objversion.offset.offset - objid.offset.offset,\n length=len(objversion.token),\n parent=obj\n )\n log.debug(' Type: %s' % pdfparser.ConditionalCanonicalize(object.GetType(), False))\n log.debug(' Referencing: %s' % ', '.join(map(lambda x: '%s %s %s' % x, object.GetReferences())))\n dataPrecedingStream = object.ContainsStream()\n if dataPrecedingStream:\n log.debug(' Contains stream')\n log.debug(' %s' % pdfparser.FormatOutput(dataPrecedingStream, False))\n oPDFParseDictionary = pdfparser.cPDFParseDictionary(dataPrecedingStream, False)\n else:\n 
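# no stream data precedes this object, so parse the dictionary from the raw object content\n        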
log.debug(' %s' % pdfparser.FormatOutput(object.content, False))\n oPDFParseDictionary = pdfparser.cPDFParseDictionary(object.content, False)\n #log.debug('')\n #pp = BytesIO()\n #oPDFParseDictionary.PrettyPrint(' ', stream=pp)\n #pp.flush()\n #dict_content = pp.read()\n #log.debug(dict_content)\n dict_offset = oPDFParseDictionary.content[0].offset.offset - objid.offset.offset\n dict_length = content_length(oPDFParseDictionary.content)\n if oPDFParseDictionary.parsed is not None:\n yield from _emit_dict(oPDFParseDictionary.parsed, obj, parent.offset)\n #log.debug('')\n #log.debug('')\n content_start = dict_offset + dict_length\n content_len = endobj.offset.offset - content_start - objid.offset.offset\n if content_len > 0:\n content = Submatch(\n \"PDFObjectContent\",\n (),\n relative_offset=content_start,\n length=content_len,\n parent=obj\n )\n yield content\n stream_len = None\n if oPDFParseDictionary.parsed is not None:\n is_dct_decode = '/Filter' in oPDFParseDictionary.parsed \\\n and oPDFParseDictionary.parsed['/Filter'].strip() == '/DCTDecode'\n if '/Length' in oPDFParseDictionary.parsed:\n try:\n stream_len = int(oPDFParseDictionary.parsed['/Length'])\n except ValueError:\n pass\n old_pos = file_stream.tell()\n try:\n file_stream.seek(content.root_offset)\n raw_content = file_stream.read(content_len)\n finally:\n file_stream.seek(old_pos)\n streamtoken = b'stream'\n if raw_content.startswith(streamtoken):\n raw_content = raw_content[len(streamtoken):]\n if raw_content.startswith(b'\\r'):\n streamtoken += b'\\r'\n raw_content = raw_content[1:]\n if raw_content.startswith(b'\\n'):\n streamtoken += b'\\n'\n raw_content = raw_content[1:]\n if raw_content.endswith(b'\\n'):\n endtoken = b'endstream'\n if raw_content.endswith(b'\\r\\n'):\n endtoken += b'\\r\\n'\n else:\n endtoken += b'\\n'\n if raw_content.endswith(endtoken):\n raw_content = raw_content[:-len(endtoken)]\n if raw_content.endswith(b'\\n') and stream_len is not None and len(raw_content) > stream_len:\n endtoken = b'\\n' + endtoken\n raw_content = raw_content[:-1]\n yield Submatch(\n \"StartStream\",\n streamtoken,\n relative_offset=0,\n length=len(streamtoken),\n parent=content\n )\n streamcontent = Submatch(\n \"StreamContent\",\n raw_content,\n relative_offset=len(streamtoken),\n length=len(raw_content),\n parent=content\n )\n yield streamcontent\n # Temporarily disabled this until we figure out how to handle incorrect matches:\n # with file_stream.save_pos() as fs:\n # with fs[streamcontent.offset:streamcontent.offset + streamcontent.length] as f:\n # f.seek(0)\n # yield from matcher.match(\n # f,\n # streamcontent\n # )\n if is_dct_decode and raw_content[:1] == b'\\xff':\n # This is most likely a JPEG image\n try:\n ast = kaitai.parse('jpeg', raw_content)\n except Exception as e:\n log.error(str(e))\n ast = None\n if ast is not None:\n iterator = ast_to_matches(ast, parent=streamcontent)\n try:\n jpeg_match = next(iterator)\n jpeg_match.img_data = f\"data:image/jpeg;base64,{base64.b64encode(raw_content).decode('utf-8')}\"\n yield jpeg_match\n yield from iterator\n except StopIteration:\n pass\n\n yield Submatch(\n \"EndStream\",\n endtoken,\n relative_offset=len(streamtoken) + len(raw_content),\n length=len(endtoken),\n parent=content\n )\n log.clear_status()\n\n\ndef parse_pdf(file_stream, matcher: Matcher, parent=None):\n if parent is None or isinstance(parent, PDF):\n parent_offset = 0\n else:\n parent_offset = parent.offset\n with file_stream.tempfile(suffix='.pdf') as pdf_path:\n parser = 
pdfparser.cPDFParser(pdf_path, True)\n        while True:\n            object = parser.GetObject()\n            if object is None:\n                break\n            elif object.type == pdfparser.PDF_ELEMENT_COMMENT:\n                log.debug(f\"PDF comment at {object.offset}, length {len(object.comment)}\")\n                yield Submatch(\n                    name='PDFComment',\n                    match_obj=object,\n                    relative_offset=object.offset.offset - parent_offset,\n                    length=len(object.comment),\n                    parent=parent\n                )\n            elif object.type == pdfparser.PDF_ELEMENT_XREF:\n                log.debug('PDF xref')\n                yield Submatch(\n                    name='PDFXref',\n                    match_obj=object,\n                    relative_offset=object.content[0].offset.offset - parent_offset,\n                    length=content_length(object.content),\n                    parent=parent\n                )\n            elif object.type == pdfparser.PDF_ELEMENT_TRAILER:\n                pdfparser.cPDFParseDictionary(object.content[1:], False)\n                yield Submatch(\n                    name='PDFTrailer',\n                    match_obj=object,\n                    relative_offset=object.content[0].offset.offset - parent_offset,\n                    length=content_length(object.content),\n                    parent=parent\n                )\n            elif object.type == pdfparser.PDF_ELEMENT_STARTXREF:\n                yield Submatch(\n                    name='PDFStartXRef',\n                    match_obj=object.index,\n                    relative_offset=object.offset.offset - parent_offset,\n                    length=object.length,\n                    parent=parent\n                )\n            elif object.type == pdfparser.PDF_ELEMENT_INDIRECT_OBJECT:\n                yield from parse_object(file_stream, object, matcher=matcher, parent=parent)\n\n\n@submatcher('adobe_pdf.trid.xml', 'adobe_pdf-utf8.trid.xml')\nclass PDF(Match):\n    def submatch(self, file_stream):\n        yield from parse_pdf(file_stream, matcher=self.matcher, parent=self)\n","sub_path":"polyfile/pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":11703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"253608864","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom widgets.ButtonWidget import *\nfrom widgets.ComboWidget import *\nfrom widgets.LabelWidget import *\nfrom widgets.CodeWidget import *\nfrom widgets.SpinWidget import *\nfrom widgets.FunctionWidget import *\nfrom widgets.TimeFunctionWidget import *\nfrom widgets.SleepWidget import *\nfrom dialogs.LoginDlg import *\nimport requests, json\n\n\nclass Main(QWidget):\n    releaseSignal = pyqtSignal(QWidget)\n    draggingSignal = pyqtSignal(QWidget, QPoint)\n    def __init__(self):\n        super().__init__()\n        self.resize(1500,900)\n        self.setFixedSize(1500, 900)\n        self.setWindowTitle('Selfmade-IoT')\n        self.setAttribute(Qt.WA_StyledBackground, True)\n        self.setWindowIcon(QIcon('res/icon.ico'))\n        self.setStyleSheet('Main { background-color: #0070AA }')\n        self.widgetBox = QRect(20,120,260,760)\n        self.codeBox = QRect(300,120,1180,760)\n        self.binBox = QRect(840,20,200,70)\n        self.releaseSignal.connect(self.onCodeReleased)\n        self.draggingSignal.connect(self.onCodeDragging)\n        self.info = None \n\n        self.loginButton = ButtonWidget('white', 0, 16, '나눔스퀘어 Bold', '로그인', self)\n        self.loginButton.setGeometry(1060,20,200,70)\n        self.loginButton.clicked.connect(self.onLogin)\n        self.uploadButton = ButtonWidget('white', 0, 16, '나눔스퀘어 Bold', '업로드', self) # device change button\n        self.uploadButton.setGeometry(1280,20,200,70)\n        self.uploadButton.clicked.connect(self.onUpload)\n\n        self.widgetList = [None for _ in range(5)]\n        self.createFunctions()\n        self.createWidgets()\n\n    def onUpload(self):\n        code = b''\n        for function in self.funcList:\n            code += function.sourceCode()\n        \n        id = 'dd'\n        pw = '11'\n        url='http://localhost:5000/receive'\n        headers = {'Content-Type' : 'application/json; charset=utf-8'}\n        data = {\"id\" : id, \"password\" : pw, \"code\" : 
code.decode('utf-8')}\n print(code.decode('utf-8'))\n res = requests.post(url, headers = headers, data=json.dumps(data))\n print(res.text)\n\n\n def onLogin(self):\n dlg = LoginDlg(self)\n if dlg.info:\n self.info = dlg.info\n\n\n def createFunctions(self):\n self.funcList = [\n FunctionWidget(self, QPoint(330,150), '계속 반복', b'loop'),\n FunctionWidget(self, QPoint(660,150), '원격 신호 1', b'sig1'),\n FunctionWidget(self, QPoint(960,150), '원격 신호 2', b'sig2'),\n TimeFunctionWidget(self, QPoint(1260,150))\n ]\n\n\n def paintEvent(self, e):\n pix = QPixmap('res/trash.png')\n qp = QPainter(self)\n rect = QRect(self.binBox)\n qp.fillRect(self.widgetBox, QColor('white'))\n qp.fillRect(self.codeBox, QColor('white'))\n qp.fillRect(rect, QColor('white'))\n rect.setRect(rect.center().x() - 20, rect.center().y() - 20, 40, 40)\n qp.drawPixmap(rect, pix)\n qp.end()\n\n def onCodeDragging(self, code, pos):\n for function in self.funcList:\n if code in function.childList:\n continue\n rect = function.area()\n if not rect.contains(pos):\n function.locationRefresh()\n continue\n \n for i, child in enumerate(function.childList):\n if child.geometry().contains(pos):\n if function.isBlanked:\n function.locationRefresh()\n function.makeBlank(i)\n return\n \n\n def onCodeReleased(self, code):\n try:\n idx = self.widgetList.index(code)\n self.widgetList[idx] = None\n self.createWidgets()\n except:\n pass \n\n if self.binBox.contains(code.geometry().center()):\n code.deleteLater()\n return\n pt = code.geometry().center()\n for function in self.funcList:\n if not function.area().contains(pt):\n function.locationRefresh()\n continue\n if function.blank.contains(pt):\n if code.hasParent:\n code.exitFunction.emit(code)\n function.addCode(code)\n break\n function.locationRefresh()\n\n def createWidgets(self):\n if not self.widgetList[0]:\n on = CodeWidget('#44BD41', 15, self, b'son')\n on.setGeometry(50, 250, 200, 70)\n on.setDraggingSignal(self.draggingSignal)\n on.setReleaseSignal(self.releaseSignal)\n on.setText('스위치 ON')\n on.show()\n self.widgetList[0] = on\n\n if not self.widgetList[1]:\n off = CodeWidget('#E22929', 15, self, b'soff')\n off.setGeometry(50, 350, 200, 70)\n off.setDraggingSignal(self.draggingSignal)\n off.setReleaseSignal(self.releaseSignal)\n off.setText('스위치 OFF')\n off.show()\n self.widgetList[1] = off\n\n\n if not self.widgetList[2]:\n sleep = SleepWidget(self)\n sleep.setGeometry(50, 450, 200, 70)\n sleep.setDraggingSignal(self.draggingSignal)\n sleep.setReleaseSignal(self.releaseSignal)\n sleep.show()\n self.widgetList[2] = sleep\n\n if not self.widgetList[3]:\n buzzon = CodeWidget('#81158A', 15, self, b'bon')\n buzzon.setGeometry(50, 550, 200, 70)\n buzzon.setDraggingSignal(self.draggingSignal)\n buzzon.setReleaseSignal(self.releaseSignal)\n buzzon.setText('부저 ON')\n buzzon.show() \n self.widgetList[3] = buzzon\n\n if not self.widgetList[4]:\n buzzoff = CodeWidget('#C21066', 15, self, b'boff')\n buzzoff.setGeometry(50, 650, 200, 70)\n buzzoff.setDraggingSignal(self.draggingSignal)\n buzzoff.setReleaseSignal(self.releaseSignal)\n buzzoff.setText('부저 OFF')\n buzzoff.show()\n self.widgetList[4] = buzzoff\n \n \nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n ui = Main()\n ui.show()\n sys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"332333078","text":"import networkx as nx\n\ndef shortest_path_dijkstra(G_nx, 
source, target):\n    shortest_path = nx.dijkstra_path(G_nx, source, target, weight=None)\n    return shortest_path\n\ndef shortest_paths_dijkstra(G_nx, sources, targets):\n    # all_pairs_dijkstra yields (node, (lengths, paths)) tuples\n    shortest_paths = {}\n    for key_i, (lengths_i, paths_i) in nx.all_pairs_dijkstra(G_nx, weight='length'):\n        if key_i not in sources:\n            continue\n        for key_j, path_j in paths_i.items():\n            if key_j in targets:\n                shortest_paths[(key_i, key_j)] = path_j\n    return shortest_paths\n\ndef single_source_paths_dijkstra(G_nx, source, targets):\n    lengths, paths = nx.single_source_dijkstra(G_nx, source, weight='length')\n    single_source_shortest_paths = [paths[target] for target in targets if target in paths]\n    return single_source_shortest_paths\n\ndef multiple_source_paths_dijkstra(G_nx, sources, target):\n    # run a single-source search from each source; a list is not a valid source node\n    single_source_shortest_paths = []\n    for source in sources:\n        lengths, paths = nx.single_source_dijkstra(G_nx, source, weight='length')\n        if target in paths:\n            single_source_shortest_paths.append(paths[target])\n    return single_source_shortest_paths\n\n\ndef find_point_to_gateway_path(graph, fromnode, gateway_nodes):\n    lengths, paths = nx.single_source_dijkstra(graph, fromnode, weight='length')\n    gateway_paths = {k:v for k, v in paths.items() if k in gateway_nodes}\n    gateway_lengths = {k:v for k, v in lengths.items() if k in gateway_nodes}\n    return gateway_paths, gateway_lengths\n\n\ndef find_gateway_to_gateway_path(graph, gateway_nodes):\n    all_paths = {}\n    all_lengths = {}\n    for gateway_node in gateway_nodes:\n        all_paths[gateway_node], all_lengths[gateway_node] = find_point_to_gateway_path(graph, gateway_node, gateway_nodes)\n    return all_paths, all_lengths\n\n\ndef find_gateways_to_gateways_paths(graph, source_gateway_nodes, dest_gateway_nodes):\n    all_paths = {}\n    all_lengths = {}\n    for source_gateway_node in source_gateway_nodes:\n        all_paths[source_gateway_node], all_lengths[source_gateway_node] = find_point_to_gateway_path(graph, source_gateway_node, dest_gateway_nodes)\n    return all_paths, all_lengths\n\n\ndef find_gateway_to_point_path(graph, tonode, gateway_nodes):\n    gateway_paths = {}\n    gateway_lengths = {}\n    for gateway_node in gateway_nodes:\n        lengths, paths = nx.single_source_dijkstra(graph, gateway_node)\n        for k, v in paths.items():\n            if k == tonode:\n                gateway_paths[gateway_node] = v\n        for k, v in lengths.items():\n            if k == tonode:\n                gateway_lengths[gateway_node] = v\n\n    return gateway_paths, gateway_lengths\n","sub_path":"codes/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"68871806","text":"import os\nimport time\nimport argparse\nimport cv2\nimport glob\nimport numpy as np\nfrom tqdm import tqdm\nimport _pickle as cPickle\nfrom lib.align import estimateSimilarityTransform\nfrom lib.utils import load_depth, get_bbox, compute_mAP, plot_mAP3\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data', type=str, default='real_test', help='val, real_test')\nparser.add_argument('--data_dir', type=str, default='data', help='data directory')\nparser.add_argument('--n_cat', type=int, default=6, help='number of object categories')\nparser.add_argument('--nv_prior', type=int, default=1024, help='number of vertices in shape priors')\nparser.add_argument('--model', type=str, default='results/T5_2105_three_stage_real/model_50.pth', help='resume from saved model')\nparser.add_argument('--n_pts', 
type=int, default=1024, help='number of foreground points')\nparser.add_argument('--img_size', type=int, default=192, help='cropped image size')\nparser.add_argument('--gpu', type=str, default='3', help='GPU to use')\nopt = parser.parse_args()\n\nmean_shapes = np.load('assets/mean_points_emb.npy')\n\nassert opt.data in ['val', 'real_test']\nif opt.data == 'val':\n result_dir = 'results/'+str(opt.model).split('/')[1]+'_val'\n file_path = 'CAMERA/val_list.txt'\n cam_fx, cam_fy, cam_cx, cam_cy = 577.5, 577.5, 319.5, 239.5\nelse:\n result_dir = 'results/'+str(opt.model).split('/')[1]+'_real_test'\n file_path = 'Real/test_list.txt'\n cam_fx, cam_fy, cam_cx, cam_cy = 591.0125, 590.16775, 322.525, 244.11084\n\nif not os.path.exists(result_dir):\n os.makedirs(result_dir)\n\nxmap = np.array([[i for i in range(640)] for j in range(480)])\nymap = np.array([[j for i in range(640)] for j in range(480)])\n\n\ndef evaluate():\n \"\"\"\n degree_thres_list = list(range(0, 61, 1))\n shift_thres_list = [i / 2 for i in range(21)]\n iou_thres_list = [i / 100 for i in range(101)]\n # predictions\n result_dir = \"results/final_transformers\"\n #result_dir = 'results/eval_spd_real'\n #result_dir = \"results/eval_T5_f_STAGE3_R_CAMERA_2_1_0.5/\"\n result_pkl_list = glob.glob(os.path.join(result_dir, 'results_*.pkl'))\n result_pkl_list = sorted(result_pkl_list)\n assert len(result_pkl_list)\n pred_results = []\n for pkl_path in result_pkl_list:\n with open(pkl_path, 'rb') as f:\n result = cPickle.load(f)\n if 'gt_handle_visibility' not in result:\n result['gt_handle_visibility'] = np.ones_like(result['gt_class_ids'])\n else:\n assert len(result['gt_handle_visibility']) == len(result['gt_class_ids']), \"{} {}\".format(\n result['gt_handle_visibility'], result['gt_class_ids'])\n if type(result) is list:\n pred_results += result\n elif type(result) is dict:\n pred_results.append(result)\n else:\n assert False\n # To be consistent with NOCS, set use_matches_for_pose=True for mAP evaluation\n iou_aps, pose_aps, iou_acc, pose_acc = compute_mAP(pred_results, result_dir, degree_thres_list, shift_thres_list,\n iou_thres_list, iou_pose_thres=0.1, use_matches_for_pose=True)\n # metric\n # load NOCS results\n #pkl_path = os.path.join('results/nocs_results', opt.data, 'mAP_Acc.pkl')\n \"\"\"\n degree_thres_list = list(range(0, 61, 1))\n shift_thres_list = [i / 2 for i in range(21)]\n iou_thres_list = [i / 100 for i in range(101)]\n # predictions\n result_dir = \"vis\"\n\n\n #pkl_path = os.path.join('results/eval_T5_f_STAGE3_R_CAMERA_2_1_0.5/', 'mAP_Acc.pkl')\n pkl_path = os.path.join('supp/real_ours/', 'mAP_Acc.pkl')\n with open(pkl_path, 'rb') as f:\n nocs_results = cPickle.load(f)\n\n\n pkl_path_new = os.path.join('supp/real_spd/', 'mAP_Acc.pkl')\n with open(pkl_path_new, 'rb') as f:\n nocs_results_new = cPickle.load(f)\n\n nocs_iou_aps = nocs_results['iou_aps'][1, :]\n nocs_pose_aps = nocs_results['pose_aps'][1, :, :]\n nocs_iou_aps_new = nocs_results_new['iou_aps'][1, :]\n nocs_pose_aps_new = nocs_results_new['pose_aps'][1, :, :]\n iou_aps = np.concatenate((nocs_iou_aps[None, :], nocs_iou_aps_new[None, :]), axis=0)\n pose_aps = np.concatenate((nocs_pose_aps[None, :, :], nocs_pose_aps_new[None, :, :]), axis=0)\n plot_mAP3(iou_aps, pose_aps, result_dir, iou_thres_list, degree_thres_list, shift_thres_list, name = '1.png')\n\n nocs_iou_aps = nocs_results['iou_aps'][2, :]\n nocs_pose_aps = nocs_results['pose_aps'][2, :, :]\n nocs_iou_aps_new = nocs_results_new['iou_aps'][2, :]\n nocs_pose_aps_new = nocs_results_new['pose_aps'][2, 
:, :]\n iou_aps = np.concatenate((nocs_iou_aps[None, :], nocs_iou_aps_new[None, :]), axis=0)\n pose_aps = np.concatenate((nocs_pose_aps[None, :, :], nocs_pose_aps_new[None, :, :]), axis=0)\n plot_mAP3(iou_aps, pose_aps, result_dir, iou_thres_list, degree_thres_list, shift_thres_list, name = '2.png')\n\n nocs_iou_aps = nocs_results['iou_aps'][3, :]\n nocs_pose_aps = nocs_results['pose_aps'][3, :, :]\n nocs_iou_aps_new = nocs_results_new['iou_aps'][3, :]\n nocs_pose_aps_new = nocs_results_new['pose_aps'][3, :, :]\n iou_aps = np.concatenate((nocs_iou_aps[None, :], nocs_iou_aps_new[None, :]), axis=0)\n pose_aps = np.concatenate((nocs_pose_aps[None, :, :], nocs_pose_aps_new[None, :, :]), axis=0)\n plot_mAP3(iou_aps, pose_aps, result_dir, iou_thres_list, degree_thres_list, shift_thres_list, name = '3.png')\n\n nocs_iou_aps = nocs_results['iou_aps'][4, :]\n nocs_pose_aps = nocs_results['pose_aps'][4, :, :]\n nocs_iou_aps_new = nocs_results_new['iou_aps'][4, :]\n nocs_pose_aps_new = nocs_results_new['pose_aps'][4, :, :]\n iou_aps = np.concatenate((nocs_iou_aps[None, :], nocs_iou_aps_new[None, :]), axis=0)\n pose_aps = np.concatenate((nocs_pose_aps[None, :, :], nocs_pose_aps_new[None, :, :]), axis=0)\n plot_mAP3(iou_aps, pose_aps, result_dir, iou_thres_list, degree_thres_list, shift_thres_list, name = '4.png')\n\n nocs_iou_aps = nocs_results['iou_aps'][5, :]\n nocs_pose_aps = nocs_results['pose_aps'][5, :, :]\n nocs_iou_aps_new = nocs_results_new['iou_aps'][5, :]\n nocs_pose_aps_new = nocs_results_new['pose_aps'][5, :, :]\n iou_aps = np.concatenate((nocs_iou_aps[None, :], nocs_iou_aps_new[None, :]), axis=0)\n pose_aps = np.concatenate((nocs_pose_aps[None, :, :], nocs_pose_aps_new[None, :, :]), axis=0)\n plot_mAP3(iou_aps, pose_aps, result_dir, iou_thres_list, degree_thres_list, shift_thres_list, name = '5.png')\n\n nocs_iou_aps = nocs_results['iou_aps'][6, :]\n nocs_pose_aps = nocs_results['pose_aps'][6, :, :]\n nocs_iou_aps_new = nocs_results_new['iou_aps'][6, :]\n nocs_pose_aps_new = nocs_results_new['pose_aps'][6, :, :]\n iou_aps = np.concatenate((nocs_iou_aps[None, :], nocs_iou_aps_new[None, :]), axis=0)\n pose_aps = np.concatenate((nocs_pose_aps[None, :, :], nocs_pose_aps_new[None, :, :]), axis=0)\n plot_mAP3(iou_aps, pose_aps, result_dir, iou_thres_list, degree_thres_list, shift_thres_list, name = '6.png')\n\n\nif __name__ == '__main__':\n print('Evaluating ...')\n evaluate()\n","sub_path":"evaluate_vis_supp.py","file_name":"evaluate_vis_supp.py","file_ext":"py","file_size_in_byte":7081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"528684902","text":"import numpy as np\n\nfrom optable.synthesis import manipulation\nfrom optable.synthesis import manipulation_candidate\nfrom optable.dataset import feature_types\nfrom optable import _core\n\n\nclass TimeDiffManipulation(manipulation.Manipulation):\n def __init__(self, path, dataset, col):\n self.__path = path\n self.__dataset = dataset\n self.__col = col\n\n super(TimeDiffManipulation, self).__init__()\n\n def __repr__(self):\n return \"TimeDiff {} {}\".format(self.__path, self.__col)\n\n @property\n def path(self):\n return self.__path\n\n @property\n def dataset(self):\n return self.__dataset\n\n @property\n def col(self):\n return self.__col\n\n def calculate_priority(self):\n return 0.7 + 0.5 * self.path.not_deeper_count \\\n + 0.5 * self.path.substance_to_many_count(self.dataset, self.col) \\\n * self.path.to_many_path_priority(self.dataset, self.col)\n\n def calculate_size(self):\n 
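# a TimeDiff manipulation always synthesizes exactly one new column\n        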
return 1\n\n def meta_feature_size():\n return 3\n\n def meta_feature(self):\n to_many_meta = self.path.substance_to_many_count(\n self.dataset, self.col) \\\n * self.path.to_many_path_priority(\n self.dataset, self.col)\n return [1, self.path.not_deeper_count, to_many_meta]\n\n def meta_feature_name():\n return [\n \"TimeDiff-Constant\",\n \"TimeDiff-NotDeeperCount\",\n \"TimeDiff-ToManyMeta\"\n ]\n\n def synthesis(self):\n new_data_name = \"{}TimeDiff_{}_{}\".format(\n feature_types.aggregate_processed_numerical.prefix,\n self.__path, self.__col)\n\n dst_table = self.dataset.tables[self.__path.dst]\n dst_df = dst_table.df\n dst_data = dst_df[self.__col].values\n time_for_each_table = {\n table_idx: self.dataset.tables[table_name].hour_time_data\n for table_idx, table_name in enumerate(self.__path.table_names)\n if self.dataset.tables[table_name].has_time}\n sorted_index_for_each_table = {\n table_idx: self.dataset.tables[table_name].sorted_time_index\n for table_idx, table_name in enumerate(self.__path.table_names)\n if self.dataset.tables[table_name].has_time}\n src_id_for_each_relation = [\n self.dataset.tables[rel.src].df[rel.src_id].values\n for rel in self.__path.relations\n ]\n dst_id_for_each_relation = [\n self.dataset.tables[rel.dst].df[rel.dst_id].values\n for rel in self.__path.relations\n ]\n src_is_unique_for_each_relation = [\n rel.type.src_is_unique\n for rel in self.__path.relations\n ]\n dst_is_unique_for_each_relation = [\n rel.type.dst_is_unique\n for rel in self.__path.relations\n ]\n\n new_data = _core.Aggregator().aggregate(\n dst_data, time_for_each_table, sorted_index_for_each_table,\n src_id_for_each_relation, dst_id_for_each_relation,\n src_is_unique_for_each_relation, dst_is_unique_for_each_relation,\n \"max\", \"max\")\n new_data -= self.dataset.tables[self.__path.src].time_data\n new_data = new_data.astype(np.float32)\n self.__dataset.tables[self.__path.src].set_new_data(\n new_data, new_data_name)\n\n\nclass TimeDiffCandidate(manipulation_candidate.ManipulationCandidate):\n def search(self, path, dataset):\n if len(path) == 0 or path.not_deeper_count > 0:\n return []\n dst_table = dataset.tables[path.dst]\n ret = []\n for col in dst_table.df.columns:\n ftype = dst_table.ftypes[col]\n if ftype == feature_types.time:\n ret.append(TimeDiffManipulation(\n path, dataset, col\n ))\n return ret\n","sub_path":"optable_submission/optable_package/optable/manipulations/time_features/time_diff.py","file_name":"time_diff.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"169499219","text":"# http://codeforces.com/problemset/problem/9/A\ndef reduce(numerator, denominator):\n for i in range(2, numerator+1):\n if numerator % i == 0 and denominator % i == 0:\n return reduce(numerator // i, denominator // i)\n return numerator, denominator\n\ndef solve(data):\n y, w = map(int, data[0].split())\n max_ = max(y, w)\n a, b = reduce(6-max_ + 1, 6)\n return [str(a)+\"/\"+str(b)]\n","sub_path":"thinkable/data_algorithms/codeforces/die_roll.py","file_name":"die_roll.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"632148837","text":"import os\nimport sys\nimport pickle\nimport subprocess\nimport docker\nimport re\nimport socket\n\nwith open('/etc/trainerIBOS/port_time.conf', 'r') as f:\n portProgramm = int(f.readline())\n\nclientDocker = docker.from_env()\n\ndef checkLab5Hint():\n \n 
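# Replay the trainee's bash history against the running trainerIBOS container,\n    # check the state of each task, and report every completed task to the main\n    # program over a local socket (the b'codeLab5-N' messages near the end).\n    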
codeLab = 0 # Exit code: 0 - success, 1 - failure.\n    textError = '' # Exit message text.\n    # 70 points in total\n    res1 = True # 15 10 5\n    res2 = True # 20 20 15\n    res3 = True # 15 15 10\n    res4 = True # 20 15 10\n    res5 = True # 10 10\n    res6 = True # 20\n    \n    amount = 0\n    procent = 0.0\n    container = clientDocker.containers.get('trainerIBOS')\n    NAME_DOCKER_VM = 'trainerIBOS'\n    NAME_DOCKER_VM_VOLUME = 'trainerIBOS_VOLUME'\n    ready = []\n    \n    with open(\"/var/lib/docker/volumes/{}/_data/.bash_history\".format(NAME_DOCKER_VM_VOLUME)) as gr, open('/tmp/trainerIBOS/trainerIBOS.task', 'rb') as task:\n        data = pickle.load(task)\n        diff = data[0]\n        user = data[1]\n        apt_get = data[2]\n        service = data[3]\n        priority = data[4]\n        if len(data) >= 6:\n            ready = data[5]\n        textBASH = []\n        for line in gr:\n            appLine = re.sub(r'\s+', ' ', line.rstrip())\n            if appLine not in textBASH:\n                textBASH.append(appLine)\n    \n    # -------------------- FIRST TASK -----------------------------------------\n    res11 = False\n    res121 = False; res122 = False\n    res13 = False\n    res23 = False\n    res3 = False\n    res41 = False; res42 = False; res43 = False; res44 = False; res45 = False\n    res51 = False; res52 = False\n    \n    for command in textBASH:\n        \n        if 1 not in ready:\n            \n            if 'ls ' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    text = result[1].decode()\n                    if 'syslog' in text and 'messages' in text:\n                        res11 = True\n                continue\n            \n            if ('cat ' in command or 'tail ' in command) and '/var/log/syslog' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res121 = True\n                continue\n            \n            if ('cat ' in command or 'tail ' in command) and '/var/log/messages' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res122 = True\n                continue    \n            \n            if ('cat ' in command or 'tail ' in command) and '/etc/rsyslog.conf' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res13 = True\n                continue\n        \n        \n        \n        \n        #-------------------------------\n        if 2 not in ready:\n            if ('cat ' in command or 'tail ' in command) and '/var/log/auth.log' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res23 = True\n                continue\n        \n        #-------------------------------\n        if 3 not in ready:\n            if ('cat ' in command or 'tail ' in command) and '/var/log/apt/history.log' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res3 = True\n                continue\n        \n        if 4 not in ready: \n            if 'journalctl' == command:\n                res41 = True\n                continue\n            \n            if 'journalctl' in command and '-p {}'.format(priority) in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res42 = True\n                continue\n            \n            if 'journalctl' in command and '--since ' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res43 = True\n                continue\n            \n            if 'journalctl' in command and '-k' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res44 = True\n                continue\n        \n        if 5 not in ready:\n            if 'journalctl' in command and '--disk-usage' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res51 = True\n                continue\n            \n            if 'journalctl' in command and '--vacuum-time=5days' in command:\n                result = container.exec_run(command)\n                if result[0] == 0:\n                    res52 = True\n                continue\n    \n    if 2 not in ready:\n        passwd = container.exec_run('cat /etc/passwd')[1].decode()\n        \n        \n        if '{}:x'.format(user) not in passwd:\n            res2 = False\n        else:\n            auth = container.exec_run('cat /var/log/auth.log')[1].decode()\n            if 'session opened for user {} by'.format(user) not in auth and 'session 
closed for user {}'.format(user) not in auth:\n                res2 = False\n\n            if res23 == False:\n                res2 = False    \n    if 4 not in ready:\n        if res41 == False:\n            res4 = False\n        \n        if res42 == False:\n            res4 = False\n        \n        if res43 == False:\n            res4 = False\n        \n        if res44 == False:\n            res4 = False\n    \n    if 4 not in ready:    \n        ls_res = container.exec_run('ls /var/log/journal')[0]\n        cat_conf = container.exec_run('cat /etc/systemd/journald.conf')[1].decode()\n        check_status = container.exec_run('systemctl status systemd-journald')\n        \n        \n        if ls_res != 0 or check_status[0] != 0 or 'active (running)' not in check_status[1].decode():\n            res4 = False\n        \n        #print(cat_conf)\n        \n        if '\nSystemMaxUse=100M\n' not in cat_conf:\n            res4 = False\n    \n    #---------------------------------------------\n    \n    if diff >= 1:\n        if 5 not in ready:\n            if res51 == False: \n                res5 = False\n            \n            if res52 == False: \n                res5 = False\n    \n    if diff == 2:\n        if 6 not in ready:\n            status = container.exec_run('systemctl status {}'.format(service))[1].decode()\n            if 'Active: inactive (dead)' not in status or '{}.service: Succeeded'.format(service) not in status:\n                res6=False\n    \n    res1 = all([res11, res121, res122, res13]) \n    if 1 not in ready and res1 == True:\n        ready.append(1)\n    if 2 not in ready and res2 == True:\n        ready.append(2)\n    if 3 not in ready and res3 == True:\n        ready.append(3)\n    if 4 not in ready and res4 == True:\n        ready.append(4)\n    if 5 not in ready and res5 == True:\n        ready.append(5)\n    if 6 not in ready and res6 == True:\n        ready.append(6)\n    \n    with open('/etc/trainerIBOS/{}.task'.format(NAME_DOCKER_VM), 'wb') as fil:\n        pickle.dump([diff, user, apt_get, service, priority, ready], fil)\n    \n    #print(res1, res2, res3, res4, res5, res6)\n    if res1:\n        sock = socket.socket()\n        sock.connect(('localhost', portProgramm))\n        sock.send(b'codeLab5-1')\n        sock.close() # Sends the corresponding message to the main program\n    \n    if res2:\n        sock = socket.socket()\n        sock.connect(('localhost', portProgramm))\n        sock.send(b'codeLab5-2')\n        sock.close() # Sends the corresponding message to the main program\n\n    if res3:\n        sock = socket.socket()\n        sock.connect(('localhost', portProgramm))\n        sock.send(b'codeLab5-3')\n        sock.close() # Sends the corresponding message to the main program\n    \n    if res4:\n        sock = socket.socket()\n        sock.connect(('localhost', portProgramm))\n        sock.send(b'codeLab5-4')\n        sock.close() # Sends the corresponding message to the main program\n    \n    if res5 and diff > 0:\n        sock = socket.socket()\n        sock.connect(('localhost', portProgramm))\n        sock.send(b'codeLab5-5')\n        sock.close() # Sends the corresponding message to the main program\n    \n    if res6 and diff == 2:\n        sock = socket.socket()\n        sock.connect(('localhost', portProgramm))\n        sock.send(b'codeLab5-6')\n        sock.close() # Sends the corresponding message to the main program\n    \n    \n    \nif __name__ == '__main__': \n    checkLab5Hint() \n    \n    \n\n    \n    \n    \n    \n    \n    \n    ","sub_path":"etc-trainerIBOS/script/Lab5hint.py","file_name":"Lab5hint.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"390440313","text":"# Copyright (c) 2009 James Aylett \n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit 
persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nclass SessionStashable(object):\n \"Mixin this class to provide useful instance and class methods for stashing unsaved whatevers in the session.\"\n session_variable = 'object_stash' # if using this more than once, set explicitly on each class\n creator_field = 'created_by' # if you don't like this name, set explicitly on your derived class\n\n # Setting this to a string will add a context variable with that name\n # containing a count of unstashed models of this type.\n context_count_name = None\n\n def stash_in_session(self, session):\n \"Stash this object in the current session.\"\n if getattr(self, self.creator_field)!=None:\n return # nothing to do\n if not session.has_key(self.session_variable):\n session[self.session_variable] = []\n if self.pk not in session[self.session_variable]:\n session[self.session_variable].append(self.pk)\n session.modified = True\n #print \"stashed %s in session\" % self\n \n def stashed_in_session(self, session):\n \"Is this object stashed in the current session?\"\n if getattr(self, self.creator_field)!=None:\n return False # non-anonymous should never be stashed\n if session.has_key(self.session_variable):\n stashed = session.get(self.session_variable)\n return self.pk in stashed\n return False\n \n @classmethod\n def clear_stashed_objects(cls, session):\n \"Clear the sessions store for this object.\"\n #print \"Clearing session store for %s\" % cls\n if session.has_key(cls.session_variable):\n del session[cls.session_variable]\n \n @classmethod\n def reparent_all_my_session_objects(cls, session, user):\n \"Go over the objects stashed in session and set user as their creator_field. 
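Typically called right after login so that objects created anonymously become owned by the new user. 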
Then clear the sessions store.\"\n        cls.get_stashed_in_session(session).update(**{cls.creator_field: user})\n        cls.clear_stashed_objects(session)\n\n    @classmethod\n    def num_stashed_in_session(cls, session):\n        \"Tell me how many objects are stashed in my session.\"\n        try:\n            r = len(session.get(cls.session_variable, []))\n            #print \"Found %i in %s\" % (r, cls,)\n            return r\n        except:\n            return 0\n\n    @classmethod\n    def get_stashed_in_session(cls, session):\n        \"Get all the objects stashed in my session.\"\n        #print \"Getting all %s stashed in session\" % cls\n        if session.has_key(cls.session_variable):\n            return cls.objects.filter(id__in=session[cls.session_variable])\n        else:\n            return cls.objects.none()\n\n    @classmethod\n    def get_objects_for_request(cls, request):\n        \"Get all the objects either stashed in my session or owned by the user.\"\n        if request.user.is_authenticated():\n            return cls.objects.filter(\n                ** {\n                    cls.creator_field: request.user,\n                }\n            )\n        else:\n            return cls.get_stashed_in_session(request.session)\n\nfrom django.db.models.loading import cache\n\ndef _stashable_models():\n    for app in cache.get_apps():\n        for model in cache.get_models(app):\n            if issubclass(model, SessionStashable):\n                yield model\n\ndef stashed_object_counts(request):\n    \"\"\"A context processor which adds counts of stashed objects\n    of models which inherit from SessionStashable to RequestContext.\n\n    To make a count appear for a particular model, set the\n    class attribute context_count_name to an appropriate string\n    to name its context variable, and enable this context processor\n    in settings.py.\n    \"\"\"\n    extra_context = {}\n    for model in _stashable_models():\n        if model.context_count_name:\n            extra_context[model.context_count_name] = model.num_stashed_in_session(request.session)\n\n    return extra_context\n\ndef reparent_all_session_objects(sender, request, user, **kwargs):\n    \"\"\"Reparents the session objects for all models that inherit from\n    SessionStashable.\n\n    Can be connected to the `django.contrib.auth.signals.user_logged_in`\n    signal.\n    \"\"\"\n    for model in _stashable_models():\n        model.reparent_all_my_session_objects(request.session, user)\n","sub_path":"django_session_stashable/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"170496288","text":"\nfrom config import exchangekey \nimport requests, json \nurl = \"https://api.apilayer.com/exchangerates_data/latest?base=USD\" \npayload = {}\nheaders= {\n  \"apikey\": exchangekey\n} \nresponse = requests.request(\"GET\", url, headers=headers, data = payload) \nstatus_code = response.status_code\nresult = response.text\nrates_json = json.loads(result)  # avoid shadowing the json module\nprint(rates_json['rates']['EUR'])\n \n#print(json.dumps(rates_json, indent=4))\n#;","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"144460576","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 25 18:58:19 2017\r\n\r\n@author: 黃大祐\r\n\"\"\"\r\n# after the third Wednesday of each month the quotes roll to the next month's contract, so refreshing daily is safer\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndf = pd.read_csv('TXF20112015.csv',sep=',',header=None)\r\nTAIEX = df.values # extract the DataFrame values into an nrow x 5col array\r\n# columns: 0 time, 1 close, 2 open, 3 high, 4 low\r\ntradeday = list(set(TAIEX[:,0]//10000)) # keep only the leading 8-digit date (down to the day); set() removes duplicates\r\ntradeday.sort()\r\nprofit = np.zeros((len(tradeday),1))\r\nprofitstrategy10 = 
np.zeros((len(tradeday),1))\r\nfor i in range(len(tradeday)):\r\n    date = tradeday[i]\r\n    idx = np.nonzero(TAIEX[:,0]//10000==date)[0] # indices of that day's rows in col 0 (idx[0] is the day's first minute)\r\n    idx.sort()\r\n    # set the stop loss at 30 points ############################\r\n    # short position: the more the price rises the more we lose; only declines make money\r\n    p1=TAIEX[idx[0],2] ## opening price\r\n    idx2 = np.nonzero(TAIEX[idx,4]>=p1+30)[0] # a 30-point rise is my loss, i.e. the stop-loss trigger\r\n    if(len(idx2)==0): # for days that never hit the stop-loss price\r\n        p2 = TAIEX[idx[-1],1]\r\n    else:\r\n        p2 = TAIEX[idx[idx2[0]],1] # use the first bar that hits the stop\r\n    profit[i] = p1-p2 # short P&L: sell at p1, buy back at p2\r\n    ###########################################\r\nprofit2 = np.cumsum(profit) # cumulative profit\r\nplt.plot(profit2)\r\nplt.show() # force the plot to be drawn now so the figures are not merged\r\n\r\n# total P&L points, win rate (break-even counts as a loss), average gain per winning trade, average loss per losing trade, and a histogram of the daily P&L\r\n\r\nans1 = profit2[-1] # last cumulative value\r\nans2 = np.sum(profit>0) / len(profit) # >0 counts as a win\r\nans3 = np.mean(profit[profit>0])\r\nans4 = np.mean(profit[profit<=0])\r\nplt.hist(profit,bins=100) # split into 100 bins\r\nplt.show()\r\n#=> Is it reasonable that the win rate drops? Yes => we exit as soon as the stop is hit (days that first lost and then bounced back to a win are removed)\r\n\r\n","sub_path":"期貨策略_以台指期為例/放空停損.py","file_name":"放空停損.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"295686107","text":"#!/usr/bin/python-init -Otu\n# modify rpm / debian dependencies for given packages\n\nimport re\nimport os\nimport argparse\n\noptions = None\nbuffer = None\n\n# list of packages to work with\nDIR_LIST = [\n    \"cluster-backbone\",\n    \"cluster-backbone-sql\",\n    \"cluster-config-server\",\n    \"cluster-server\",\n    \"collectd-init\",\n    \"host-monitoring\",\n    \"init-license-tools\",\n    \"host-monitoring\",\n    \"init-snmp-libs\",\n    \"initcore\",\n    \"loadmodules\",\n    \"logcheck-server\",\n    \"logging-server\",\n    \"md-config-server\",\n    \"meta-server\",\n    \"mother\",\n    \"noctua\",\n    \"package-install\",\n    \"python-modules-base\",\n    \"rms-tools\",\n    \"rms-tools-base\",\n    \"rrd-grapher\",\n    \"webfrontend\",\n    \"discovery-server\",\n]\n\nP_LIST = DIR_LIST + [\n    \"host-relay\",\n    \"snmp-relay\",\n    \"cluster-backbone-tools\",\n]\n\n\ndef print_verbose(str):\n    if options.verbose:\n        buffer.append(\"  {}\".format(str))\n\n\nclass CFile(object):\n    def __init__(self, name):\n        self._name = name\n        self.read()\n\n    def read(self):\n        self._lines = file(self._name, \"r\").read().split(\"\\n\")\n        print_verbose(\"read {:d} lines from {}\".format(len(self._lines), self._name))\n\n    def write(self):\n        file(\"{}\".format(self._name), \"w\").write(\"\\n\".join(self._lines))\n        print_verbose(\"wrote {:d} lines to {}\".format(len(self._lines), self._name))\n\n    def format_spec(self, pd):\n        return pd.show()\n\n    def modify(self):\n        new_lines = []\n        _count = 0\n        for line in self._lines:\n            if self.is_depends_line(line):\n                _count += 1\n                line = self.modify_line(line)\n            new_lines.append(line)\n        print_verbose(\"found {:d} lines to modify in {}\".format(_count, self._name))\n        self._lines = new_lines\n        return self\n\n    def modify_list(self, d_list):\n        return \", \".join([self.format_spec(entry.apply_changes()) for entry in d_list])\n\n\nclass PD(object):\n    def __init__(self, name, pd_type):\n        self._name = name\n        self._comp = None\n        self._version = None\n        self._type = pd_type\n\n    def set_comp(self, comp):\n        self._comp = comp\n\n    def set_version(self, version):\n        self._version = version\n\n    def show(self):\n        if self._type == \"spec\":\n            if self._comp:\n                return \"{} {} {}\".format(\n                    self._name,\n                    self._comp,\n                    self._version,\n                )\n            else:\n                return self._name\n        else:\n            if self._comp:\n                return \"{} ({} {})\".format(\n                    self._name,\n                    self._comp,\n                    self._version,\n                )\n            else:\n                return self._name\n\n    def __unicode__(self):\n        _rs 
= \"pd for {}\".format(self._name)\n if self._comp:\n _rs = \"{} ({} {})\".format(_rs, self._comp, self._version)\n return _rs\n\n def __repr__(self):\n return unicode(self)\n\n def to_compare(self, op):\n return {\n \"eq\": \"==\" if self._type == \"spec\" else \"=\",\n \"gt\": \">\",\n \"lt\": \"<\",\n \"gte\": \">=\",\n \"lte\": \"<=\"\n }[op]\n\n def apply_changes(self):\n if (options.all_packages and self._name in P_LIST) or self._name == options.package:\n _new_comp, _new_version = (\n self.to_compare(options.compare),\n options.version,\n )\n if self._comp != _new_comp or self._version != _new_version:\n buffer.append(\n \" changing version dependency of {} from {} to {}\".format(\n self._name,\n \"{} {}\".format(self._comp, self._version) if self._comp else \"none\",\n \"{} {}\".format(_new_comp, _new_version),\n )\n )\n if options.doit:\n self._comp = _new_comp\n self._version = _new_version\n return self\n\n\nclass BuildSpec(CFile):\n def is_depends_line(self, line):\n return line.startswith(\"Requires:\")\n\n def modify_line(self, line):\n # print line\n return \"Requires: {}\".format(\n self.modify_list(\n self.split_line(line.split(\":\", 1)[1].strip()),\n )\n )\n\n def split_line(self, line):\n _dlist = []\n for entry in [_part.strip() for _part in line.split(\",\")]:\n _parts = entry.split()\n while _parts:\n _cpd = PD(_parts.pop(0), \"spec\")\n _dlist.append(_cpd)\n if _parts:\n _fc = _parts[0][0]\n if _fc in [\"=\", \"<\", \">\"]:\n _cpd.set_comp(_parts.pop(0))\n _cpd.set_version(_parts.pop(0))\n else:\n break\n return _dlist\n\n\nclass Control(CFile):\n def is_depends_line(self, line):\n return line.startswith(\"Depends:\")\n\n def modify_line(self, line):\n # print line\n return \"Depends: {}\".format(\n self.modify_list(\n self.split_line(line.split(\":\", 1)[1].strip()),\n )\n )\n\n def split_line(self, line):\n _dlist = []\n for entry in [_part.strip() for _part in line.split(\",\")]:\n if entry.endswith(\")\"):\n _name, _cvers = entry.split(None, 1)\n _sres = _cvers.replace(\"(\", \"\").replace(\")\", \"\").strip().split()\n if len(_sres) == 2:\n _comp, _vers = _sres\n else:\n _vers = _sres[0]\n _comp = \"\"\n while _vers[0] in [\"<\", \"=\", \">\"]:\n _comp = \"{}{}\".format(_comp, _vers[0])\n _vers = _vers[1:]\n _cpd = PD(_name, \"deb\")\n _cpd.set_comp(_comp)\n _cpd.set_version(_vers)\n else:\n _cpd = PD(entry, \"deb\")\n _dlist.append(_cpd)\n return _dlist\n\n\nclass Package(object):\n def __init__(self, root):\n self._root = root\n if not os.path.isdir(self._root):\n print(\"*** directory {} does not exist\".format(self._root))\n self.valid = False\n else:\n self.valid = True\n\n def modify(self):\n global buffer\n buffer = []\n build_spec = BuildSpec(os.path.join(self._root, \"build.spec\"))\n build_spec.modify().write()\n control = Control(os.path.join(self._root, \"debian\", \"control\"))\n control.modify().write()\n if buffer:\n print(\"\\nPackage:{}\\n\\n{}\".format(self._root[len(options.root):], \"\\n\".join(buffer)))\n\n\ndef main():\n global options\n valid_branches = [\"2.x\", \"2.5\", \"master\"]\n my_p = argparse.ArgumentParser()\n my_p.add_argument(\"--root\", type=str, default=\"/usr/local/share/home/local/development/git/init-packages\", help=\"root of package dir [%(default)s]\")\n my_p.add_argument(\"--branch\", type=str, default=\"2.5\", help=\"branch to modify [%(default)s]\", choices=valid_branches + [\"all\"])\n my_p.add_argument(\"--package\", type=str, default=\"\", help=\"package to modify [%(default)s]\")\n 
my_p.add_argument(\"--all-packages\", default=False, action=\"store_true\", help=\"modify all packages [%(default)s]\")\n my_p.add_argument(\"--version\", default=\"2.5-1\", type=str, help=\"version for package dependency [%(default)s]\")\n my_p.add_argument(\"--compare\", default=\"gte\", type=str, choices=[\"gt\", \"eq\", \"lt\", \"gte\", \"lte\"], help=\"compare oparator to use [%(default)s]\")\n my_p.add_argument(\"--verbose\", default=False, action=\"store_true\", help=\"enable debug mode [%(default)s]\")\n my_p.add_argument(\"--doit\", default=False, action=\"store_true\", help=\"modify dependencies [%(default)s]\")\n options = my_p.parse_args()\n\n if options.branch == \"all\":\n options.branch = valid_branches\n else:\n options.branch = [options.branch]\n for branch in options.branch:\n # build list of packages\n obj_list = [\n Package(\n os.path.join(\n options.root,\n _pn,\n \"\" if branch == \"master\" else branch\n )\n ) for _pn in DIR_LIST\n ]\n [obj.modify() for obj in obj_list if obj.valid]\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"modify_dependencies.py","file_name":"modify_dependencies.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"105432677","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n# import tf\nimport transformations\nfrom pyquaternion import Quaternion\nimport math\nimport pdb\n\nclass CarlaSlamEvaluate(object):\n def __init__(self, method, flocation, plotstyle):\n\n # contains a method which will define how the data is processed\n self.method = method\n\n # .txt file location\n self.flocation = flocation\n\n # label used for plotting\n self.label = \"\"\n\n # defines how the method is presented in a graph\n self.plotstyle = plotstyle\n\n # game timestamp in seconds (float)\n self.time = []\n\n # position [x, y, z] initialised at [0, 0, 0]\n # Use a right handed coordinate system (z points upwards)\n self.positions = []\n\n # orientation [roll, pitch, yaw] in degrees\n self.orientations = []\n\n # orientation expressed in quaternions\n self.quaternions = []\n\n # Homogeneous coordinate matrix\n self.Q = []\n\n # Contains the data used for the open() function\n self.data = {}\n\n # Relative pose change over a certain time frame expressed in homogeneous coordinates\n self.Q1Q2 = []\n\n # time stamps used for RPE\n self.timeQ1Q2 = []\n\n # Root Mean Square Error of Relative Pose Error over distance\n self.RPE_RMSE_dist = []\n\n\n def __enter__(self):\n self.data = open(self.flocation, \"r\")\n # This should label the object to the file name which should be descriptive enough\n # index where \"SL\" starts\n index_start = self.flocation.find(\"SL\")\n\n # label = NameOfMethod_SL_{}_NV_{}\n # self.label = self.method + \"_\" + self.flocation[index_start:(index_start+11)]\n self.label = self.method + \"_\" + self.flocation[index_start:-4]\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.data.close()\n return None\n\n def process_data(self):\n \"\"\" Makes an object of .txt pose data \"\"\"\n\n if self.method == \"gt\":\n\n # pose variables that come from the groundtruth txt file\n x_ue_abs = []\n y_ue_abs = []\n z_ue_abs = []\n roll_ue_abs = []\n pitch_ue_abs = []\n yaw_ue_abs180 = []\n for line_data in self.data:\n # Groundtruth is seperated by a space\n line = line_data.split(\" \")\n float_line = [float(element) for element in line]\n\n # convert time from [ms] to [s]\n # time = 
round(float_line[0]*10**(-3), 4)\n time = float_line[0]\n self.time.append(time)\n\n # groundtruth is absolute position in unreal engine coordinate system\n # left handed coordinate system\n # orientation is in degrees\n x_ue_abs.append(float_line[1])\n y_ue_abs.append(float_line[2])\n z_ue_abs.append(float_line[3])\n roll_ue_abs.append(float_line[4])\n pitch_ue_abs.append(float_line[5])\n\n # note: this yaw angle is between -180 and 180 in a left handed system\n yaw_ue_abs180.append(float_line[6])\n\n # convert the yaw angle to an angle that goes beyond 180 degrees\n yaw_ue_abs = abs_yaw_angle_conversion(yaw_ue_abs180)\n\n # Input are the coordinates of the vehicle in an absolute left handed system, specified by UE\n # Yaw angle is the absolute rotation measured from the UE axis system, which allows to specify full rotation\n for index, time in enumerate(self.time):\n # Make sure that vehicle starts at 0\n if index == 0:\n x_ue_init = x_ue_abs[index]\n y_ue_init = y_ue_abs[index]\n z_ue_init = z_ue_abs[index]\n yaw_ue_init = yaw_ue_abs[index]\n\n # Make the pose relative to its starting position\n x_ue_0 = x_ue_abs[index] - x_ue_init\n y_ue_0 = y_ue_abs[index] - y_ue_init\n z_ue_0 = z_ue_abs[index] - z_ue_init\n yaw_ue_0 = yaw_ue_abs[index] - yaw_ue_init\n\n # Nothing changes with the Euler angles, except yaw.\n roll_ue_rel = roll_ue_abs[index]\n pitch_ue_rel = pitch_ue_abs[index]\n\n # new Carla can output a pitch of 360 degrees, which should be 0 degrees\n if pitch_ue_rel >= 360.0:\n pitch_ue_rel = pitch_ue_rel - 360.0\n # yaw needs to be relative to initial position\n yaw_ue_rel = yaw_ue_0\n\n # convert absolute left handed system into a relative left handed system\n theta = np.radians(yaw_ue_init)\n c, s = np.cos(theta), np.sin(theta)\n # note this is a rotation matrix for LEFT HANDED COORDINATE SYSTEM\n R = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n pos_ue_0 = np.matrix([x_ue_0, y_ue_0, z_ue_0])\n pos_ue_rel = R*pos_ue_0.reshape(3, 1)\n # matrix to floats\n x_ue_rel = pos_ue_rel.tolist()[0][0]\n y_ue_rel = pos_ue_rel.tolist()[1][0]\n z_ue_rel = pos_ue_rel.tolist()[2][0]\n\n # yaw_start = int(round(yaw_ue_init))\n # if yaw_start == 0:\n # x_ue_rel = x_ue_0\n # y_ue_rel = y_ue_0\n # z_ue_rel = z_ue_0\n # elif yaw_start == 180:\n # x_ue_rel = -x_ue_0\n # y_ue_rel = -y_ue_0\n # z_ue_rel = z_ue_0\n # elif yaw_start == 90:\n # x_ue_rel = y_ue_0\n # y_ue_rel = -x_ue_0\n # z_ue_rel = z_ue_0\n # elif yaw_start == -90:\n # x_ue_rel = - y_ue_0\n # y_ue_rel = x_ue_0\n # z_ue_rel = z_ue_0\n # else:\n # print(\"Starting point is not along one of the axis\")\n # exit()\n\n # Convert relative left handed system to the right handed system used in ROS\n x = x_ue_rel\n y = - y_ue_rel\n z = z_ue_rel\n roll = -roll_ue_rel\n pitch = pitch_ue_rel\n yaw = - yaw_ue_rel\n\n position = np.array([x, y, z])\n orientation = np.array([roll, pitch, yaw])\n self.positions.append(position)\n self.orientations.append(orientation)\n\n # convert euler angles to quaternions\n # rotation order: static axis, roll, pitch, yaw\n # could be that there are change in sign in these quaternions\n quaternion = tf.transformations.quaternion_from_euler(math.radians(roll), math.radians(pitch), math.radians(yaw), axes='sxyz')\n self.quaternions.append(quaternion)\n\n # Convert quaternion to homogeneous coordinates\n # Note: NEVER GO FROM ROTATION MATRIX TO QUATERNIONS\n # Since one rotation matrix has 2 quaternion values\n # q = tf.transformations.quaternion_matrix(quaternion)\n # q[0][3] = x\n # q[1][3] = y\n # q[2][3] = z\n\n # 
lets try to get the rotation matrix from the Euler angles\n q = tf.transformations.euler_matrix(math.radians(roll), math.radians(pitch), math.radians(yaw), axes='sxyz')\n q[0][3] = x\n q[1][3] = y\n q[2][3] = z\n self.Q.append(q)\n\n if self.method == \"orb\":\n\n # need to extract the Euler angles to convert yaw since it only indicates angle at +180, -180\n roll_list = []\n pitch_list = []\n yaw_180_list = []\n for line_data in self.data:\n line = line_data.split(\" \")\n float_line = [float(element) for element in line]\n\n # time is already in [s]\n time = float_line[0]\n self.time.append(time)\n\n # pose estimation in ORB coordinate system\n x_orb = float_line[1]\n y_orb = float_line[2]\n z_orb = float_line[3]\n q1_orb = float_line[4]\n q2_orb = float_line[5]\n q3_orb = float_line[6]\n q4_orb = float_line[7]\n\n # Convert orb axis to ros axis, which in theory looks like\n # 1) CCW (+)90 deg in x-axis\n # 2) CCW (+)90 deg in z-axis\n\n # But it seems that positive rotations are defined as CW in the ORB system, instead of CCW.\n # That is the only reason how I can explain that x is correct and y and z are in the negative direction.\n # It is also not a left handed system, because the orientation of the axis are right handed.\n theta = np.radians(-90)\n c, s = np.cos(theta), np.sin(theta)\n # Right handed rotation matrices\n Rx = np.matrix([[1, 0, 0], [0, c, -s], [0, s, c]])\n Rz = np.matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n\n pos_old = np.matrix([x_orb, y_orb, z_orb])\n new_pos = Rz*Rx*pos_old.reshape(3, 1)\n x_new = new_pos.tolist()[0][0]\n y_new = new_pos.tolist()[1][0]\n z_new = new_pos.tolist()[2][0]\n # old method\n # x_new = z_orb\n # y_new = -x_orb\n # z_new = -y_orb\n\n position = np.array([x_new, y_new, z_new])\n self.positions.append(position)\n\n # Quaternions is defined as iq1+jq2+kq3+q4\n # The axis system of i, j, k are incorrect and need to be converted to ROS axis system\n q_old = np.matrix([q1_orb, q2_orb, q3_orb])\n q_new = Rz*Rx*q_old.reshape(3, 1)\n # Now the quaternion axis system has the same orientation as the euclidean space,\n # since the ORB axis system underwent the same transformation\n q1_new = q_new.tolist()[0][0]\n q2_new = q_new.tolist()[1][0]\n q3_new = q_new.tolist()[2][0]\n q4_new = q4_orb\n\n quaternion = [q1_new, q2_new, q3_new, q4_new]\n self.quaternions.append(quaternion)\n q = tf.transformations.quaternion_matrix(quaternion)\n q[0][3] = x_new\n q[1][3] = y_new\n q[2][3] = z_new\n self.Q.append(q)\n\n # Note that euler_from_quaternion, is quaternion_matrix function and then euler_from_matrix function\n # Also, positive rotations seem to be defined as CW instead of CCW.\n roll, pitch, yaw_180 = tf.transformations.euler_from_quaternion(quaternion, axes='sxyz')\n\n # roll and pitch are exactly reversed. If results are weird, this could be the mistake\n roll_list.append(np.degrees(-roll))\n pitch_list.append(np.degrees(-pitch))\n yaw_180_list.append(np.degrees(yaw_180))\n\n # convert yaw angle to an angle that can show more than 180 degrees\n yaw_list = abs_yaw_angle_conversion(yaw_180_list)\n\n # append the converted euler angles into the orientation attribute\n for index, roll in enumerate(roll_list):\n orientation = np.array([roll, pitch_list[index], yaw_list[index]])\n self.orientations.append(orientation)\n\n\n\n\ndef abs_yaw_angle_conversion(rel_yaw_angle):\n \"\"\"Function that converts a yaw angle that ranges from -180 to 180 degrees, to a yaw angle that has infinite range\n and indicates full rotations. 
Note that this function can be used for left and right handed axis systems\"\"\"\n\n    yaw_abs = []\n    yaw_abs.append(rel_yaw_angle[0])\n\n    # n180 tracks the number of 180 degrees rotations and direction.\n    n180 = 0\n\n    # sign_modulo is the sign of the very first pose\n    # scratch that... second pose, first pose the angle can be zero for ORB SLAM. The method does not work then.\n    sign_modulo = np.sign(rel_yaw_angle[1])\n\n    index = 1\n    index_limit = len(rel_yaw_angle)\n\n    debug_yaw = open(\"/home/sietse/debug_yaw.txt\", \"w\")\n    while index != index_limit:\n        # finds the modulo of either 180 or -180\n        modulo_angle = rel_yaw_angle[index] % (sign_modulo*180)\n\n        if myround(rel_yaw_angle[index - 1]) == -180 and myround(rel_yaw_angle[index]) == 180:\n            n180 = n180 - 180\n\n        if myround(rel_yaw_angle[index - 1]) == 180 and myround(rel_yaw_angle[index]) == -180:\n            n180 = n180 + 180\n\n        if myround(rel_yaw_angle[index]) == 0 and np.sign(rel_yaw_angle[index - 1]) == -1 and np.sign(rel_yaw_angle[index]) == 1:\n            n180 = n180 + 180\n\n        if myround(rel_yaw_angle[index]) == 0 and np.sign(rel_yaw_angle[index - 1]) == 1 and np.sign(rel_yaw_angle[index]) == -1:\n            n180 = n180 - 180\n\n        yaw_abs_element = n180 + modulo_angle\n        yaw_abs.append(yaw_abs_element)\n        debug_yaw.write(\"{} {} {} \\n\".format(index, rel_yaw_angle[index], yaw_abs_element))\n        index = index + 1\n\n    debug_yaw.close()\n    return yaw_abs\n\n\ndef myround(x, base=10):\n    \"\"\"rounds the number to the nearest base, in this case 10\n    ORB has jumps that go from -175 to 175, which are not detected if the round() function is used\"\"\"\n    return int(round(x/base))*base\n\n\ndef main():\n    method_gt = \"gt\"\n    gt_ps_static = 'k-'\n    gt_file_static = \"/home/sietse/PrelimExpStaticVsDynamic/SL_58_NV_0_SV_1_gt.txt\"\n    with CarlaSlamEvaluate(method_gt, gt_file_static, gt_ps_static) as gt_static:\n        gt_static.process_data()\n\n    method_orb = \"orb\"\n    orb_ps_static = 'g-'\n    orb_file_static = \"/home/sietse/PrelimExpStaticVsDynamic/SL_58_NV_0_SV_1_orb.txt\"\n\n    with CarlaSlamEvaluate(method_orb, orb_file_static, orb_ps_static) as orb_static:\n        orb_static.process_data()\n\n\nif __name__==\"__main__\":\n    main()\n","sub_path":"CarlaSlamPerformanceV2.py","file_name":"CarlaSlamPerformanceV2.py","file_ext":"py","file_size_in_byte":14253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"28744675","text":"import os\nfrom glob import glob\nimport numpy as np\nimport time\nimport datetime\nimport torch\nimport torchvision\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.nn import BCELoss, SmoothL1Loss\nimport torch.nn.functional as F\nfrom network import U_Net, R2U_Net, AttU_Net, R2AttU_Net, ResAttU_Net\nfrom evaluation import CrossEntropy\nimport csv\nimport timeit\nfrom scipy import ndimage\nimport scipy.ndimage.morphology as ndi_morph\nimport skimage.morphology as skimage_morph\nfrom skimage import io\nimport pandas as pd\n\n# Additional imports (R&R)\nfrom matplotlib import pyplot as plt\n\nclass Solver(object):\n\tdef __init__(self, config, train_loader, validation_loader, test_loader):\n\n\t\t# Data loader\n\t\tself.train_loader = train_loader\n\t\tself.validation_loader = validation_loader\n\t\tself.test_loader = test_loader\n\t\tself.down_factor = config.down_factor\n\n\t\t# Models\n\t\tself.unet = None\n\t\tself.optimizer = None\n\t\tself.img_ch = config.img_ch\n\t\tself.GT_ch = config.GT_ch\n\t\tself.output_ch = config.output_ch\n\t\tself.first_layer_numKernel = 
config.first_layer_numKernel\n\t\tself.UnetLayer = config.UnetLayer\n\n\t\t# Hyper-parameters\n\t\tself.initial_lr = config.lr\n\t\tself.current_lr = config.lr\n\t\tself.patch_num = config.patch_num // config.down_factor ** 2\n \n\t\tself.optimizer_choice = config.optimizer_choice\n\t\tif config.optimizer_choice == 'Adam':\n\t\t\tself.beta1 = config.beta1\n\t\t\tself.beta2 = config.beta2\n\t\telif config.optimizer_choice == 'SGD':\n\t\t\tself.momentum = config.momentum\n\t\telse:\n\t\t\tprint('No such optimizer available')\n\n\t\t# Loss Function\n\t\tif config.loss_function == 'BCE':\n\t\t\tself.loss_function_name = 'BCE'\n\t\t\tself.loss_function = BCELoss()\n\t\telif config.loss_function == 'SmoothL1':\n\t\t\tself.loss_function_name = 'SmoothL1'\n\t\t\tself.loss_function = SmoothL1Loss()\n\t\telif config.loss_function == 'Dice':\n\t\t\tself.loss_function_name = 'Dice'\n\n\t\t# Training settings\n\t\tself.num_epochs = config.num_epochs\n\t\tself.batch_size = config.batch_size\n\t\tself.withTF = config.withTF\n\t\tself.edge_enhance = config.edge_enhance\n\n\t\t# Early stop or not\n\t\tself.early_stop = config.early_stop\n\n\t\t# Path\n\t\tself.current_model_saving_path = config.current_model_saving_path\n\t\tself.current_prediction_path = config.current_prediction_path\n\t\tself.current_loss_history_path = config.current_loss_history_path\n\t\tself.test_result_comparison_path = config.test_result_comparison_path\n\t\tself.test_GT_list = config.GT_test\n\t\tself.mode = config.mode\n\n\t\tself.device = torch.device('cuda: %d' % config.cuda_idx)\n\t\tself.model_type = config.model_type\n\t\tself.t = config.t\n\t\tself.build_model()\n\n\tdef build_model(self):\n\t\t\"\"\"Build generator and discriminator.\"\"\"\n\t\tif self.model_type =='U_Net':\n\t\t\tself.unet = U_Net(UnetLayer = self.UnetLayer, img_ch = self.img_ch, output_ch = self.output_ch, first_layer_numKernel = self.first_layer_numKernel)\n\t\telif self.model_type =='R2U_Net':\n\t\t\tself.unet = R2U_Net(img_ch = self.img_ch, output_ch = self.output_ch, t = self.t, first_layer_numKernel = self.first_layer_numKernel)\n\t\telif self.model_type =='AttU_Net':\n\t\t\tself.unet = AttU_Net(img_ch = self.img_ch, output_ch = self.output_ch, first_layer_numKernel = self.first_layer_numKernel)\n\t\telif self.model_type == 'R2AttU_Net':\n\t\t\tself.unet = R2AttU_Net(img_ch = self.img_ch, output_ch = self.output_ch, t = self.t, first_layer_numKernel = self.first_layer_numKernel)\n\t\telif self.model_type == 'ResAttU_Net':\n\t\t\tself.unet = ResAttU_Net(UnetLayer = self.UnetLayer, img_ch = self.img_ch, output_ch = self.output_ch, first_layer_numKernel = self.first_layer_numKernel)\n\t\t\t\n\t\tif self.optimizer_choice == 'Adam':\n\t\t\tself.optimizer = optim.Adam(list(self.unet.parameters()), self.initial_lr, [self.beta1, self.beta2])\n\t\telif self.optimizer_choice == 'SGD':\n\t\t\tself.optimizer = optim.SGD(list(self.unet.parameters()), self.initial_lr, self.momentum)\n\t\telse:\n\t\t\tpass\n\n\t\tself.unet.to(self.device)\n\n\tdef print_network(self, model, name):\n\t\t\"\"\"Print out the network information.\"\"\"\n\t\tnum_params = 0\n\t\tfor p in model.parameters():\n\t\t\tnum_params += p.numel()\n\t\tprint(model)\n\t\tprint(name)\n\t\tprint(\"The number of parameters: {}\".format(num_params))\n\t\t\n\tdef to_data(self, x):\n\t\t\"\"\"Convert variable to tensor.\"\"\"\n\t\tif torch.cuda.is_available():\n\t\t\tx = x.cpu()\n\t\treturn x.data\n\n\t# Redefine the 'update_lr' function (R&R)\n\tdef update_lr(self, new_lr):\n\t\tfor param_group in 
self.optimizer.param_groups:\n\t\t\tparam_group['lr'] = new_lr \n\n\t# Define adaptive learning rate handler (R&R)\n\t# This only works for non-negative loss.\n\tdef adaptive_lr_handler(self, cooldown, min_lr, current_epoch, previous_update_epoch, plateau_ratio, adjustment_ratio, loss_history):\n\t\tif current_epoch > 1:\n\t\t\tif current_epoch - previous_update_epoch > cooldown:\n\t\t\t\tif (loss_history[-1] > loss_history[-2]) or (loss_history[-1]/loss_history[-2] > plateau_ratio):\n\t\t\t\t\tif self.current_lr > min_lr:\n\t\t\t\t\t\tself.current_lr = adjustment_ratio * self.current_lr\n\t\t\t\t\t\tself.update_lr(self.current_lr)\n\t\t\t\t\t\tprint('Validation loss stop decreasing. Adjust the learning rate to {}.'.format(self.current_lr))\n\t\t\t\t\t\treturn current_epoch\n\n\tdef reset_grad(self):\n\t\t\"\"\"Zero the gradient buffers.\"\"\"\n\t\tself.unet.zero_grad()\n\n\tdef dice_coeff_loss(self, Prediction_vector, GT_vector):\n\t\tsmooth = 1\n\t\tintersection = (GT_vector * Prediction_vector).sum()\n\t\treturn 1 - (2. * intersection + smooth) / (GT_vector.sum() + Prediction_vector.sum() + smooth)\n\n\tdef train(self):\n\t\t\"\"\"Train encoder, generator and discriminator.\"\"\"\n\n\t\t#====================================== Training ===========================================#\n\t\t#===========================================================================================#\n\n\t\tunet_path = os.path.join(self.current_model_saving_path, '%s-%s-%.4f-%d-%d-%d-best.pkl' %(self.model_type, self.optimizer_choice, self.initial_lr, self.num_epochs, self.batch_size, self.down_factor))\n\t\tlast_unet_path = os.path.join(self.current_model_saving_path, '%s-%s-%.4f-%d-%d-%d-last.pkl' %(self.model_type, self.optimizer_choice, self.initial_lr, self.num_epochs, self.batch_size, self.down_factor))\n\t\tprint('The U-Net path is {}'.format(unet_path))\n\t\t# U-Net Train\n\t\t# Train loss history (R&R)\n\t\ttrain_loss_history = []\n\t\t# Validation loss history (R&R)\n\t\tvalidation_loss_history = []\n\n\t\tif os.path.isfile(unet_path):\n\t\t\t# Load the pretrained Encoder\n\t\t\tself.unet.load_state_dict(torch.load(unet_path))\n\t\t\tprint('%s is Successfully Loaded from %s' %(self.model_type,unet_path))\n\n\t\telse:\n\t\t\t# Train for Encoder\n\t\t\tbest_unet_score = 0.\n\t\t\tprint('Start training. The initial learning rate is: {}'.format(self.initial_lr))\n\n\t\t\t# Write the first line of the train and validation loss history csv file.\n\t\t\twith open(os.path.join(self.current_loss_history_path, 'train_and_validation_history.csv'), 'a', \\\n\t\t\t\t\tencoding = 'utf-8', newline= '') as f:\n\t\t\t\twr = csv.writer(f)\n\t\t\t\twr.writerow(['Mode', 'Current Epoch', 'Total Epoch', 'Batch Size', 'Metric', 'Loss'])\n\t\t\t\tf.close()\n\n\t\t\tfor epoch in range(self.num_epochs):\n\t\t\t\tself.unet.train(True)\n\t\t\t\ttrain_epoch_loss = 0; validation_epoch_loss = 0\n\n\t\t\t\tlength = 0\n\t\t\t\tstart_time = timeit.default_timer()\n\n\t\t\t\tfor batch, (img, GT) in enumerate(self.train_loader):\n\t\t\t\t\timg = img.to(self.device)\n\t\t\t\t\tGT = GT.to(self.device)\n\n\t\t\t\t\t# Reshape the images and GTs to 4-dimensional so that they can get fed to the conv2d layer. 
(R&R)\n\t\t\t\t\t# The new shape has to be (batch_size, num_channels, img_dim1, img_dim2).\n\t\t\t\t\tif self.img_ch == 1:\n\t\t\t\t\t\timg = img[:, np.newaxis, :, :]\n\t\t\t\t\telse:\n\t\t\t\t\t\timg = img.transpose(1, 3); img = img.transpose(2, 3)\n\n\t\t\t\t\tif self.GT_ch == 1:\n\t\t\t\t\t\tGT = GT[:, np.newaxis, :, :]\n\t\t\t\t\telse:\n\t\t\t\t\t\tGT = GT.transpose(1, 3); GT = GT.transpose(2, 3)\n\n\t\t\t\t\t# SR : Segmentation Result\n\t\t\t\t\tSR = torch.sigmoid(self.unet(img))\n\n\t\t\t\t\t# Flatten the prediction, target, and training field.\n\t\t\t\t\tSR_flat = SR.view(SR.size(0), -1)\n\t\t\t\t\tGT_flat = GT.view(GT.size(0), -1)\n\n\t\t\t\t\t# Compute the loss for this batch.\n\t\t\t\t\tif self.loss_function_name == 'Dice':\n\t\t\t\t\t\ttrain_loss = self.dice_coeff_loss(SR_flat, GT_flat)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttrain_loss = self.loss_function(SR_flat, GT_flat)\n\n\t\t\t\t\t# Add the loss of this batch to the loss of this epoch. \n\t\t\t\t\ttrain_epoch_loss += train_loss.item()\n\t\t\t\t\tif self.edge_enhance == 'True':\n\t\t\t\t\t\tGT_edge_enhanced = ndimage.gaussian_laplace(np.squeeze(GT.cpu().detach().numpy()), sigma = 5)\n\t\t\t\t\t\tGT_edge1 = torch.tensor(np.int64(GT_edge_enhanced < -0.001))\n\t\t\t\t\t\tGT_edge2 = torch.tensor(np.int64(GT_edge_enhanced > 0.001))\n\t\t\t\t\t\ty_hat = torch.cat((torch.squeeze(SR_flat), torch.squeeze(SR)[GT_edge1 == 1], torch.squeeze(SR)[GT_edge2 == 1]), 0)\n\t\t\t\t\t\ty = torch.cat((torch.squeeze(GT_flat), torch.squeeze(GT)[GT_edge1 == 1], torch.squeeze(GT)[GT_edge2 == 1]), 0)\n\t\t\t\t\telif self.edge_enhance == 'Double':\n\t\t\t\t\t\tGT_edge_enhanced = ndimage.gaussian_laplace(np.squeeze(GT.cpu().detach().numpy()), sigma = 5)\n\t\t\t\t\t\tGT_edge1 = torch.tensor(np.int64(GT_edge_enhanced < -0.001))\n\t\t\t\t\t\tGT_edge2 = torch.tensor(np.int64(GT_edge_enhanced > 0.001))\n\t\t\t\t\t\ty_hat = torch.cat((torch.squeeze(SR_flat), torch.squeeze(SR)[GT_edge1 == 1], torch.squeeze(SR)[GT_edge1 == 1], torch.squeeze(SR)[GT_edge2 == 1]), 0)\n\t\t\t\t\t\ty = torch.cat((torch.squeeze(GT_flat), torch.squeeze(GT)[GT_edge1 == 1], torch.squeeze(GT)[GT_edge1 == 1], torch.squeeze(GT)[GT_edge2 == 1]), 0)\n\t\t\n\t\t\t\t\ttrain_loss = self.loss_function(y_hat, y)\n\n\t\t\t\t\t# Backprop + optimize\n\t\t\t\t\tself.reset_grad()\n\t\t\t\t\ttrain_loss.backward()\n\t\t\t\t\tself.optimizer.step()\n \n ### if batch size = 1 ###\n\t\t\t\t\tlength += 1\n\n\t\t\t\t\tif batch % 200 == 0:\n\t\t\t\t\t\tprint('[Training] Epoch [{}/{}], Batch: {}, Batch size: {}, Average {} Error: {}'.format(epoch + 1, self.num_epochs, batch, self.batch_size, self.loss_function_name, train_epoch_loss/length))\n\n\n \t\t\t\t\t# Empty cache to free up memory at the end of each batch.\n\t\t\t\t\tdel batch, img, GT, SR, GT_flat, SR_flat, train_loss\n\t\t\t\t\ttorch.cuda.empty_cache() \n\n\t\t\t\tend_time = timeit.default_timer()\n\t\t\t\t# Normalize the train loss over the length of the epoch (number of images in this epoch).\n\t\t\t\ttrain_epoch_loss = train_epoch_loss/length\n\n\t\t\t\t# Print the log info\n\t\t\t\tprint('[Training] Epoch [%d/%d], Train Loss: %.6f, Run Time: %.4f [h]' % (epoch + 1, self.num_epochs, train_epoch_loss, (end_time - start_time) / 60 / 60))\n\n\t\t\t\t# Append train loss to train loss history (R&R)\n\t\t\t\ttrain_loss_history.append(train_epoch_loss)\n\t\t\t\twith open(os.path.join(self.current_loss_history_path, 'train_and_validation_history.csv'), 'a', \\\n encoding = 'utf-8', newline= '') as f:\n\t\t\t\t\twr = 
csv.writer(f)\n\t\t\t\t\twr.writerow(['Training', '%d' % (epoch + 1), '%d' % (self.num_epochs), '%d' % (self.batch_size), \\\n \t '%s' % self.loss_function_name, '%.6f' % train_epoch_loss])\n\t\t\t\t\tf.close()\n\n\t\t\t\t#===================================== Validation ====================================#\n\t\t\t\tself.unet.train(False)\n\t\t\t\tself.unet.eval()\n\n\t\t\t\tlength = 0\n\t\t\t\tstart_time = timeit.default_timer()\n\n\t\t\t\tfor batch, (img, GT) in enumerate(self.validation_loader):\n\t\t\t\t\t# Read, reshape the GTs and images, and compute the target images.\n\t\t\t\t\timg = img.to(self.device)\n\t\t\t\t\tGT = GT.to(self.device)\n\n\t\t\t\t\t# Reshape the images and GTs to 4-dimensional so that they can get fed to the conv2d layer. (R&R)\n\t\t\t\t\t# The new shape has to be (batch_size, num_channels, img_dim1, img_dim2).\n\t\t\t\t\tif self.img_ch == 1:\n\t\t\t\t\t\timg = img[:, np.newaxis, :, :]\n\t\t\t\t\telse:\n\t\t\t\t\t\timg = img.transpose(1, 3); img = img.transpose(2, 3)\n\n\t\t\t\t\tif self.GT_ch == 1:\n\t\t\t\t\t\tGT = GT[:, np.newaxis, :, :]\n\t\t\t\t\telse:\n\t\t\t\t\t\tGT = GT.transpose(1, 3); GT = GT.transpose(2, 3)\n\n\t\t\t\t\t# SR : Segmentation Result\n\t\t\t\t\tSR = torch.sigmoid(self.unet(img))\n \n\t\t\t\t\t# Flatten the prediction and target.\n\t\t\t\t\tSR_flat = SR.view(SR.size(0), -1)\n\t\t\t\t\tGT_flat = GT.view(GT.size(0), -1)\n \n\t\t\t\t\t# Compute the loss for this batch.\n\t\t\t\t\tif self.loss_function_name == 'Dice':\n\t\t\t\t\t\tvalidation_loss = self.dice_coeff_loss(SR_flat, GT_flat)\n\t\t\t\t\telse:\n\t\t\t\t\t\tvalidation_loss = self.loss_function(SR_flat, GT_flat)\n\t\t\t\t\tlength += 1\n\t\t\t\t\tvalidation_epoch_loss += validation_loss.item()\n \n \n\t\t\t\t\t# Empty cache to free up memory at the end of each batch.\n\t\t\t\t\tdel img, GT, SR, GT_flat, SR_flat, validation_loss\n\t\t\t\t\ttorch.cuda.empty_cache() \n\n\t\t\t\t# Normalize the validation loss.\n\t\t\t\tvalidation_epoch_loss = validation_epoch_loss/length\n \n\t\t\t\tend_time = timeit.default_timer()\n\n\t\t\t\t# Define the decisive score of the network as 1 - validation_epoch_loss.\n\t\t\t\tunet_score = 1. 
- validation_epoch_loss\n\t\t\t\tprint('Current learning rate: {}'.format(self.current_lr))\n\n\t\t\t\tprint('[Validation] Epoch [%d/%d] Validation Loss: %.6f, Run Time: %.4f [h]' % (epoch + 1, self.num_epochs, validation_epoch_loss, (end_time - start_time)/60/60))\n\n\t\t\t\t# Append validation loss to the validation loss history (R&R)\n\t\t\t\tvalidation_loss_history.append(validation_epoch_loss)\t\t\t\t\n\t\t\t\twith open(os.path.join(self.current_loss_history_path, 'train_and_validation_history.csv'), 'a', \\\n                    encoding = 'utf-8', newline= '') as f:\n\t\t\t\t\twr = csv.writer(f)\n\t\t\t\t\twr.writerow(['Validation', '%d' % (epoch + 1), '%d' % (self.num_epochs), '%d' % (self.batch_size), \\\n                \t '%s' % self.loss_function_name, '%.6f' % validation_epoch_loss])\n\t\t\t\t\tf.close()\n\n\t\t\t\t# Make sure we save the best and last unets.\n\t\t\t\tif unet_score > best_unet_score:\n\t\t\t\t\tbest_unet_score = unet_score\n\t\t\t\t\tbest_epoch = epoch\n\t\t\t\t\tbest_unet = self.unet.state_dict()\n\t\t\t\t\tprint('Best %s model score : %.6f' % (self.model_type, best_unet_score))\n\t\t\t\t\ttorch.save(best_unet, unet_path)\n\t\t\t\tif (epoch == self.num_epochs - 1):\n\t\t\t\t\tlast_unet = self.unet.state_dict()\n\t\t\t\t\ttorch.save(last_unet, last_unet_path)\n\t\t\t\tif epoch % 10 == 0 and epoch != 0:\n\t\t\t\t\tepoch_unet_path_component = unet_path.split('/')\n\t\t\t\t\tfile_name = epoch_unet_path_component[-1].replace('best', 'epoch%d' % epoch)\n\t\t\t\t\tepoch_unet_path_component[-1] = file_name\n\t\t\t\t\tepoch_unet_path = '/'.join(epoch_unet_path_component)\n\t\t\t\t\tepoch_unet = self.unet.state_dict()\n\t\t\t\t\ttorch.save(epoch_unet, epoch_unet_path)\n\n\t\t\t\t# Adaptive Learning Rate (R&R)\n\t\t\t\ttry:\n\t\t\t\t\tprevious_epoch = self.adaptive_lr_handler(3, 0.01*self.initial_lr, epoch, previous_epoch, 0.98, 0.5, validation_loss_history)\n\t\t\t\texcept:\n\t\t\t\t\tprevious_epoch = self.adaptive_lr_handler(3, 0.01*self.initial_lr, epoch, 0, 0.98, 0.5, validation_loss_history)\n\t\t\t\t\n\t\t\t\t# Early stop (R&R)\n\t\t\t\tif (self.early_stop == True):\n\t\t\t\t\tif (len(validation_loss_history) > 9):\n\t\t\t\t\t\tif (np.mean(validation_loss_history[-10:-5]) <= np.mean(validation_loss_history[-5:])):\n\t\t\t\t\t\t\tprint('Validation loss stop decreasing. 
Stop training.')\n\t\t\t\t\t\t\tlast_unet = self.unet.state_dict()\n\t\t\t\t\t\t\ttorch.save(last_unet, last_unet_path)\n\t\t\t\t\t\t\tbreak \n \n\t\tdel self.unet\n\t\ttry:\n\t\t\tdel best_unet\n\t\t\ttorch.cuda.empty_cache()\n\t\texcept:\n\t\t\tprint('Cannot delete the variable \"best_unet\": variable does not exist.')\n \n\t\treturn train_loss_history, validation_loss_history\n\n\n\tdef test(self, which_unet = 'best', stop_epoch = None):\n\n\t\t\"\"\"Test encoder, generator and discriminator.\"\"\"\n\t\t#======================================= Test ====================================#\n\t\t#=================================================================================#\n\t\tunet_path = os.path.join(self.current_model_saving_path, '%s-%s-%.4f-%d-%d-%d-best.pkl' %(self.model_type, self.optimizer_choice, self.initial_lr, self.num_epochs, self.batch_size, self.down_factor))\n\t\tlast_unet_path = os.path.join(self.current_model_saving_path, '%s-%s-%.4f-%d-%d-%d-last.pkl' %(self.model_type, self.optimizer_choice, self.initial_lr, self.num_epochs, self.batch_size, self.down_factor))\n\t\t\n\n\t\tself.build_model()\n\t\tif not os.path.exists(self.current_prediction_path):\n\t\t\tos.makedirs(self.current_prediction_path)\n\t\tif stop_epoch == None:\n\t\t\tif which_unet == 'best':\n\t\t\t\tself.unet.load_state_dict(torch.load(unet_path))\n\t\t\t\tsave_folder = 'best/'\n\t\t\telif which_unet == 'last':\n\t\t\t\tself.unet.load_state_dict(torch.load(last_unet_path))\n\t\t\t\tsave_folder = 'last/'\n\t\t\telse:\n\t\t\t\tprint('Input argument which_unet must be either \"best\" or \"last\"')\n\t\telse:\n\t\t\tepoch_unet_path_component = unet_path.split('/')\n\t\t\tfile_name = epoch_unet_path_component[-1].replace('best', 'epoch%d' % stop_epoch)\n\t\t\tepoch_unet_path_component[-1] = file_name\n\t\t\tepoch_unet_path = '/'.join(epoch_unet_path_component)\n\t\t\tself.unet.load_state_dict(torch.load(epoch_unet_path))\n\t\t\tsave_folder = 'epoch%d/' % stop_epoch\n\t\tif not os.path.exists(self.current_prediction_path + save_folder):\n\t\t\tos.makedirs(self.current_prediction_path + save_folder)\n\n\t\tself.unet.train(False)\n\t\tself.unet.eval()\n\t\tlength = 0\n\t\ttest_epoch_loss = 0\n\t\ttest_GT_list = self.test_GT_list\n\n\t\tfor batch, (img, GT) in enumerate(self.test_loader):\n\t\t\timg = img.to(self.device)\n\t\t\tGT = GT.to(self.device)\n\n\t\t\t# Reshape the images and GTs to 4-dimensional so that they can get fed to the conv2d layer. 
(R&R)\n\t\t\t# The new shape has to be (batch_size, num_channels, img_dim1, img_dim2).\n\t\t\tif self.img_ch == 1:\n\t\t\t\timg = img[:, np.newaxis, :, :]\n\t\t\telse:\n\t\t\t\timg = img.transpose(1, 3); img = img.transpose(2, 3)\n\n\t\t\tif self.GT_ch == 1:\n\t\t\t\tGT = GT[:, np.newaxis, :, :]\n\t\t\telse:\n\t\t\t\tGT = GT.transpose(1, 3); GT = GT.transpose(2, 3)\n\n\t\t\t#plt.subplot(1,2,1); plt.imshow(img[0, 0, :, :].cpu().detach().numpy())\n\t\t\t#plt.subplot(1,2,2); plt.imshow(GT[0, 0, :, :].cpu().detach().numpy())\n\n\t\t\t# SR : Segmentation Result\n\t\t\tSR = torch.sigmoid(self.unet(img))\n \n\t\t\t# Flatten the prediction and target.\n\t\t\tSR_flat = SR.view(SR.size(0), -1)\n\t\t\tGT_flat = GT.view(GT.size(0), -1)\n\n\t\t\t# Compute test loss\n\t\t\tif self.loss_function_name == 'Dice':\n\t\t\t\ttest_loss = self.dice_coeff_loss(SR_flat, GT_flat)\n\t\t\telse:\n\t\t\t\ttest_loss = self.loss_function(SR_flat, GT_flat)\n\t\t\tnp_img = np.squeeze(SR.cpu().detach().numpy()) \n\n\t\t\t# extract filename from test folder\n\t\t\tfilename = test_GT_list[batch//self.patch_num][-15:-4]\n\n\t\t\tlength += 1\n\t\t\ttest_epoch_loss += test_loss.item()\n\n\t\t\tnp.save(self.current_prediction_path + save_folder + filename + '_' + str(batch % self.patch_num).zfill(2) + '_modelprediction' + '.npy', np_img)\n\n\t\t\tdel batch, img, GT, SR, GT_flat, SR_flat, test_loss\n\t\t\ttorch.cuda.empty_cache()\n\n\t\ttest_epoch_loss = test_epoch_loss/length\n\t\tprint('Model type: ', self.model_type, 'Test loss: ', test_epoch_loss)\n\n\t\t# Store the test result together with all other ones for comparison\n\t\t# If the file doesn't exist or is empty, write the header.\n\t\tif os.path.exists(os.path.join(self.test_result_comparison_path, 'test_result_comparison.csv')):\n\t\t\tdf = pd.read_csv(os.path.join(self.test_result_comparison_path, 'test_result_comparison.csv'))\n\t\t\tif df.empty:\n\t\t\t\twith open(os.path.join(self.test_result_comparison_path, 'test_result_comparison.csv'), 'a', encoding = 'utf-8', newline= '') as f:\n\t\t\t\t\twr = csv.writer(f)\n\t\t\t\t\twr.writerow(['Down Sample Factor', 'Model type', 'Optimizer', 'Initial learning rate', 'Loss function', 'Batch size', 'Best or last', 'Stop Epoch', 'Unet Layer', 'First Layer Kernel Number', 'Test loss'])\n\t\t\t\t\tf.close()\n\t\telse:\n\t\t\twith open(os.path.join(self.test_result_comparison_path, 'test_result_comparison.csv'), 'a', encoding = 'utf-8', newline= '') as f:\n\t\t\t\twr = csv.writer(f)\n\t\t\t\twr.writerow(['Down Sample Factor', 'Model type', 'Optimizer', 'Initial learning rate', 'Loss function', 'Batch size', 'Best or last', 'Stop Epoch', 'Unet Layer', 'First Layer Kernel Number', 'Test loss'])\n\t\t\t\tf.close()\n\n # Always fill in the test result for this trial.\n\t\twith open(os.path.join(self.test_result_comparison_path, 'test_result_comparison.csv'), 'a', encoding = 'utf-8', newline= '') as f:\n\t\t\twr = csv.writer(f)\n\t\t\twr.writerow([self.down_factor, self.model_type, self.optimizer_choice, self.initial_lr, self.loss_function_name, self.batch_size, which_unet, stop_epoch, self.UnetLayer, self.first_layer_numKernel, '%.6f' % test_epoch_loss])\n\t\t\tf.close()","sub_path":"Cell_segmentation/Prediction_only/deep_learning_model/.ipynb_checkpoints/solver-checkpoint.py","file_name":"solver-checkpoint.py","file_ext":"py","file_size_in_byte":19940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"472746248","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport 
json\n\nfrom .baseframe import BaseFrame\n\n\nclass PersonalityTestFrame(BaseFrame):\n    def __init__(self, master=None, fname=None, path='./'):\n        super().__init__(master)\n        self.path = path\n        self.fname = fname if fname is not None else 'personalitytest.json'\n\n        self.qdata = self.load_json()\n        self.title = self.qdata['タイトル']\n        self.introduction = self.qdata['説明']\n        self.scales = self.qdata['尺度']\n        self.questions = self.qdata['質問']\n        self.qnum = 1 # 1 ~ 12\n        self.qstr = tk.StringVar(value=f'{self.questions[str(self.qnum)][\"question\"]} ({self.qnum}/12)')\n        self.qans = [None] * 12\n\n        self.create_widgets()\n\n    def create_widgets(self):\n        self.title_label = tk.Label(self, text=self.title, font=('', 30, 'bold'))\n        self.title_label.pack(pady=(180,50))\n\n        self.intro_label = tk.Label(self, text=self.introduction, font=('', 15, ''))\n        self.intro_label.pack(pady=30)\n\n        self.scale_frame = tk.Frame(self)\n        for i in range(5, 0, -1):\n            tk.Label(self.scale_frame, text=f'{i}.{self.scales[str(i)]}',\n                     font=('', 15, '')).pack(padx=20, side=tk.LEFT)\n        self.scale_frame.pack()\n\n        self.centerframe = tk.Frame(self)\n        self.centerframe.pack(pady=(100, 10))\n\n        self.question_label = tk.Label(self.centerframe, textvariable=self.qstr, font=('', 30, ''))\n        self.question_label.pack(anchor=tk.N)\n\n        self.radio_frame = tk.Frame(self.centerframe)\n        self.radio_value = tk.IntVar()\n        for i in range(5, 0, -1):\n            tk.Radiobutton(self.radio_frame, text=i, value=i, font=('', 20, 'bold'),\n                           variable=self.radio_value,\n                           command=self.radio_clicked\n                           ).pack(padx=20, side=tk.LEFT)\n        self.radio_frame.pack()\n        self.back_button = tk.Button(self.centerframe, text='前の質問に戻る', \n                                     font=('', 15, ''), command=self.previous_question)\n        self.back_button.pack(side=tk.LEFT, pady=10)\n        self.go_button = tk.Button(self.centerframe, text='次の質問に進む',\n                                   font=('', 15, ''), command=self.next_question)\n        self.go_button.pack(side=tk.RIGHT, pady=10)\n\n        self.submit_button = tk.Button(self, text='提出する', font=('', 20, ''), command=self.submit)\n        self.submit_button.pack(expand=True)\n        return\n\n    def radio_clicked(self):\n        value = self.radio_value.get() # 1 ~ 5\n        reverse = self.questions[str(self.qnum)]['reverse'] # true/false\n        self.qans[self.qnum-1] = value if not reverse else 6 - value\n        self.logger.info(self.qans)\n    \n    def next_question(self):\n        if self.radio_value.get() == 0:\n            messagebox.showwarning('Warning', '質問に回答してください')\n            return \n        if self.qnum < 12:\n            self.qnum += 1\n            self.qstr.set(f'{self.questions[str(self.qnum)][\"question\"]} ({self.qnum}/12)')\n            if self.qans[self.qnum-1] is None: # when the question shown next is still unanswered\n                self.radio_value.set(0) # clear the radio button selection\n            else: # when it has already been answered\n                self.radio_value.set(self.qans[self.qnum-1]) # check the button for that answer\n    def previous_question(self):\n        if self.qnum > 1:\n            self.qnum -= 1\n            self.radio_value.set(self.qans[self.qnum-1])\n            self.qstr.set(f'{self.questions[str(self.qnum)][\"question\"]} ({self.qnum}/12)')\n\n    def submit(self):\n        if None in self.qans:\n            messagebox.showerror('Error', '全ての項目に回答してください')\n            return\n        if messagebox.askyesno('提出確認', '提出してよろしいですか?'):\n            self.save()\n            self.finish()\n\n    def save(self):\n        self.logger.info(f'saved answer: {self.qans}')\n        score = self.calc_score()\n        self.logger.info(f'score {score}')\n        exp = self.explain(score)\n        self.logger.info(f'explanation {exp}')\n\n        if '.json' not in self.fname:\n            self.fname += '.json'\n        with open(self.path + self.fname, encoding='utf-8', mode='w') as f:\n            json.dump({'回答':self.qans, 'スコア':score, '解釈':exp}, f, ensure_ascii=False, indent=4)\n\n    def load_json(self):\n        with 
open('./config/personality.json', 'r', encoding='utf-8') as f:\n            jsondata = json.load(f)\n        return jsondata\n\n    def calc_score(self):\n        scales = self.qdata['次元']\n        score = {}\n        for k, idxs in self.qdata['計算'].items():\n            score[k] = sum([self.qans[i-1] for i in idxs])\n        return score\n\n    def explain(self, score:dict):\n        exp = self.qdata['解釈']\n        result = {}\n        for k, v in score.items():\n            for e, rng in exp[k].items():\n                if v in rng:\n                    result[k] = e\n                    break\n        return result","sub_path":"tasks/frames/personalitytest.py","file_name":"personalitytest.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"356932426","text":"from __future__ import division\nfrom builtins import hex\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom past.utils import old_div\nimport numpy as np\nimport threading\nimport logging\nimport socket\nimport struct\n\n\nclass Spectra(object):\n    \"\"\" REACH spectrometer data receiver \"\"\"\n\n    def __init__(self, ip, port=4660, nof_signals=2, nof_channels=16384, floating_point=True):\n        \"\"\" Class constructor:\n        @param ip: IP address to bind receiver to \n        @param port: Port to receive data on \"\"\"\n\n        # Initialise parameters\n        self._use_floating_point = floating_point\n        self._nof_signals_per_fpga = nof_signals // 2\n        self._nof_channels = nof_channels\n        self._nof_signals = nof_signals\n        self._port = port\n        self._ip = ip\n\n        # Create socket reference\n        self._socket = None\n\n        # Spectra containers\n        data_type = np.double if self._use_floating_point else np.uint64\n        self._data_reassembled = np.zeros((2, self._nof_signals_per_fpga * self._nof_channels), dtype=data_type)\n        self._data_buffer = np.zeros((self._nof_signals, self._nof_channels), dtype=data_type)\n        self._data_temporary_buffer = np.zeros((self._nof_signals, self._nof_channels), dtype=data_type)\n\n        # Packet header content \n        self._packet_counter = 0\n        self._logical_channel_id = 0\n        self._payload_length = 0\n        self._sync_time = 0\n        self._timestamp = 0\n        self._lmc_mode = 0\n        self._start_channel_id = 0\n        self._start_antenna_id = 0\n        self._buffer_id = 0\n        self._offset = 9 * 8\n\n        # Payload data parameters\n        self._data_width = 64\n        self._data_byte = self._data_width // 8\n        self._bytes_per_packet = 1024\n        self._words_per_packet = self._bytes_per_packet // self._data_width\n        self._expected_nof_packets = (self._nof_signals * self._nof_channels * self._data_byte) // self._bytes_per_packet\n\n        # Book keeping\n        self._received_packets = 0\n        self._previous_timestamp = 0\n\n        # Received spectra placeholder\n        self._receiver_thread = None\n        self._received_spectra = None\n        self._received_timestamps = None\n\n    def initialise(self):\n        \"\"\" Initialise socket and set local buffers \"\"\"\n        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        self._socket.bind((self._ip, self._port))\n        self._socket.settimeout(2)\n        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2 * 1024 * 1024)\n\n    def receive_spectrum(self):\n        \"\"\" Wait for a SPEAD packet to arrive \"\"\"\n\n        # Clear receiver \n        self._clear_receiver()\n\n        # Check if receiver has been initialised\n        if self._socket is None:\n            logging.error(\"Spectrum receiver not initialised\")\n            return\n\n        # Loop until required to stop\n        while True:\n            # Try to acquire packet\n            try:\n                packet, _ = self._socket.recvfrom(9000)\n            except socket.timeout:\n                logging.info(\"Socket timeout\")\n                continue\n\n            # We have a packet, check if it is a valid packet\n            if 
not self._decode_spead_header(packet):\n continue\n\n # Valid packet, extract payload and add to buffer\n unpack_type = 'd' if self._use_floating_point else 'q'\n payload = struct.unpack('<' + unpack_type * (self._payload_length // 8), packet[self._offset:])\n self._add_packet_to_buffer(payload)\n\n # If the buffer is full, finalize packet buffer\n if self._detect_full_buffer():\n self._finalise_buffer()\n return self._sync_time + self._timestamp * 32768 * 2.5e-9, self._data_buffer\n\n def _receive_spectra_threaded(self, nof_spectra=1):\n \"\"\" Receive specified number of thread, should run in a separate thread \"\"\"\n\n self._received_spectra = np.zeros((nof_spectra, self._nof_signals, self._nof_channels))\n self._received_timestamps = np.zeros((nof_spectra))\n for i in range(nof_spectra):\n self._received_timestamps[i], self._received_spectra[i] = self.receive_spectrum()\n\n def start_receiver(self, nof_spectra):\n \"\"\" Receive specified number of spectra \"\"\"\n\n # Create and start thread and wait for it to stop\n self._receiver_thread = threading.Thread(target=self._receive_spectra_threaded, args=(nof_spectra,))\n self._receiver_thread.start()\n\n def wait_for_receiver(self):\n \"\"\" Wait for receiver to finish \"\"\"\n if self._receiver_thread is None:\n logging.error(\"Receiver not started\")\n\n self._receiver_thread.join()\n\n # Return result\n return self._received_timestamps, self._received_spectra\n\n def _decode_spead_header(self, packet):\n \"\"\" Decode SPEAD packet header \n @param: Received packet header \"\"\"\n\n # Flag specifying whether packet is a valid SPEAD packet\n valid_packet = False\n\n # Unpack SPEAD header items\n try:\n items = struct.unpack('>' + 'Q' * 9, packet[0:8 * 9])\n except:\n logging.error(\"Error processing packet\")\n return False\n\n # Process all spead items\n for idx in range(len(items)):\n spead_item = items[idx]\n spead_id = spead_item >> 48\n val = spead_item & 0x0000FFFFFFFFFFFF\n if spead_id == 0x5304 and idx == 0:\n valid_packet = True\n elif spead_id == 0x8001:\n heap_counter = val\n self._packet_counter = heap_counter & 0xFFFFFF\n self._logical_channel_id = heap_counter >> 24\n elif spead_id == 0x8004:\n self._payload_length = val\n elif spead_id == 0x9027:\n self._sync_time = val\n elif spead_id == 0x9600:\n self._timestamp = val\n elif spead_id == 0xA004:\n self._lmc_mode = val & 0xEF\n # Check whether packet is floating point and how the spectra receiver is programmed\n if not ((val >> 7) & 0x1 and self._use_floating_point):\n logging.error(\"Firmware and spectra floating point settings do not match (firmware: {}, sw: {})\".format(\n \"on\" if (val >> 7) & 0x1 else \"off\",\n \"on\" if self._use_floating_point else \"off\"))\n return\n elif spead_id == 0xA002:\n self._start_channel_id = (val & 0x000000FFFF000000) >> 24\n self._start_antenna_id = (val & 0x000000000000FF00) >> 8\n elif spead_id == 0xA003 or spead_id == 0xA001:\n self._buffer_id = (val & 0xFFFFFFFF) >> 16\n elif spead_id == 0x3300:\n pass\n else:\n logging.error(\"Error in SPEAD header decoding!\")\n logging.error(\"Unexpected item {} at position {}\".format(hex(spead_item), \" at position \" + str(idx)))\n\n return valid_packet\n\n def _add_packet_to_buffer(self, data):\n \"\"\" Add packet content to buffer \"\"\"\n index = self._start_channel_id * self._nof_signals_per_fpga\n self._data_reassembled[self._start_antenna_id // self._nof_signals_per_fpga,\n index:index + self._payload_length // self._data_byte] = data\n self._received_packets += 1\n\n def 
_finalise_buffer(self):\n        \"\"\" Demux and descramble buffer for persisting \"\"\"\n\n        # De-multiplex buffer\n        if self._nof_signals_per_fpga == 1:\n            self._data_temporary_buffer[:] = self._data_reassembled\n        else:\n            for b in range(self._nof_signals_per_fpga):\n                for n in range(self._nof_signals_per_fpga * self._nof_channels):\n                    self._data_temporary_buffer[(n % self._nof_signals_per_fpga) + self._nof_signals_per_fpga * b,\n                                                n // self._nof_signals_per_fpga] = self._data_reassembled[b, n]\n\n        # Descramble buffer\n        if self._nof_signals_per_fpga != 1:\n            for b in range(self._nof_signals):\n                for n in range(self._nof_channels):\n                    if n % 2 == 0:\n                        channel = old_div(n, 2)\n                    else:\n                        channel = old_div(n, 2) + self._nof_channels // 2\n                    self._data_buffer[b, channel] = self._data_temporary_buffer[b, n]\n        else:\n            self._data_buffer[:] = self._data_temporary_buffer\n\n        # Reverse bits if use floating point\n        if self._use_floating_point:\n            self._data_temporary_buffer[:] = 0\n            for b in range(self._nof_signals):\n                for n in range(self._nof_channels):\n                    # Perform reversal\n                    channel = self._reverse_bit(n)\n                    self._data_temporary_buffer[:][b, channel] = self._data_buffer[b, n]\n\n            # Copy final buffer\n            self._data_buffer[:] = self._data_temporary_buffer\n\n    def _reverse_bit(self, num):\n        step = int(np.log2(self._nof_channels))\n        result = 0\n        for n in range(step):\n            result += (num & 1) << (step - n - 1)\n            num >>= 1\n        return result\n\n    def _detect_full_buffer(self):\n        \"\"\" Check whether we have a full buffer \"\"\"\n        # Timestamp check\n        if self._previous_timestamp != self._timestamp:\n            self._received_packets = 1\n            self._previous_timestamp = self._timestamp\n\n        # If number of received packets is the expected number, return True, otherwise False\n        if self._received_packets == self._expected_nof_packets:\n            self._received_packets = 0\n            return True\n        else:\n            return False\n\n    def _clear_receiver(self):\n        \"\"\" Reset receiver \"\"\"\n        self._received_packets = 0\n        self._previous_timestamp = 0\n\n        self._data_buffer[:] = 0\n        self._data_reassembled[:] = 0\n        self._data_temporary_buffer[:] = 0\n\n\nif __name__ == \"__main__\":\n    from optparse import OptionParser\n\n    parser = OptionParser()\n    parser.add_option(\"-p\", dest=\"port\", default=4660, type=int, help=\"UDP port (default:4660)\")\n    parser.add_option(\"-i\", dest=\"ip\", default=\"10.0.10.40\", help=\"IP (default: 10.0.10.40)\")\n    (config, args) = parser.parse_args()\n\n    spectra = Spectra(ip=config.ip, port=config.port)\n    spectra.initialise()\n    spectrum = 10 * np.log10(spectra.receive_spectrum()[1])\n    print(spectrum.shape)\n\n    from matplotlib import pyplot as plt\n\n    plt.plot(spectrum[0], label=\"Channel 0\")\n    plt.plot(spectrum[1], label=\"Channel 1\")\n    plt.legend()\n    plt.show()\n","sub_path":"reach_ctrl/spectrometer/spectra.py","file_name":"spectra.py","file_ext":"py","file_size_in_byte":10774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"376661204","text":"print('Sequência de Fibonacci!')\ntermos = int(input('Quantos termos você quer mostrar? '))\nt1 = 0\nt2 = 1\nprint(f'{t1} > {t2} ', end='')\ncont = 3\nwhile cont <= termos:\n    t3 = t1 + t2\n    print(f'> {t3} ', end='')\n    t1 = t2\n    t2 = t3\n    cont += 1\nprint('FIM! 
')","sub_path":"Curso_Python_MEGAREVISAO/aprendendo/exercicios/ex063.py","file_name":"ex063.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"512704513","text":"from argparse import ArgumentParser\nfrom pydub import AudioSegment\nimport multiprocessing\nimport os\nimport tensorflow as tf\n\n\ndef mp3_to_wav(filepath):\n try:\n audio_segment = AudioSegment.from_mp3(filepath)\n audio_segment.export('{}.wav'.format(filepath[:-4]), format='wav')\n except Exception:\n print(filepath, \" ---> mp3_to_wav failed\")\n pass\n os.remove(filepath)\n\n\ndef mp3_converter_job(mp3_filenames):\n for filename in mp3_filenames:\n if filename[-4:] != '.mp3':\n continue\n print(filename)\n mp3_to_wav(filename)\n\n\ndef main_preprocess(args):\n print('Converting all Common Voice MP3s to WAV...')\n clips_dir = os.path.join(args.data_dir, 'clips')\n all_clips = os.listdir(clips_dir)\n all_clips = [os.path.join(clips_dir, clip) for clip in all_clips]\n num_total = len(all_clips)\n num_cpus = multiprocessing.cpu_count()\n pool = multiprocessing.Pool(num_cpus)\n job_size = num_total // num_cpus\n\n jobs = []\n for _ in range(num_cpus - 1):\n jobs.append(all_clips[:job_size])\n all_clips = all_clips[job_size:]\n jobs.append(all_clips)\n\n pool.map_async(mp3_converter_job, jobs)\n pool.close()\n pool.join()\n print('Removing missing files...')\n return\n\n\ndef read_file_test(filepath):\n try:\n audio_raw = tf.io.read_file(filepath)\n audio, sr = tf.audio.decode_wav(audio_raw)\n except:\n return False\n return True\n\n\ndef remove_missing(data_dir, fname, cnt_list):\n clips_dir = os.path.join(data_dir, 'clips')\n\n old_filepath = os.path.join(data_dir, '{}.tsv'.format(fname))\n new_filepath = os.path.join(data_dir, '{}-tmp.tsv'.format(fname))\n\n with open(old_filepath, 'r') as old_f:\n with open(new_filepath, 'w') as new_f:\n new_f.write(next(old_f))\n for line in old_f:\n audio_fn = line.split('\\t')[1][:-4] + '.wav'\n cur_wav_file = os.path.join(clips_dir, audio_fn)\n if os.path.exists(cur_wav_file):\n cnt_list[0] += 1\n new_f.write(line)\n # if read_file_test(cur_wav_file):\n # cnt_list[0] += 1\n # new_f.write(line)\n # else:\n # print(cur_wav_file, \" ----> read wav failed.\")\n # os.remove(cur_wav_file)\n else:\n print(audio_fn, \" don't exist\")\n\n os.remove(old_filepath)\n os.rename(new_filepath, old_filepath)\n\n\ndef check_file(args):\n tsv_files = ['dev', 'invalidated', 'other', 'test', 'train', 'validated']\n cnt_list = [0]\n for _file in tsv_files:\n remove_missing(args.data_dir, _file, cnt_list)\n print(\"cnt = %d \" % cnt_list[0]) # 896452\n print(\"all cnt = %d \" % cnt_list[0]) # 896452\n print('remove_missing Done.')\n\n\n\nif __name__ == '__main__':\n ap = ArgumentParser()\n ap.add_argument('--data_dir', type=str, default='../data/en', help='Path to common voice data directory.')\n args = ap.parse_args()\n # main_preprocess(args)\n check_file(args)\n","sub_path":"preprocess_common_voice.py","file_name":"preprocess_common_voice.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"126770967","text":"#!/usr/bin/env python\n\"\"\" \"\"\"\n\n# Standard library modules.\nimport unittest\nimport logging\nimport os\nimport sys\nimport subprocess\n\n# Third party modules.\n\n# Local modules.\nfrom pymontecarlo.testcase import TestCase\nfrom pymontecarlo.util.path import get_config_dir\nfrom pymontecarlo._settings 
import Settings\n\n# Globals and constants variables.\n\nclass Test__main__(TestCase):\n\n    def setUp(self):\n        super().setUp()\n\n        # Exchange settings.h5\n        self.filepath = os.path.join(get_config_dir(), Settings.DEFAULT_FILENAME)\n\n        if os.path.exists(self.filepath):\n            self.filepath_backup = self.filepath + '.backup'\n            os.rename(self.filepath, self.filepath_backup)\n        else:\n            self.filepath_backup = None\n\n        settings = Settings()\n        settings.write(self.filepath)\n\n    def tearDown(self):\n        super().tearDown()\n\n        os.remove(self.filepath)\n\n        if self.filepath_backup:\n            os.rename(self.filepath_backup, self.filepath)\n\n    def testprograms(self):\n        args = [sys.executable, '-m', 'pymontecarlo', '--programs']\n        process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out = process.stderr.decode('ascii')\n        self.assertEqual('Program', out[:7])\n\nif __name__ == '__main__': #pragma: no cover\n    logging.getLogger().setLevel(logging.DEBUG)\n    unittest.main()\n","sub_path":"pymontecarlo/test__main__.py","file_name":"test__main__.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"267475023","text":"from django.conf.urls import url\n\nfrom zap_apps.zap_analytics import views\n\nurlpatterns = [\n    # NOTE: 'item_type' is a placeholder group name; only the product|user alternation survives in the source.\n    url(r'^(?P<item_type>product|user)(/?)$', views.Analytics.as_view()),\n    url(r'^recently_viewed_products/$', views.RecentProductViewed.as_view()),\n    url(r'^initiate_analytics_session/$', views.InitiateAnalyticsSession.as_view()),\n    url(r'^end_analytics_session/$', views.EndAnalyticsSession.as_view()),\n    url(r'^track_analytics_events/$', views.TrackAnalyticsEvents.as_view()),\n    url(r'^seller_analytics/$', views.SellerAnalytics.as_view()),\n    url(r'^download_seller_analytics/$', views.DownloadSellerAnalytics.as_view()),\n]","sub_path":"zap_apps/zap_analytics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"587706159","text":"\"\"\"\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom energy_demand.plotting import basic_plot_functions\nfrom energy_demand.plotting import plotting_styles\nfrom energy_demand.basic import conversions\n\ndef run(results, lookups, fig_name, plotshow=False):\n    \"\"\"Plot lines with total energy demand for all enduses\n    per fueltype over the simulation period. Annual GWh\n    are converted into GW.\n\n    Arguments\n    ---------\n    results : dict\n        Results for every year and fueltype (yh)\n    lookups : dict\n        Lookup fueltypes\n    fig_name : str\n        Figure name\n\n    Note\n    ----\n    Values are divided by 1'000\n    \"\"\"\n    print(\"... 
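# --- Aside: a runnable sanity check of the reconstructed route regex in the
# urls.py record above. The group name 'item_type' is a placeholder assumption;
# only the product|user alternation and the trailing (/?) are attested.
import re

pattern = re.compile(r'^(?P<item_type>product|user)(/?)$')

for path in ('product', 'user/', 'products'):
    match = pattern.match(path)
    print(path, '->', match.group('item_type') if match else 'no match')
# prints: product -> product, user/ -> user, products -> no match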
plot fuel per fueltype for whole country over annual timesteps\")\n\n # Set figure size\n plt.figure(figsize=basic_plot_functions.cm2inch(14, 8))\n\n # Initialise (number of enduses, number of hours to plot)\n y_values_fueltype = {}\n\n for fueltype_str, fueltype_int in lookups['fueltypes'].items():\n\n # Read out fueltype specific max h load\n data_years = {}\n for year, data_year in results.items():\n tot_gwh_fueltype_y = np.sum(data_year[fueltype_int])\n\n #Conversion: Convert gwh per years to gw\n yearly_sum_gw = tot_gwh_fueltype_y\n\n yearly_sum_twh = conversions.gwh_to_twh(yearly_sum_gw)\n\n data_years[year] = yearly_sum_twh #yearly_sum_gw\n\n y_values_fueltype[fueltype_str] = data_years\n\n # -----------------\n # Axis\n # -----------------\n base_yr, year_interval = 2015, 5\n end_yr = list(results.keys())\n\n major_ticks = np.arange(\n base_yr,\n end_yr[-1] + year_interval,\n year_interval)\n\n plt.xticks(major_ticks, major_ticks)\n\n # ----------\n # Plot lines\n # ----------\n color_list_selection = plotting_styles.color_list_selection()\n\n for fueltype_str, fuel_fueltype_yrs in y_values_fueltype.items():\n\n if len(np.array(list(fuel_fueltype_yrs.values()))) > 2:\n smooth_x_line_data, smooth_y_line_data = basic_plot_functions.smooth_line(\n np.array(list(fuel_fueltype_yrs.keys())),\n np.array(list(fuel_fueltype_yrs.values())))\n else:\n smooth_x_line_data = list(fuel_fueltype_yrs.keys())\n smooth_y_line_data = list(fuel_fueltype_yrs.values())\n\n plt.plot(\n smooth_x_line_data, # years\n smooth_y_line_data, # yearly data per fueltype\n color=str(color_list_selection.pop()),\n label=fueltype_str)\n\n # ----\n # Axis\n # ----\n plt.ylim(ymin=0) #no upper limit to xmax\n\n # ------------\n # Plot legend\n # ------------\n plt.legend(\n ncol=2,\n loc=2,\n prop={\n 'family': 'arial',\n 'size': 10},\n frameon=False)\n\n # ---------\n # Labels\n # ---------\n plt.ylabel(\"TWh\")\n plt.xlabel(\"year\")\n plt.title(\"tot annual ED per fueltype\")\n\n # Tight layout\n plt.tight_layout()\n plt.margins(x=0)\n\n plt.savefig(fig_name)\n\n if plotshow:\n plt.show()\n plt.close()\n else:\n plt.close()\n ","sub_path":"energy_demand/plotting/fig_fuels_enduses_y.py","file_name":"fig_fuels_enduses_y.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"281997307","text":"import numpy as np\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\n\ndef run_grid_search(X_train, y_train, features=None):\n\n\n log_params = {\n 'class_weight':['balanced', None],\n 'penalty':['l2', 'l1'] }\n\n svm_params = {\n 'kernel':['rbf', 'poly'],\n 'C':[.5, 1, 1.5] }\n\n hyperparams = [rf,tfidf]\n models = [RandomForestClassifier(), TfidfVectorizer()]\n\n if features and ('RandomForestClassifier' not in [m.__class__.__name__ for m in models]):\n models.append(RandomForestClassifier())\n hyperparams.append({'n_estimators':[500]})\n","sub_path":"pipeline/grid_search.py","file_name":"grid_search.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"511856193","text":"import logging\n\nfrom pymongo import MongoClient\n\nfrom src import config\nfrom src.mongo_adapter.MongoDatabaseWrapper import MongoDatabaseWrapper\n\n\n################################################################################\n# class: 
MongoClientSingleton\n################################################################################\n\nclass MongoClientSingleton :\n\n ################################################################################\n # class: __MongoClientSingleton\n ################################################################################\n\n class __MongoClientSingleton :\n\n ### function: __init__ ###\n\n def __init__ (self) :\n try :\n self.connection = MongoClient (config.MONGO_URL)\n\n except Exception as exc :\n logging.error (\"__MongoClientSingleton: __init__: Error connecting to \" + config.MONGO_URL)\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n ### function: getDatabase ###\n\n def getDatabase (self, databaseName) :\n try:\n return MongoDatabaseWrapper (self.connection.get_database (databaseName))\n\n except Exception as exc :\n logging.error (\"__MongoClientSingleton: getDatabase: Error getting database: '\" + databaseName + \"'\")\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n ### function: getCollection ###\n\n def getCollection (self, collectionName, databaseName = config.MONGO_DATABASE) :\n return self.getDatabase (databaseName).getCollection (collectionName)\n\n\n ### function: close ###\n\n def close (self) :\n try:\n self.connection.close ()\n\n except Exception as exc:\n logging.error (\"__MongoClientSingleton: Error closing connection\")\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n instance = None\n\n\n ### function: __new__ ###\n\n def __new__ (cls) :\n try :\n if MongoClientSingleton.instance is None :\n MongoClientSingleton.instance = MongoClientSingleton.__MongoClientSingleton ()\n\n return MongoClientSingleton.instance\n\n except Exception as exc :\n logging.error (\"MongoClientSingleton: __new__: Error creating object '__MongoClientSingleton'\")\n logging.error (\"[Exception: \" + str (exc) + \"]\")","sub_path":"src/mongo_adapter/MongoClientSingleton.py","file_name":"MongoClientSingleton.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"646060069","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# 구글 뉴스 클리핑\ndef main():\n url = 'https://news.google.com/search?q=%ED%8C%8C%EC%9D%B4%EC%8D%AC&hl=ko&gl=KR&ceid=KR%3Ako'\n\n res = requests.get(url)\n soup = BeautifulSoup(res.content, 'html.parser')\n\n news_clipping = data_extract(soup)\n for news_section in news_clipping:\n for k, v in news_section.items():\n print(\"{} : {}\".format(k, v))\n\n\ndef data_extract(soup):\n # 뉴스영역 가져오기\n section = soup.select('div.xrnccd > article')\n\n # 각 섹션에서 링크, 제목, 내용, 출처, 등록일시 추출\n news = []\n news_item = {}\n base_url = 'https://news.google.com'\n\n for item in section:\n # 링크와 제목 태그 가져오기\n link_title = item.select_one(\"h3 a\")\n # 뉴스 기사 링크 추출\n news_item['주소'] = base_url + link_title['href'][1:]\n # 제목 추출\n news_item['제목'] = link_title.get_text()\n # 내용 추출\n news_item['내용'] = item.select_one('div > span').get_text()\n # 작성자\n news_item['작성자'] = item.select_one('div > div > a').get_text()\n # 작성일자\n news_item['작성일자_시간'] = item.select(\"div > div > time\")\n # 작성일자와 시간이 없는 뉴스기사가 존재\n if news_item['작성일자_시간']:\n news_item['작성일자_시간'] = (news_item['작성일자_시간'][0])['datetime'].split('T')\n news_item['작성일자'] = news_item['작성일자_시간'][0]\n news_item['작성시간'] = news_item['작성일자_시간'][1][:-1]\n else:\n news_item['작성일자'] = \"\"\n news_item['작성시간'] = \"\"\n\n # dict 구조로 만든 뉴스기사에 대한 정보를 리스트에 추가\n news.append(news_item)\n\n news_item = {}\n\n return 
news\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"beautifulsoup/beautifulsoup17-1.py","file_name":"beautifulsoup17-1.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"17592125","text":"import string\nimport numpy as np\n\ndef counter(s):\n Length = len(s)\n hist_current = np.zeros(26,dtype='int64')\n i = 0#文字数カウンタ\n while i < Length:\n if s[i] in integers:#数字の場合\n figure = i+1\n while s[figure] in integers:\n figure += 1\n \n repeat_num = int(s[i:figure])\n i = figure\n \n if s[i] == '(':#数字の次が(の場合\n leftp, rightp = 1,0\n point = i+1\n while leftp != rightp:\n if s[point] =='(':\n leftp += 1\n elif s[point] == ')':\n rightp += 1\n point += 1\n \n hist_current += repeat_num * counter(s[i+1:point-1])\n i = point\n continue\n \n else:#数字のつぎがアルファベットの場合\n hist_current[hash[s[i]]] += repeat_num\n i += 1\n continue\n \n elif s[i] == '(':\n leftp, rightp = 1,0\n point = i+1\n while leftp != rightp:\n if s[point] =='(':\n leftp += 1\n elif s[point] == ')':\n rightp += 1\n point += 1\n \n hist_current += counter(s[i+1:point-1])\n i = point\n continue\n \n else:#アルファベットの場合\n hist_current[hash[s[i]]] += 1\n i += 1\n continue\n \n return hist_current\n \nS = input()\nalpha = string.ascii_lowercase#a~zの作成\nintegers = set([str(i) for i in range(10)])#一桁の整数の文字の作成\nhash = {k : v for k, v in zip(alpha, range(26))}#ハッシュの作成 検索用\n\nhistogram = counter(S)\nfor k, v in zip(alpha, histogram):\n print('{} {}'.format(k,v))","sub_path":"python/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"206277943","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2014--, The Qiita Development Team.\n#\n# Distributed under the terms of the BSD 3-clause License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# -----------------------------------------------------------------------------\n\nfrom os import mkdir\nfrom os.path import basename, join, exists\n\nfrom future.utils import viewitems\nfrom functools import partial\nfrom subprocess import Popen\nfrom contextlib import contextmanager\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom gzip import open as gzopen\n\nfrom qiita_client import ArtifactInfo\nfrom qiita_client.util import system_call, get_sample_names_by_run_prefix\n\n\n@contextmanager\ndef make_temp_directory(out_dir):\n temp_dir = mkdtemp(dir=out_dir)\n try:\n yield temp_dir\n finally:\n rmtree(temp_dir)\n\n\ndef make_read_sets_per_sample(files, map_file):\n \"\"\"Recovers read set information from kneaddata output\n\n Parameters\n ----------\n files : list of str\n The list of sequence filepaths from the kneaddata artifact\n map_file : str\n The path to the mapping file\n\n Returns\n -------\n read_sets: list of tup\n list of 7-tuples with run prefix, sample name, fwd paired read fp,\n rev paired read fp, fwd unpaired read fp, rev unpaired read fp, and\n single fwd read fp.\n\n Raises\n ------\n ValueError\n If there are files matching the kneaddata paired file naming convention\n (_paired_1.fastq.gz, _unmatched_1.fastq.gz) but which don't have all 4\n outputs.\n ValueError\n If there are files matching the kneaddata paired file naming convention\n (_paired_1.fastq.gz, _unmatched_1.fastq.gz) in addition to fastq.gz\n files that do not match naming convention (the 
latter are interpreted\n as single read files).\n ValueError\n If there are no *.fastq.gz files in the artifact\n\n Notes\n -----\n \"\"\"\n\n # sort through the filenames and bin into sequence type lists\n fwd_paired = []\n fwd_unpaired = []\n rev_paired = []\n rev_unpaired = []\n single = []\n\n for fp in files:\n if fp.endswith('_paired_1.fastq.gz'):\n fwd_paired.append(fp)\n elif fp.endswith('_paired_2.fastq.gz'):\n rev_paired.append(fp)\n elif fp.endswith('_unmatched_1.fastq.gz'):\n fwd_unpaired.append(fp)\n elif fp.endswith('_unmatched_2.fastq.gz'):\n rev_unpaired.append(fp)\n elif fp.endswith('.fastq.gz'):\n single.append(fp)\n\n # check that seq lists are same len\n if not (len(fwd_paired) == len(fwd_unpaired) ==\n len(rev_paired) == len(rev_unpaired)):\n raise ValueError('There are not equal numbers of forward paired, '\n 'forward unpaired, reverse paired, and reverse '\n 'unpaired sequences.')\n\n # check that there aren't both paired and single sequences\n if len(single) > 0 and len(fwd_paired) > 0:\n raise ValueError('There are both paired-end and single-end sequences.')\n\n # fill out unused seq file types with None and check that there exist files\n if len(fwd_paired) > 0:\n single = [None] * len(fwd_paired)\n elif len(single) > 0:\n fwd_paired = [None] * len(single)\n fwd_unpaired = [None] * len(single)\n rev_paired = [None] * len(single)\n rev_unpaired = [None] * len(single)\n else:\n raise ValueError('There are no *.fastq.gz files in the artifact')\n\n # make the 5-tuple of sorted sequence filepaths\n fwd_paired.sort()\n rev_paired.sort()\n fwd_unpaired.sort()\n rev_unpaired.sort()\n single.sort()\n\n seq_files = zip(fwd_paired, rev_paired, fwd_unpaired, rev_unpaired, single)\n\n # get run prefixes\n # These are prefixes that should match uniquely to forward reads\n # sn_by_rp is dict of samples keyed by run prefixes\n sn_by_rp = get_sample_names_by_run_prefix(map_file)\n\n # make sets\n read_sets = []\n used_prefixes = set()\n\n for f_p, r_p, f_u, r_u, s in seq_files:\n # pick file basename\n if f_p is None:\n fn = basename(s)\n else:\n fn = basename(f_p)\n\n # iterate over run prefixes and make sure only one matches\n run_prefix = None\n for rp in sn_by_rp:\n if fn.startswith(rp) and run_prefix is None:\n run_prefix = rp\n elif fn.startswith(rp) and run_prefix is not None:\n raise ValueError('Multiple run prefixes match this '\n 'file: %s\\n\\n' % fn)\n\n # make sure that we got one matching run prefix:\n if run_prefix is None:\n raise ValueError('No run prefix matching this read file: '\n '%s\\n\\n' % fn)\n\n if run_prefix in used_prefixes:\n raise ValueError('This run prefix matches multiple read '\n ' files: %s\\n\\n' % run_prefix)\n\n # if paired, check that all files match run prefix\n if s is None:\n if not (basename(r_p).startswith(run_prefix) and\n basename(f_u).startswith(run_prefix) and\n basename(r_u).startswith(run_prefix)):\n raise ValueError('Not all read files match run prefix.'\n '\\nRun prefix: %s\\nForward paired: '\n '%s\\nReverse paired: %s\\nForward '\n 'unpaired: %s\\nReverse unpaired: %s\\n'\n % (run_prefix, f_p, r_p, f_u, r_u))\n\n read_sets.append((run_prefix, sn_by_rp[run_prefix], f_p, r_p,\n f_u, r_u, s))\n\n used_prefixes.add(run_prefix)\n\n return(read_sets)\n\n\ndef make_single_fastq_gz(read_sets, out_dir, include_reverse):\n \"\"\"Recovers read set information from kneaddata output\n\n Parameters\n ----------\n read_sets: list of tup\n list of 7-tuples with run prefix, sample name, fwd paired read fp,\n rev paired read fp, fwd unpaired read 
fp, rev unpaired read fp, and\n single fwd read fp.\n out_dir : str\n The path to a directory in which to write files\n include_reverse : bool\n Whether to include reverse sequences in combined file\n\n Returns\n -------\n combined_reads: list of tup\n list of 3-tuples with run prefix, sample name, combined gzip fastq\n\n Raises\n ------\n OSError\n If the Popen process call to cat returns with value other than 0\n\n Notes\n -----\n If all input files are empty for a sample, will not output that sample in\n the `sample` list.\n \"\"\"\n combined_reads = []\n for run_prefix, sample, f_p, r_p, f_u, r_u, s in read_sets:\n out_fp = join(out_dir, '%s.fastq.gz' % run_prefix)\n\n if s is None:\n if include_reverse:\n cmd = 'cat %s %s %s %s > %s' % (f_p, r_p, f_u, r_u, out_fp)\n else:\n cmd = 'cat %s %s > %s' % (f_p, f_u, out_fp)\n else:\n cmd = 'cat %s > %s' % (s, out_fp)\n\n proc = Popen(cmd, shell=True)\n\n failure = proc.wait()\n\n if failure != 0:\n raise OSError('Problem with cat of files: %s' % cmd)\n\n # Check to make sure that the combined gzip is not totally empty\n with gzopen(out_fp, 'rb') as f:\n data = f.read(1).strip()\n\n if data:\n combined_reads.append((run_prefix, sample, out_fp))\n\n return(combined_reads)\n\n\ndef generate_humann2_analysis_commands(combined_reads, out_dir, parameters):\n \"\"\"Generates the HUMAnN2 commands\n\n Parameters\n ----------\n combined_reads: list of tup\n list of 3-tuples with run prefix, sample name, combined gzip fastq\n out_dir : str\n The job output directory\n parameters : dict\n The command's parameters, keyed by parameter name\n\n Returns\n -------\n list of str\n The HUMAnN2 commands\n\n Raises\n ------\n\n Notes\n -----\n \"\"\"\n cmds = []\n params = []\n for k, v in viewitems(parameters):\n if v is False or v in ['False', 'default', '']:\n continue\n if v is True or v == 'True':\n params.append('--%s' % k)\n else:\n params.append('--%s \"%s\"' % (k, v))\n\n # sort params to enable unit testing\n params.sort()\n\n for run_prefix, sample, fp in combined_reads:\n od = join(out_dir, run_prefix)\n # just making sure the output directory exists\n if not exists(od):\n mkdir(od)\n cmds.append('humann2 --input \"%s\" --output \"%s\" --output-basename '\n '\"%s\" --output-format biom %s' % (fp, od, sample,\n ' '.join(params)))\n\n return cmds\n\n\ndef _run_commands(qclient, job_id, commands, msg):\n for i, cmd in enumerate(commands):\n qclient.update_job_step(job_id, msg % i)\n std_out, std_err, return_value = system_call(cmd)\n if return_value != 0:\n error_msg = (\"Error running HUMANn2:\\nStd out: %s\\nStd err: %s\"\n % (std_out, std_err))\n return False, error_msg\n\n return True, \"\"\n\n\ndef humann2(qclient, job_id, parameters, out_dir):\n \"\"\"Run humann2 with the given parameters\n\n Parameters\n ----------\n qclient : qiita_client.QiitaClient\n The Qiita server client\n job_id : str\n The job id\n parameters : dict\n The parameter values to run HUMAnN2\n out_dir : str\n The path to the job's output directory\n\n Returns\n -------\n boolean, list, str\n The results of the job\n \"\"\"\n # Step 1 get the rest of the information need to run humann2\n qclient.update_job_step(job_id, \"Step 1 of 6: Collecting information\")\n artifact_id = parameters['input']\n # removing input from parameters so it's not part of the final command\n del parameters['input']\n\n # Get the artifact filepath information\n artifact_info = qclient.get(\"/qiita_db/artifacts/%s/\" % artifact_id)\n fps = artifact_info['files']\n\n # Get the artifact metadata\n 
prep_info = qclient.get('/qiita_db/prep_template/%s/'\n % artifact_info['prep_information'][0])\n qiime_map = prep_info['qiime-map']\n\n # Get the read set information\n read_sets = make_read_sets_per_sample(fps['preprocessed_fastq'], qiime_map)\n\n # Generate the per-sample combined gzip\n with make_temp_directory(out_dir) as temp_dir:\n read_set = parameters.pop('read-set')\n\n if read_set == 'fwd_rev':\n include_reverse = True\n elif read_set == 'fwd':\n include_reverse = False\n\n combined_reads = make_single_fastq_gz(read_sets, temp_dir,\n include_reverse)\n\n # Step 2 generating command humann2\n qclient.update_job_step(job_id,\n \"Step 2 of 6: Generating HUMANn2 command\")\n\n commands = generate_humann2_analysis_commands(combined_reads, out_dir,\n parameters)\n\n # Step 3 execute humann2\n msg = (\"Step 3 of 6: Executing HUMANn2 job \"\n \"(%d/{0})\".format(len(commands)))\n success, msg = _run_commands(qclient, job_id, commands, msg)\n if not success:\n return False, None, msg\n\n # Step 4 merge tables\n commands = []\n commands.append(('humann2_join_tables -i {0} -o {0}/genefamilies.biom '\n '--file_name genefamilies --search-subdirectories '\n '--verbose').format(out_dir))\n commands.append(('humann2_join_tables -i {0} -o {0}/pathcoverage.biom '\n '--file_name pathcoverage --search-subdirectories '\n '--verbose').format(out_dir))\n commands.append(('humann2_join_tables -i {0} -o {0}/pathabundance.biom '\n '--file_name pathabundance --search-subdirectories '\n '--verbose').format(out_dir))\n msg = \"Step 4 of 6: Merging resulting tables job (%d/3)\"\n success, msg = _run_commands(qclient, job_id, commands, msg)\n if not success:\n return False, None, msg\n\n # Step 5 generating re-normalized tables\n commands = []\n commands.append(('humann2_renorm_table -i {0}/genefamilies.biom -u cpm '\n '-o {0}/genefamilies_cpm.biom').format(out_dir))\n commands.append(('humann2_renorm_table -i {0}/pathcoverage.biom -u relab '\n '-o {0}/pathcoverage_relab.biom').format(out_dir))\n commands.append(('humann2_renorm_table -i {0}/pathabundance.biom -u relab '\n '-o {0}/pathabundance_relab.biom').format(out_dir))\n msg = \"Step 5 of 6: Re-normalizing tables (%d/3)\"\n success, msg = _run_commands(qclient, job_id, commands, msg)\n if not success:\n return False, None, msg\n\n # Step 6 stratifiying re-normalized tables\n commands = []\n pb = partial(join, out_dir)\n cmd = \"humann2_split_stratified_table --input %s --output %s\"\n commands.append(cmd % (pb(out_dir, 'genefamilies_cpm.biom'), out_dir))\n commands.append(cmd % (pb(out_dir, 'pathcoverage_relab.biom'), out_dir))\n commands.append(cmd % (pb(out_dir, 'pathabundance_relab.biom'), out_dir))\n msg = \"Step 6 of 6: Stratifiying re-normalizing tables (%d/3)\"\n success, msg = _run_commands(qclient, job_id, commands, msg)\n if not success:\n return False, None, msg\n\n # Generating 6 artifacts, separation is important for analysis\n ainfo = [\n ArtifactInfo('Gene family table', 'BIOM',\n [(pb('genefamilies.biom'), 'biom')]),\n ArtifactInfo('Path coverage table', 'BIOM',\n [(pb('pathcoverage.biom'), 'biom')]),\n ArtifactInfo('Path abundance table', 'BIOM',\n [(pb('pathabundance.biom'), 'biom')]),\n ArtifactInfo('Gene family CMP table', 'BIOM',\n [(pb('genefamilies_cpm.biom'), 'biom')]),\n ArtifactInfo('Path coverage RELAB table', 'BIOM',\n [(pb('pathcoverage_relab.biom'), 'biom')]),\n ArtifactInfo('Path abundance RELAB table', 'BIOM',\n [(pb('pathabundance_relab.biom'), 'biom')]),\n ArtifactInfo('Gene family CMP table - stratified', 'BIOM',\n 
[(pb('genefamilies_cpm_stratified.biom'), 'biom')]),\n ArtifactInfo('Path coverage RELAB table - stratified', 'BIOM',\n [(pb('pathcoverage_relab_stratified.biom'), 'biom')]),\n ArtifactInfo('Path abundance RELAB table - stratified', 'BIOM',\n [(pb('pathabundance_relab_stratified.biom'), 'biom')]),\n ArtifactInfo('Gene family CMP table - unstratified', 'BIOM',\n [(pb('genefamilies_cpm_unstratified.biom'), 'biom')]),\n ArtifactInfo('Path coverage RELAB table - unstratified', 'BIOM',\n [(pb('pathcoverage_relab_unstratified.biom'), 'biom')]),\n ArtifactInfo('Path abundance RELAB table - unstratified', 'BIOM',\n [(pb('pathabundance_relab_unstratified.biom'), 'biom')])]\n\n return True, ainfo, \"\"\n","sub_path":"qp_shotgun/humann2/humann2.py","file_name":"humann2.py","file_ext":"py","file_size_in_byte":15208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"239934980","text":"import sqlite3\n\nconn = sqlite3.connect('database.db')\n\nprint(\"Opened database successfully\")\n\n#\n# conn.execute('''CREATE TABLE COMPANY\n# (ID INT PRIMARY KEY NOT NULL,\n# NAME TEXT NOT NULL,\n# AGE INT NOT NULL,\n# ADDRESS CHAR(50),\n# SALARY REAL);''')\n#\n# conn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n# VALUES (1, 'Paul', 32, 'California', 20000.00 )\")\n#\n# conn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n# VALUES (2, 'Allen', 25, 'Texas', 15000.00 )\")\n#\n# conn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n# VALUES (3, 'Teddy', 23, 'Norway', 20000.00 )\")\n#\n# conn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n# VALUES (4, 'Mark', 25, 'Rich-Mond ', 65000.00 )\")\n#\n#conn.commit()\n\nres = conn.execute(\"\"\"\n SELECT t.Name FROM tracks t\n JOIN genres g ON t.GenreID = g.GenreId \n WHERE g.Name = \"Rock\" \n ORDER BY t.Milliseconds DESC\n LIMIT 10\n \"\"\")\n\nfor row in res:\n print(row)\n","sub_path":"s09/user_sqlite/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"326717344","text":"import time\nimport pytest\n\nfrom main_page import MainPage\nfrom base_page import BasePage\nfrom basket_page import BasketPage\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\n\nlink = \"http://media.test.itass.local/customers\"\n\n# @pytest.fixture(scope=\"function\")\n# def browser():\n# print(\"\\nstart browser for test..\")\n# browser = webdriver.Chrome()\n# # return browser\n# yield browser\n# print(\"\\nquit browser..\")\n# browser.quit()\n\n\n# class TestMainPage1():\n# вызываем фикстуру в тесте, передав ее как параметр\n# создание внутреннего клиента\n@pytest.mark.parametrize('flag, links, buttons, selector, value, selector1, value1, selector2, value2, selector3, value3, button_save',\n [(\"client\", \"http://media.test.itass.local/clients/customers\", \"#root > div > div > div > header > a > button\", \"#name\", \"test1@yandex.ru\", \"#email\", \"teывап2@yanывапdex.ru\", \"#phone1\", \"test3@yandex.ru\", \"#site\", \"test4@yandex.ru\", \"#root > div > div > div > div.MDTTD808z7GuhtYhNCiyx > form > div.IKVnscL2xZb4_HAppSRik > div > div > button._1JPTNwXTDV_vLByphy1l-O.M9c1UcWhIvzGEP-ZtlSLa._2bMXaBwkLZj8rpG7EdgcRt\"),\n (\"user\", \"http://media.test.itass.local/clients/users\", \"#root > div > div > div > header > button\", \"#fullName\", \"ц@ук-ецук ---@--\", 
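# --- Aside: a self-contained toy of the JOIN query run in the create_db.py
# record above. The schema and two sample rows are assumptions; only the query
# text itself is taken from the source.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE genres (GenreId INTEGER PRIMARY KEY, Name TEXT)")
conn.execute("CREATE TABLE tracks (Name TEXT, GenreId INTEGER, Milliseconds INTEGER)")
conn.execute("INSERT INTO genres VALUES (1, 'Rock')")
conn.executemany("INSERT INTO tracks VALUES (?, 1, ?)",
                 [('Long Song', 500000), ('Short Song', 100000)])

res = conn.execute("""
    SELECT t.Name FROM tracks t
    JOIN genres g ON t.GenreId = g.GenreId
    WHERE g.Name = 'Rock'
    ORDER BY t.Milliseconds DESC
    LIMIT 10
    """)
print([row[0] for row in res])  # ['Long Song', 'Short Song']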
\"#email\", \"teывап2@yanывапdex.r\", \"#fullName\", \"цук-ецук -----\", \"#email\", \"teывап2@yanывапdex.r\", \"#user-creating > div > div > div > button._1JPTNwXTDV_vLByphy1l-O.M9c1UcWhIvzGEP-ZtlSLa._2bMXaBwkLZj8rpG7EdgcRt\"),\n (\"prod\", \"http://media.test.itass.local/clients/customers/106/products\", \"#root > div > div > div > div.MDTTD808z7GuhtYhNCiyx > div._3FtI9K_rYuNqNUawu1Ow7K._2dqkqLWSBZAvRV2i3XPaE1 > header > button\", \"#name\", \"test3@yandex.ru№\", \"#mediaRegistrationCertificate\", \"test3@yandex.r№\", \"#circulation\", \"№city№\", \"#name\", \"test3@yandex.ru\", \"#root > div > div > div > div.MDTTD808z7GuhtYhNCiyx > div.IKVnscL2xZb4_HAppSRik > div > div > button._1JPTNwXTDV_vLByphy1l-O.M9c1UcWhIvzGEP-ZtlSLa._2bMXaBwkLZj8rpG7EdgcRt\"),\n (\"\", \"\", \"\", \"#site\", \"test4@yandex.ru\", 79992244333, \"test4@yandex.r\", 12345, \"city\", 2020202024, 222444884, \"\")])\ndef test_guest(browser, flag, links, buttons, selector, value, selector1, value1, selector2, value2, selector3, value3, button_save):\n # browser.get(link)\n # time.sleep(2)\n # browser.find_element_by_id(\"details-button\").click()\n # browser.find_element_by_id(\"proceed-link\").click()\n # # driver.get(\"http://media.test.itass.local\")\n # browser.find_element_by_id(\"userNameInput\").send_keys(\"ext_kolzin_a\")\n # time.sleep(1)\n # browser.find_element_by_id(\"passwordInput\").send_keys(\"Overlor1\")\n # browser.find_element_by_id(\"submitButton\").click()\n\n page = MainPage(browser, links) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес\n page.open()\n\n time.sleep(2)\n input = browser.find_element_by_css_selector(buttons)\n input.click()\n time.sleep(1)\n\n base_page = BasePage(browser, browser.current_url)\n mass_selector = [selector, selector1, selector2, selector3]\n mass_value = [value, value1, value2, value3]\n\n # input = browser.find_element(By.XPATH, \"// *[ @ id = 'root'] / div / div[3] / form / article[1] / section / div / div[1] / label / div / div\")\n # input.click()\n\n if flag == \"client\":\n input = browser.find_element(By.XPATH, \"//*[@id='root']/div/div/div/div[3]/form/div[1]/span[2]\")\n status = input.text\n assert status == \"Анкета не отправлена\"\n\n # переключение на внутреннего клиента\n input = browser.find_element_by_css_selector(\n \"label._1B_rCW-HdhdsZBqdHkrllC\")\n input.click()\n\n if flag == \"user\":\n input = browser.find_element(By.XPATH, \"//*[@id='modal-root']/div/div/section/div/form/div/label\")\n status = input.text\n assert status == \"Имя Фамилия пользователя\"\n\n # ввод имени полользователя\n input = browser.find_element_by_css_selector(\"#fullName\")\n input.send_keys(\"Имя\")\n # сохранить\n input = browser.find_element_by_css_selector(\n \"#modal-root > div > div > section > div > form > button\")\n input.click()\n\n input = browser.find_element_by_css_selector(\"#modal-root > div > div > section > div > form > div > span\")\n mess = input.text\n if mess == \"Некорректное значение\":\n input = browser.find_element_by_css_selector(\"#fullName\")\n input.send_keys(\" Фамилия\")\n input = browser.find_element_by_css_selector(\n \"#modal-root > div > div > section > div > form > button\")\n input.click()\n\n if flag == \"prod\":\n input = browser.find_element(By.XPATH, \"//*[@id='product-creating']/section[1]/section/section/div/div[1]/div[2]/div/label\")\n status = input.text\n assert status == \"Наименование продукта\"\n\n # клик по новый продукт\n # input = browser.find_element_by_css_selector(\n # \"#root > div > 
div > div > div.MDTTD808z7GuhtYhNCiyx > div._3FtI9K_rYuNqNUawu1Ow7K._2dqkqLWSBZAvRV2i3XPaE1 > header > button\")\n # input.click()\n # time.sleep(1)\n input = browser.find_element_by_css_selector(\n \"#product-creating > section:nth-child(1) > section > section > div > div:nth-child(2) > div._1BYAPcKJgAVhwLSvxdLQtV._2AYE17bNwF9fAiUDd3jicg._3FlCQKhOHMB0FDq6Lniosb._1CollOa9FvDSi8OuUmm1vf > div > div._1BYAPcKJgAVhwLSvxdLQtV._2AYE17bNwF9fAiUDd3jicg._3FlCQKhOHMB0FDq6Lniosb._1y9E9O3hdI5vdkkrArB_b > div > label\")\n input.click()\n\n i = 0\n for ii in mass_selector:\n time.sleep(2)\n # поля\n browser.find_element_by_css_selector(mass_selector[i]).clear()\n input = browser.find_element_by_css_selector(mass_selector[i])\n input.send_keys(mass_value[i])\n\n # клик по кнопке сохранить\n input = browser.find_element_by_css_selector(button_save)\n input.click()\n time.sleep(2)\n browser.execute_script('window.scrollTo(0,0);')\n\n # проверка на наличие надписи \"Некорректное значение\"\n input1 = browser.find_element_by_class_name(\"_2BdPeWfKXp-TXCYIUAZiad\")\n status1 = input1.text\n assert status1 == \"Некорректное значение\"\n time.sleep(2)\n\n # преписка к значению в поле(изменение поля)\n input = browser.find_element_by_css_selector(mass_selector[i])\n input.send_keys(\"f\")\n\n # base_page = BasePage(browser, browser.current_url)\n if flag == \"client\":\n assert base_page.is_element_present(By.CLASS_NAME, \"_2BdPeWfKXp-TXCYIUAZiad\")\n\n base_page = BasePage(browser, browser.current_url)\n assert base_page.is_element_present(By.LINK_TEXT, \"Некорректное значение\") == False\n\n # проверка на наличие надписи \"Некорректное значение\"\n # input1 = browser.find_element_by_class_name(\"_2BdPeWfKXp-TXCYIUAZiad\")\n # status1 = input1.text\n # assert status1 == \"Некорректное значение\"\n\n browser.find_element_by_css_selector(mass_selector[i])\n i = i + 1\n\n\n # basket_page = BasketPage(browser, browser.current_url)\n # basket_page.is_basket_empty()\n #\n # # проверка на наличие элемента на странице\n # red = True\n # try:\n # browser.find_element_by_class_name(\"_2BdPeWfKXp-TXCYIUAZiad\")\n # except NoSuchElementException:\n # red = False\n # assert red == True\n #\n # red = True\n # try:\n # browser.find_element_by_link_text(\"Некорректное значение\")\n # except NoSuchElementException:\n # red = False\n # assert red == False\n # time.sleep(3)\n\n","sub_path":"test_tass.py","file_name":"test_tass.py","file_ext":"py","file_size_in_byte":8348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"605017441","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ntrain_path = './data/train.txt'\nvalid_path = './data/valid.txt'\ntest_path = './data/test.txt'\ntrain_feature_path = './data/train.feature.txt'\nvalid_feature_path = './data/valid.feature.txt'\ntest_feature_path = './data/test.feature.txt'\n\n#データ読み込み\nnames = ['TITLE', 'CATEGORY']\nX_train = pd.read_csv(train_feature_path, sep='\\t', header=None)\ntrain_df = pd.read_csv(train_path, sep='\\t', header=None, names=names)\nX_valid = pd.read_csv(valid_feature_path, sep='\\t', header=None)\nvalid_df = pd.read_csv(valid_path, sep='\\t', header=None, names=names)\nX_test = pd.read_csv(test_feature_path, sep='\\t', header=None)\ntest_df = pd.read_csv(test_path, sep='\\t', header=None, names=names)\n\nresult = []\nfor C in tqdm(np.logspace(-5, 4, 10, 
base=10)):\n    # Train the model\n    model = LogisticRegression(penalty='l2', solver='sag', random_state=0, C=C)\n    model.fit(X_train, train_df['CATEGORY'])\n\n    # Get predictions\n    train_pred = model.predict(X_train)\n    valid_pred = model.predict(X_valid)\n    test_pred = model.predict(X_test)\n\n    # Compute accuracy\n    train_acc = accuracy_score(train_df['CATEGORY'], train_pred)\n    valid_acc = accuracy_score(valid_df['CATEGORY'], valid_pred)\n    test_acc = accuracy_score(test_df['CATEGORY'], test_pred)\n\n    result.append([C, train_acc, valid_acc, test_acc])\n\n# Transpose so result[0] holds the C values and result[1..3] the accuracy series\nresult = np.array(result).T\n\n# Visualization\nplt.plot(result[0], result[1], label=\"train\")\nplt.plot(result[0], result[2], label=\"valid\")\nplt.plot(result[0], result[3], label=\"test\")\nplt.xscale(\"log\")\nplt.xlabel(\"C\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()","sub_path":"masamune/chapter06/knock58.py","file_name":"knock58.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"620412735","text":"#\n# Copyright 2020 - Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Initial migrations.\"\"\"\n\nimport os\nimport shutil\nimport urllib\nfrom pathlib import Path\n\nfrom renku.core.constant import DEFAULT_DATA_DIR as DATA_DIR\nfrom renku.core.constant import RENKU_HOME\nfrom renku.core.migration.models.refs import LinkReference\nfrom renku.core.migration.models.v3 import Collection, Dataset, Project, get_project_datasets\nfrom renku.core.migration.models.v9 import generate_file_id, generate_label\nfrom renku.core.migration.utils import (\n    OLD_METADATA_PATH,\n    generate_dataset_id,\n    get_datasets_path,\n    get_pre_0_3_4_datasets_metadata,\n    is_using_temporary_datasets_path,\n)\nfrom renku.core.util.contexts import with_project_metadata\nfrom renku.core.util.git import get_in_submodules\nfrom renku.core.util.urls import url_to_string\nfrom renku.domain_model.dataset import generate_default_name\nfrom renku.domain_model.project_context import project_context\n\n\ndef migrate(migration_context):\n    \"\"\"Migration function.\"\"\"\n    _ensure_clean_lock()\n    _do_not_track_lock_file()\n    _migrate_datasets_pre_v0_3()\n    _migrate_broken_dataset_paths(migration_context=migration_context)\n    _fix_labels_and_ids(migration_context)\n    _fix_dataset_urls()\n    _migrate_dataset_and_files_project()\n\n\ndef _ensure_clean_lock():\n    \"\"\"Make sure Renku lock file is not part of repository.\"\"\"\n    if is_using_temporary_datasets_path():\n        return\n\n    lock_file = project_context.path / \".renku.lock\"\n    try:\n        lock_file.unlink()\n    except FileNotFoundError:\n        pass\n\n\ndef _do_not_track_lock_file():\n    \"\"\"Add lock file to .gitignore if not already exists.\"\"\"\n    if is_using_temporary_datasets_path():\n        return\n\n    lock_file = \".renku.lock\"\n    gitignore = project_context.path / \".gitignore\"\n    if not gitignore.exists() or lock_file not in gitignore.read_text():\n        
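# --- Aside: a minimal sketch of the two fixes applied to the knock58.py record
# above. The dummy accuracy values are assumptions for the demo; only the
# np.logspace call and the row layout [C, train, valid, test] come from the source.
import numpy as np

Cs = np.logspace(-5, 4, 10, base=10)          # 1e-05, 1e-04, ..., 1e+04
result = [[C, 0.90, 0.85, 0.84] for C in Cs]  # one row per C value

result = np.array(result).T                   # transpose: result[0] is all Cs,
print(result.shape)                           # (4, 10)  result[1..3] are series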
gitignore.open(\"a\").write(f\"\\n{lock_file}\\n\")\n\n\ndef _migrate_datasets_pre_v0_3():\n \"\"\"Migrate datasets from Renku 0.3.x.\"\"\"\n if is_using_temporary_datasets_path():\n return\n\n changed = False\n repository = project_context.repository\n\n for old_path in get_pre_0_3_4_datasets_metadata():\n changed = True\n name = str(old_path.parent.relative_to(project_context.path / DATA_DIR))\n\n dataset = Dataset.from_yaml(old_path)\n dataset.title = name\n dataset.name = generate_default_name(name)\n new_path = get_datasets_path() / dataset.identifier / OLD_METADATA_PATH\n new_path.parent.mkdir(parents=True, exist_ok=True)\n\n with with_project_metadata(read_only=True) as meta:\n for submodule in repository.submodules:\n if Path(submodule.url).name == meta.name:\n repository.submodules.remove(submodule)\n\n for file_ in dataset.files:\n if not Path(file_.path).exists():\n expected_path = project_context.path / DATA_DIR / dataset.name / file_.path\n if expected_path.exists():\n file_.path = expected_path.relative_to(project_context.path)\n\n dataset.to_yaml(new_path)\n\n Path(old_path).unlink()\n ref = LinkReference.create(name=f\"datasets/{name}\", force=True)\n ref.set_reference(new_path)\n\n if changed:\n project_path = project_context.metadata_path.joinpath(OLD_METADATA_PATH)\n project = Project.from_yaml(project_path)\n project.version = \"3\"\n project.to_yaml(project_path)\n\n repository.add(all=True)\n repository.commit(\"renku migrate: committing structural changes\" + project_context.transaction_id)\n\n\ndef _migrate_broken_dataset_paths(migration_context):\n \"\"\"Ensure all paths are using correct directory structure.\"\"\"\n for dataset in get_project_datasets():\n if not dataset.name:\n dataset.name = generate_default_name(dataset.title, dataset.version)\n else:\n dataset.name = generate_default_name(dataset.name)\n\n expected_path = get_datasets_path() / dataset.identifier\n\n # migrate the refs\n if not is_using_temporary_datasets_path():\n ref = LinkReference.create(name=f\"datasets/{dataset.name}\", force=True)\n ref.set_reference(expected_path / OLD_METADATA_PATH)\n\n if not expected_path.exists():\n old_dataset_path = dataset.path\n if not is_using_temporary_datasets_path():\n expected_path.parent.mkdir(parents=True, exist_ok=True)\n shutil.move(old_dataset_path, expected_path)\n else:\n expected_path.mkdir(parents=True, exist_ok=True)\n shutil.move(str(Path(old_dataset_path) / OLD_METADATA_PATH), expected_path)\n\n dataset.path = os.path.relpath(expected_path, project_context.path)\n\n if not is_using_temporary_datasets_path():\n base_path = project_context.path\n else:\n base_path = project_context.path / RENKU_HOME\n\n collections = [f for f in dataset.files if isinstance(f, Collection)]\n files = [f for f in dataset.files if not isinstance(f, Collection)]\n\n while collections:\n collection = collections.pop()\n for file in collection.members:\n if isinstance(file, Collection):\n collections.append(file)\n else:\n files.append(file)\n\n dataset.files = files\n\n for file in dataset.files:\n if _is_dir(migration_context=migration_context, path=file.path):\n continue\n if file.path.startswith(\"..\"):\n file_absolute_path = os.path.abspath(get_datasets_path() / dataset.identifier / file.path)\n file.path = Path(file_absolute_path).relative_to(base_path)\n elif not _exists(migration_context=migration_context, path=file.path):\n file.path = (project_context.path / DATA_DIR / file.path).relative_to(project_context.path)\n\n file.name = os.path.basename(file.path)\n\n 
dataset.to_yaml(expected_path / \"metadata.yml\")\n\n\ndef _fix_labels_and_ids(migration_context):\n \"\"\"Ensure files have correct label instantiation.\"\"\"\n for dataset in get_project_datasets():\n dataset._id = generate_dataset_id(identifier=dataset.identifier)\n dataset._label = dataset.identifier\n\n for file in dataset.files:\n if not _exists(migration_context=migration_context, path=file.path):\n continue\n\n commit = _get_previous_commit(migration_context=migration_context, path=file.path)\n _, commit, _ = get_in_submodules(repository=project_context.repository, commit=commit, path=file.path)\n\n if not _is_file_id_valid(file._id, file.path, commit.hexsha):\n file._id = generate_file_id(hexsha=commit.hexsha, path=file.path)\n\n if not file._label or commit.hexsha not in file._label or file.path not in file._label:\n file._label = generate_label(file.path, commit.hexsha)\n\n dataset.to_yaml()\n\n\ndef _fix_dataset_urls():\n \"\"\"Ensure dataset and its files have correct url format.\"\"\"\n for dataset in get_project_datasets():\n dataset.url = dataset._id\n for file_ in dataset.files:\n if file_.url:\n file_.url = url_to_string(file_.url)\n\n dataset.to_yaml()\n\n\ndef _migrate_dataset_and_files_project():\n \"\"\"Ensure dataset files have correct project.\"\"\"\n project_path = project_context.metadata_path.joinpath(OLD_METADATA_PATH)\n project = Project.from_yaml(project_path)\n if not is_using_temporary_datasets_path():\n project.to_yaml(project_path)\n\n for dataset in get_project_datasets():\n dataset._project = project\n if not dataset.creators:\n dataset.creators = [project.creator]\n for file_ in dataset.files:\n file_._project = project\n\n dataset.to_yaml()\n\n\ndef _is_file_id_valid(id_, path, hexsha):\n if not id_ or not isinstance(id_, str) or not id_.startswith(\"https\"):\n return False\n\n u = urllib.parse.urlparse(id_)\n return u.scheme and u.netloc and u.path.startswith(\"/blob\") and hexsha in u.path and path in u.path\n\n\ndef _exists(migration_context, path):\n dmc = migration_context.dataset_migration_context\n if dmc:\n return dmc.exists(path)\n\n path = project_context.path / path\n return path.exists() or (path.is_symlink() and os.path.lexists(path))\n\n\ndef _is_dir(migration_context, path):\n dmc = migration_context.dataset_migration_context\n if dmc:\n return dmc.is_dir(path)\n\n return (project_context.path / path).is_dir()\n\n\ndef _get_previous_commit(migration_context, path):\n dmc = migration_context.dataset_migration_context\n if dmc:\n return dmc.get_previous_commit(path)\n return project_context.repository.get_previous_commit(path, revision=\"HEAD\")\n","sub_path":"renku/core/migration/m_0003__2_initial.py","file_name":"m_0003__2_initial.py","file_ext":"py","file_size_in_byte":9570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"520033572","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 9 15:44:12 2015\n\n@author: stan\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nimport re\nimport patsy\n\nwd = '/home/stan/K/Ponpare/02_Input'\nfiles = [x for x in os.listdir(wd) if re.search('.csv$', x) is not None]\nfull_dir = [wd + '/' + x for x in files]\nnames = [n[:-4] for n in files]\nraw_dfs = {x: pd.DataFrame.from_csv(full_dir[i], index_col=None)\n for i, x in enumerate(names)}\n\ntrain = pd.merge(raw_dfs['coupon_detail_train'], raw_dfs['coupon_list_train'])\ntrain = train.ix[:, [\"COUPON_ID_hash\", \"USER_ID_hash\",\n \"GENRE_NAME\", \"DISCOUNT_PRICE\", 
\"PRICE_RATE\",\n \"USABLE_DATE_MON\", \"USABLE_DATE_TUE\", \"USABLE_DATE_WED\",\n \"USABLE_DATE_THU\", \"USABLE_DATE_FRI\", \"USABLE_DATE_SAT\",\n \"USABLE_DATE_SUN\", \"USABLE_DATE_HOLIDAY\",\n \"USABLE_DATE_BEFORE_HOLIDAY\", \"ken_name\", \"small_area_name\"]]\n\ncplte = raw_dfs['coupon_list_test']\n\ncplte['USER_ID_hash'] = 'dummyuser'\n\ncpchar = cplte.ix[:, [\"COUPON_ID_hash\", \"USER_ID_hash\",\n \"GENRE_NAME\", \"DISCOUNT_PRICE\", \"PRICE_RATE\",\n \"USABLE_DATE_MON\", \"USABLE_DATE_TUE\", \"USABLE_DATE_WED\",\n \"USABLE_DATE_THU\", \"USABLE_DATE_FRI\", \"USABLE_DATE_SAT\",\n \"USABLE_DATE_SUN\", \"USABLE_DATE_HOLIDAY\",\n \"USABLE_DATE_BEFORE_HOLIDAY\", \"ken_name\", \"small_area_name\"]]\n\ntrain = train.append(cpchar)\ntrain = train.fillna(1)\n\ntrain['DISCOUNT_PRICE'] = 1/np.log10(train.DISCOUNT_PRICE)\ntrain['PRICE_RATE'] = (train.PRICE_RATE*train.PRICE_RATE)/(100*100)\n\nformula = ''\nfor i in train.ix[0, 2:].columns:\n formula = formula + i + ' + '\nformula = formula[:-2] + '-1'\n\ndesign_mat = patsy.dmatrix(formula, data=train.ix[:, 2:], return_type='dataframe')\n\ntrain = train.iloc[:, :2].append(design_mat)\ntrain.fillna(0, inplace=True) # verify this is correct\n\ntest = train.ix[train.USER_ID_hash == 'dummyuser'].copy()\ntest = test.drop('USER_ID_hash', axis=1)\n\ntrain = train.ix[train.USER_ID_hash != 'dummyuser']\n\n# user characteristics\nuchar = train.groupby('USER_ID_hash').mean()\nuchar[['DISCOUNT_PRICE', 'PRICE_RATE']] = 1\n\n# Weight Matrix: GENRE_NAME DISCOUNT_PRICE PRICE_RATE USABLE_DATE_ ken_name small_area_name\n# verify this order is correct i.e. that order is preserved b/w R and py code\ndiag = (3, 1, 0.2, 0, 3, 3)\ndiag_reps = [13, 1, 1, 9, 45, 55]\nw = np.diag(np.repeat(diag, diag_reps))\n\nscore = np.dot(\n np.dot(uchar, w), # not uchar.ix[:, 1:] b/c no index col\n test.ix[:, 1:].T\n )\n","sub_path":"sb_cosine_similarity.py","file_name":"sb_cosine_similarity.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"122553545","text":"\"\"\" Debug panel for pyramid_debugtoolbar. It will require that package to be included first before including this. 
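# --- Aside: a shape check for the weighted score computed at the end of the
# sb_cosine_similarity.py record above (score = users . W . coupons^T). The
# random matrices and toy sizes are assumptions; the weights mirror the source,
# whose feature width is sum(diag_reps) = 13+1+1+9+45+55 = 124.
import numpy as np

n_users, n_coupons, n_feats = 3, 5, 124
uchar = np.random.rand(n_users, n_feats)   # per-user mean feature profile
test = np.random.rand(n_coupons, n_feats)  # test-coupon feature matrix
w = np.diag(np.repeat((3, 1, 0.2, 0, 3, 3), (13, 1, 1, 9, 45, 55)))

score = np.dot(np.dot(uchar, w), test.T)   # (n_users, n_coupons) similarity
print(score.shape)                         # (3, 5)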
\"\"\"\ntry:\n from pyramid_debugtoolbar.panels import DebugPanel\nexcept ImportError:\n #DebugPanel won't be used, so it's not needed\n DebugPanel = object\n\nfrom betahaus.viewcomponent.interfaces import IViewGroup\n\n\nVA_ATTRS = ('title',\n 'permission',\n 'interface',\n 'containment',)\n\n\nclass ViewGroupDebugPanel(DebugPanel):\n\n name = 'ViewGroup'\n has_content = True\n\n def __init__(self, request):\n self.request = request\n self.utils = tuple(self.request.registry.getUtilitiesFor(IViewGroup))\n\n def nav_title(self):\n return u\"View Groups\"\n\n title = nav_title\n\n def nav_subtitle(self):\n num = len(self.utils)\n return '%d %s' % (num, self.pluralize(\"view group\", \"view groups\", num))\n\n def url(self):\n return ''\n\n def content(self):\n vars = dict(\n utils = self.utils,\n va_attrs = VA_ATTRS,\n callable_repr = self.callable_repr,\n )\n return self.render(\n 'betahaus.viewcomponent:debug_panel.pt',\n vars,\n request=self.request)\n\n def callable_repr(self, _callable):\n return '%s.%s' % (_callable.__module__, _callable.__name__)\n\n\ndef includeme(config):\n \"\"\" Activate the debug toolbar; usually called via\n ``config.include('betahaus.viewcomponent.debug_panel')`` \n instead of being invoked directly.\n \"\"\"\n try:\n panels = config.registry.settings['debugtoolbar.panels']\n if ViewGroupDebugPanel not in panels:\n panels.append(ViewGroupDebugPanel)\n except KeyError:\n raise KeyError(\"Did you include pyramid_debugtoolbar before including this?\")\n\n","sub_path":"betahaus/viewcomponent/debug_panel.py","file_name":"debug_panel.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"282239939","text":"'''Arsenal client NodeGroups class.'''\nimport logging\nfrom arsenalclient.interface.arsenal_interface import ArsenalInterface\n\nLOG = logging.getLogger(__name__)\n\nclass NodeGroups(ArsenalInterface):\n '''The arsenal client NodeGroups class.'''\n\n def __init__(self, **kwargs):\n super(NodeGroups, self).__init__(**kwargs)\n self.uri = '/api/node_groups'\n\n # Overridden methods\n def search(self, params=None):\n '''Search for node_groups.\n\n Usage:\n\n >>> params = {\n ... 'name': 'my_node_group',\n ... 'exact_get': True,\n ... }\n >>> NodeGroups.search(params)\n\n Args:\n\n params (dict): a dictionary of url parameters for the request.\n\n Returns:\n A json response from ArsenalInterface.check_response_codes().\n '''\n\n return super(NodeGroups, self).search(params=params)\n\n def create(self, params):\n '''Create a new node_group.\n\n Args:\n\n params (dict): A dictionary with the following attributes:\n\n name : The name of the node_group you wish to create.\n owner : The email address of the owner of the node group.\n description: A text description of the members of this node_group.\n notes_url : A url to documentation relevant to the node_group.\n\n Usage:\n\n >>> params = {\n ... 'name': 'my_node_group',\n ... 'owner': 'email@mycompany.com',\n ... 'description': 'The nodegroup for all the magical servers',\n ... 'notes_url': 'https://somurl.somedomain.com/',\n ... }\n >>> NodeGroups.create(params)\n \n '''\n\n return super(NodeGroups, self).create(params)\n\n def update(self, params):\n '''Update a node_group.\n\n Args:\n params (dict): A dictionary of url parameters for the request.\n\n Usage:\n Only these params are updatable from this action.\n\n >>> params = {\n ... 'name': 'my_node_group',\n ... 'owner': 'email@mycompany.com',\n ... 
'description': 'The nodegroup for all the magical servers',\n ... 'notes_url': 'https://somurl.somedomain.com/',\n ... }\n >>> NodeGroups.update(params)\n\n Returns:\n A json response from ArsenalInterface.check_response_codes().\n '''\n\n return super(NodeGroups, self).update(params)\n\n def delete(self, params):\n '''Delete a node_group object from the server.\n\n Args:\n\n params: A node_group dictionary to delete. Must contain the\n node_group id, and name.\n\n Usage:\n\n >>> params = {\n ... 'id': 1,\n ... 'name': 'my_node_group',\n ... }\n >>> NodeGroups.delete(params)\n '''\n\n return super(NodeGroups, self).delete(params)\n\n def get_audit_history(self, results):\n '''Get the audit history for node_groups.'''\n return super(NodeGroups, self).get_audit_history(results)\n\n def get_by_name(self, name):\n '''Get a single node_group by it's name.\n\n Args:\n name (str): A string representing the node_group name you wish to find.\n '''\n return super(NodeGroups, self).get_by_name(name)\n\n\n # Custom methods\n def _manage_assignments(self, node_group, nodes, api_method):\n '''Assign or de-assign a node_group to/from a list of node dictionaries.\n Args:\n\n node_group (str): The name of the node_group to assign to the node search results.\n nodes (list): The list of node dicts. Must contain the name and the id.\n api_method (string): Whether we are doing a 'put' or 'delete'.\n '''\n\n node_names = []\n node_ids = []\n msg = 'Assigning'\n if api_method == 'delete':\n msg = 'De-assigning'\n\n for node in nodes:\n node_names.append(node['name'])\n node_ids.append(node['id'])\n\n LOG.info('{0} node_group: {1}'.format(msg,\n node_group['name']))\n for node in node_names:\n LOG.info(' node: {0}'.format(node))\n\n data = {\n 'nodes': node_ids\n }\n\n return self.api_conn('/api/node_groups/{0}/nodes'.format(node_group['id']),\n data,\n method=api_method)\n\n def assign(self, name, nodes):\n '''Assign a node_group to one or more nodes.\n\n Args:\n\n name (str) : The name of the node_group to assign to the node search results.\n nodes (list): The list of node dicts from the search results to assign to\n the node_group.\n\n Usage:\n\n >>> NodeGroups.node_groups.assign('node_group1', )\n \n '''\n\n try:\n node_group = self.get_by_name(name)\n return self._manage_assignments(node_group, nodes, 'put')\n except IndexError:\n pass\n\n def deassign(self, name, nodes):\n '''De-assign a node_group from one or more nodes.\n\n Args:\n\n name (str) : The name of the node_group to de-assign from the node search results.\n nodes (list): The list of node dicts from the search results to de-assign from\n the node_group.\n\n Usage:\n\n >>> NodeGroups.deassign('node_group1', )\n \n '''\n\n try:\n node_group = self.get_by_name(name)\n return self._manage_assignments(node_group, nodes, 'delete')\n except IndexError:\n pass\n\n def deassign_all(self, nodes):\n '''De-assign ALL node_groups from one or more nodes.\n\n Args:\n\n nodes (list): The list of node dicts from the search results to de-assign\n from all node_groups.\n\n Usage:\n\n >>> NodeGroups.deassign_all()\n \n '''\n\n node_ids = []\n for node in nodes:\n LOG.info('Removing all node_groups from node: {0}'.format(node['name']))\n node_ids.append(node['id'])\n\n data = {'node_ids': node_ids}\n\n try:\n resp = self.api_conn('/api/bulk/node_groups/deassign',\n data,\n method='delete')\n except Exception as ex:\n LOG.error('Command failed: {0}'.format(repr(ex)))\n raise\n\n return 
resp\n","sub_path":"client/arsenalclient/interface/node_groups.py","file_name":"node_groups.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"176098863","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys,os,io\nimport logging\nfrom datetime import datetime\n\nnow = datetime.now()\ndt_string = now.strftime(\"%d_%m_%H_%M\")\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',level=logging.DEBUG, filename= dt_string + 'DeBERT_root.log')\n\nfh = logging.FileHandler(dt_string + 'DeBERT_with_res.log')\nfh.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nlogging.getLogger('').addHandler(fh)\n\nlogger = logging.getLogger(__name__)\nimport argparse\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.modeling import BertAttention, BertForPreTraining, BertLayer, BertOutput, \\\n BertIntermediate, BertSelfAttention, BertSelfOutput,BertLayerNorm\nfrom pytorch_pretrained_bert.optimization import BertAdam\n\nfrom extract_features import convert_examples_to_features, InputExample\nfrom extract_features import InputExample as IE\n\nfrom torch import nn\nfrom torch.utils.data import Dataset\nimport random\n\n\n# Set PATHs\nPATH_TO_SENTEVAL = '/home/xiongyi/Codes/SentEval/examples'\nPATH_TO_DATA = os.path.join(PATH_TO_SENTEVAL,'../data')\n# PATH_TO_VEC = 'glove/glove.840B.300d.txt'\n#PATH_TO_VEC = os.path.join(PATH_TO_DATA,'glove/glove.840B.300d.txt')\n\n# import SentEval\nsys.path.insert(0, PATH_TO_SENTEVAL)\nimport senteval\n\n\ndef warmup_linear(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return 1.0 - x\n\n\nclass BERTDataset(Dataset):\n def __init__(self, corpus_path, tokenizer, seq_len, encoding=\"utf-8\", corpus_lines=None, on_memory=True):\n self.vocab = tokenizer.vocab\n self.tokenizer = tokenizer\n self.seq_len = seq_len\n self.on_memory = on_memory\n self.corpus_lines = corpus_lines # number of non-empty lines in input corpus\n self.corpus_path = corpus_path\n self.encoding = encoding\n self.current_doc = 0 # to avoid random sentence from same doc\n\n # for loading samples directly from file\n self.sample_counter = 0 # used to keep track of full epochs on file\n self.line_buffer = None # keep second sentence of a pair in memory and use as first sentence in next pair\n\n # 
for loading samples in memory\n self.current_random_doc = 0\n self.num_docs = 0\n self.sample_to_doc = [] # map sample index to doc and line\n\n # load samples into memory\n if on_memory:\n self.all_docs = []\n doc = []\n self.corpus_lines = 0\n with open(corpus_path, \"r\", encoding=encoding) as f:\n for line in tqdm(f, desc=\"Loading Dataset\", total=corpus_lines):\n line = line.strip()\n if line == \"\":\n self.all_docs.append(doc)\n doc = []\n #remove last added sample because there won't be a subsequent line anymore in the doc\n self.sample_to_doc.pop()\n else:\n #store as one sample\n sample = {\"doc_id\": len(self.all_docs),\n \"line\": len(doc)}\n self.sample_to_doc.append(sample)\n doc.append(line)\n self.corpus_lines = self.corpus_lines + 1\n\n # if last row in file is not empty\n if self.all_docs[-1] != doc:\n self.all_docs.append(doc)\n self.sample_to_doc.pop()\n\n self.num_docs = len(self.all_docs)\n\n # load samples later lazily from disk\n else:\n if self.corpus_lines is None:\n with open(corpus_path, \"r\", encoding=encoding) as f:\n self.corpus_lines = 0\n for line in tqdm(f, desc=\"Loading Dataset\", total=corpus_lines):\n if line.strip() == \"\":\n self.num_docs += 1\n else:\n self.corpus_lines += 1\n\n # if doc does not end with empty line\n if line.strip() != \"\":\n self.num_docs += 1\n\n self.file = open(corpus_path, \"r\", encoding=encoding)\n self.random_file = open(corpus_path, \"r\", encoding=encoding)\n\n def __len__(self):\n # last line of doc won't be used, because there's no \"nextSentence\". Additionally, we start counting at 0.\n return self.corpus_lines - self.num_docs - 1\n\n def __getitem__(self, item):\n cur_id = self.sample_counter\n self.sample_counter += 1\n if not self.on_memory:\n # after one epoch we start again from beginning of file\n if cur_id != 0 and (cur_id % len(self) == 0):\n self.file.close()\n self.file = open(self.corpus_path, \"r\", encoding=self.encoding)\n\n t1, t2, is_next_label = self.random_sent(item)\n\n # tokenize\n tokens_a = self.tokenizer.tokenize(t1)\n tokens_b = self.tokenizer.tokenize(t2)\n\n # combine to one sample\n cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label)\n\n # transform sample to features\n cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)\n\n cur_tensors = (torch.tensor(cur_features.input_ids),\n torch.tensor(cur_features.input_mask),\n torch.tensor(cur_features.segment_ids),\n torch.tensor(cur_features.lm_label_ids),\n torch.tensor(cur_features.is_next))\n\n return cur_tensors\n\n def random_sent(self, index):\n \"\"\"\n Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences\n from one doc. 
With 50% the second sentence will be a random one from another doc.\n :param index: int, index of sample.\n :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label\n \"\"\"\n t1, t2 = self.get_corpus_line(index)\n if random.random() > 0.5:\n label = 0\n else:\n t2 = self.get_random_line()\n label = 1\n\n assert len(t1) > 0\n assert len(t2) > 0\n return t1, t2, label\n\n def get_corpus_line(self, item):\n \"\"\"\n Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.\n :param item: int, index of sample.\n :return: (str, str), two subsequent sentences from corpus\n \"\"\"\n t1 = \"\"\n t2 = \"\"\n assert item < self.corpus_lines\n if self.on_memory:\n sample = self.sample_to_doc[item]\n t1 = self.all_docs[sample[\"doc_id\"]][sample[\"line\"]]\n t2 = self.all_docs[sample[\"doc_id\"]][sample[\"line\"]+1]\n # used later to avoid random nextSentence from same doc\n self.current_doc = sample[\"doc_id\"]\n return t1, t2\n else:\n if self.line_buffer is None:\n # read first non-empty line of file\n while t1 == \"\" :\n t1 = self.file.__next__().strip()\n t2 = self.file.__next__().strip()\n else:\n # use t2 from previous iteration as new t1\n t1 = self.line_buffer\n t2 = self.file.__next__().strip()\n # skip empty rows that are used for separating documents and keep track of current doc id\n while t2 == \"\" or t1 == \"\":\n t1 = self.file.__next__().strip()\n t2 = self.file.__next__().strip()\n self.current_doc = self.current_doc+1\n self.line_buffer = t2\n\n assert t1 != \"\"\n assert t2 != \"\"\n return t1, t2\n\n def get_random_line(self):\n \"\"\"\n Get random line from another document for nextSentence task.\n :return: str, content of one line\n \"\"\"\n # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document we're processing.\n for _ in range(10):\n if self.on_memory:\n rand_doc_idx = random.randint(0, len(self.all_docs)-1)\n rand_doc = self.all_docs[rand_doc_idx]\n line = rand_doc[random.randrange(len(rand_doc))]\n else:\n rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)\n #pick random line\n for _ in range(rand_index):\n line = self.get_next_line()\n #check if our picked random line is really from another doc like we want it to be\n if self.current_random_doc != self.current_doc:\n break\n return line\n\n def get_next_line(self):\n \"\"\" Gets next line of random_file and starts over when reaching end of file\"\"\"\n try:\n line = self.random_file.__next__().strip()\n #keep track of which document we are currently looking at to later avoid having the same doc as t1\n if line == \"\":\n self.current_random_doc = self.current_random_doc + 1\n line = self.random_file.__next__().strip()\n except StopIteration:\n self.random_file.close()\n self.random_file = open(self.corpus_path, \"r\", encoding=self.encoding)\n line = self.random_file.__next__().strip()\n return line\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for the language model.\"\"\"\n\n def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n tokens_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n tokens_b: (Optional) string. 
The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.tokens_a = tokens_a\n self.tokens_b = tokens_b\n self.is_next = is_next # nextSentence\n self.lm_labels = lm_labels # masked words for language model\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.is_next = is_next\n self.lm_label_ids = lm_label_ids\n\n\ndef random_word(tokens, tokenizer):\n \"\"\"\n Masking some random tokens for Language Model task with probabilities as in the original BERT paper.\n :param tokens: list of str, tokenized sentence.\n :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)\n :return: (list of str, list of int), masked tokens and related labels for LM prediction\n \"\"\"\n output_label = []\n\n for i, token in enumerate(tokens):\n prob = random.random()\n # mask token with 15% probability\n if prob < 0.15:\n prob /= 0.15\n\n # 80% randomly change token to mask token\n if prob < 0.8:\n tokens[i] = \"[MASK]\"\n\n # 10% randomly change token to random token\n elif prob < 0.9:\n tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]\n\n # -> rest 10% randomly keep current token\n\n # append current token to output (we will predict these later)\n try:\n output_label.append(tokenizer.vocab[token])\n except KeyError:\n # For unknown words (should not occur with BPE vocab)\n output_label.append(tokenizer.vocab[\"[UNK]\"])\n logger.warning(\"Cannot find token '{}' in vocab. Using [UNK] instead\".format(token))\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(-1)\n\n return tokens, output_label\n\n\ndef convert_example_to_features(example, max_seq_length, tokenizer):\n \"\"\"\n Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with\n IDs, LM labels, input_mask, CLS and SEP tokens etc.\n :param example: InputExample, containing sentence input as strings and is_next label\n :param max_seq_length: int, maximum length of sequence.\n :param tokenizer: Tokenizer\n :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)\n \"\"\"\n tokens_a = example.tokens_a\n tokens_b = example.tokens_b\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\n t1_random, t1_label = random_word(tokens_a, tokenizer)\n t2_random, t2_label = random_word(tokens_b, tokenizer)\n # concatenate lm labels and account for CLS, SEP, SEP\n lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. 
The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n assert len(tokens_b) > 0\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n lm_label_ids.append(-1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(lm_label_ids) == max_seq_length\n\n if example.guid < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"LM label: %s \" % (lm_label_ids))\n logger.info(\"Is next sentence label: %s \" % (example.is_next))\n\n features = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n lm_label_ids=lm_label_ids,\n is_next=example.is_next)\n return features\n\n\ndef DisentangleModel(model):\n #modify the architecture, delete the last layer and add back a layer with\n #two independent components.\n \n bm = next(model.children())\n #drop the last layer\n bm.encoder.layer = bm.encoder.layer[:-1]\n #add back a \"disentangled\" layer\n new_layer = BertDELayer(model.config)\n bm.encoder.layer.append(new_layer)\n return model\n #self_attention_layer\n \nclass BertDEAttention(nn.Module):\n #self stays the same, output needs modification\n def __init__(self, config):\n super(BertDEAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output1 = BertDESelfOutput(config)\n self.output2 = BertDESelfOutput(config)\n self.size = config.hidden_size\n \n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n #print ('self_output.shape', self_output.shape)\n attention_output1 = self.output1(self_output[:,:,0:int(self.size/2)], input_tensor)\n attention_output2 = self.output2(self_output[:,:,int(self.size/2):], input_tensor)\n return attention_output1, attention_output2\n\nclass BertDESelfOutput(nn.Module):\n #size = half, also drop the residual connection\n def __init__(self, config):\n super(BertDESelfOutput, self).__init__()\n self.dense = nn.Linear(int(config.hidden_size/2), int(config.hidden_size/2))\n self.LayerNorm = BertLayerNorm(int(config.hidden_size/2), eps=1e-12)\n self.dropout = 
nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n # no residual addition here: input_tensor is full hidden_size while hidden_states is\n # half-size (the shapes do not match), and the class comment above states the\n # residual connection is dropped\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertDELayer(nn.Module):\n def __init__(self, config):\n super(BertDELayer, self).__init__()\n self.attention = BertDEAttention(config)\n \n half_config = config\n half_config.hidden_size = int(config.hidden_size/2)\n half_config.intermediate_size = int(config.intermediate_size/2)\n self.intermediate1 = BertIntermediate(half_config)\n self.intermediate2 = BertIntermediate(half_config)\n \n self.output1 = BertOutput(half_config)\n self.output2 = BertOutput(half_config)\n def forward(self, hidden_states, attention_mask):\n attention_output1, attention_output2 = self.attention(hidden_states, attention_mask)\n intermediate_output1 = self.intermediate1(attention_output1)\n intermediate_output2 = self.intermediate2(attention_output2)\n \n layer_output1 = self.output1(intermediate_output1, attention_output1)\n layer_output2 = self.output2(intermediate_output2, attention_output2)\n #print ('layer_output1.shape :', layer_output1.shape)\n return torch.cat((layer_output1, layer_output2), -1)\n\n\n# SentEval prepare and batcher\ndef prepare(params, samples):\n params.batch_size = 32\n return\n\ndef batcher(params, batch):\n #print ('batch size' ,len(batch))\n batch = [sent if sent != [] else ['.'] for sent in batch]\n batch = [' '.join(sent) for sent in batch]\n #print ('batch', batch)\n examples = []\n unique_id = 0\n #print ('batch size ', len(batch))\n for sent in batch:\n sent = sent.strip()\n text_b = None\n text_a = sent\n examples.append(\n IE(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n\n features = convert_examples_to_features(examples, 128, params['DEbert'].tokenizer)\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).to(params['DEbert'].device)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).to(params['DEbert'].device)\n \n embeddings, _ = params['DEbert'](all_input_ids, token_type_ids=None, \\\n attention_mask=all_input_mask)\n \n #print ('embeddings[-1].shape ', embeddings[-1].shape)\n final_embeddings = embeddings[-1].detach().mean(1).cpu().numpy()\n \n return final_embeddings\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--train_file\",\n default=None,\n type=str,\n required=True,\n help=\"The input train corpus.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run evaluation.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=3e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--on_memory\",\n action='store_true',\n help=\"Whether to load train samples into memory or use disk\")\n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Whether to lower case the input text. True for uncased models, False for cased models.\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumualte before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type = float, default = 0,\n help = \"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n\n #args = parser.parse_args()\n args = parser.parse_args([\"--train_file\",\"/home/xiongyi/Data/Corpus/small_wiki_sentence_corpus.txt\",\"--do_eval\",\"--bert_model\",\\\n \"bert-base-uncased\",\"--output_dir\",\"june10\"])\n \n \n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n #n_gpu = torch.cuda.device_count()\n device = torch.device(\"cuda\", 1)\n n_gpu = 1\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl', rank = 1, world_size=2)\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir):\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n os.makedirs(args.output_dir, exist_ok=True)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n #train_examples = None\n num_train_steps = None\n if args.do_train:\n print(\"Loading Train Dataset\", args.train_file)\n train_dataset = BERTDataset(args.train_file, tokenizer, seq_len=args.max_seq_length,\n corpus_lines=None, on_memory=args.on_memory)\n num_train_steps = int(\n len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)\n\n # Prepare model\n model = BertForPreTraining.from_pretrained(args.bert_model)\n model = DisentangleModel(model)\n \n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n \n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 
0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_steps)\n\n global_step = 0\n if args.do_train:\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_dataset)\n else:\n\n train_sampler = DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch\n loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n # modify learning rate with special warm up BERT uses\n lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_steps, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n # Save a trained model\n logger.info(\"** ** * Saving fine - tuned model ** ** * \")\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\")\n if args.do_train:\n torch.save(model_to_save.state_dict(), output_model_file)\n \n model.eval() \n new_model = next(model.children())\n ##use probing/downstream_tasks to evaluate the model\n\n # Set params for SentEval\n params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}\n params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 32,\n 'tenacity': 3, 'epoch_size': 2}\n \n params_senteval['DEbert']=new_model\n params_senteval['DEbert'].tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n params_senteval['DEbert'].device = device\n se = senteval.engine.SE(params_senteval, batcher, prepare)\n transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',\n 'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',\n 'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',\n 'Length', 'WordContent', 'Depth', 'TopConstituents',\n 'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',\n 'OddManOut', 'CoordinationInversion']\n results = se.eval(transfer_tasks)\n print(results)\n\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"examples/seperate_bert.py","file_name":"seperate_bert.py","file_ext":"py","file_size_in_byte":34392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"525408504","text":"from pyramid.view import view_config\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n@view_config(route_name='home', renderer='templates/query_form.pt')\ndef wikiapp_home_view(request):\n return {'project': 'Wiki-Scrape App'}\n\n\n@view_config(route_name='download', renderer='templates/results.pt')\ndef download_content(request):\n wiki_url = request.params['url']\n\n # validate the URL; the original chained and-expression only tested membership of 'wiki'\n parts = wiki_url.split('/', 4)\n if 'https:' in parts and 'wiki' in parts and any('en.wikipedia' in part for part in parts):\n pass\n else:\n return {'data': 'You provided an invalid url', 'wiki_url': wiki_url}\n\n # fetch the page once and reuse the response for both the status check and parsing\n r = requests.get(wiki_url)\n if r.status_code == 404:\n return {'data': 'Page does not exist', 'wiki_url': wiki_url}\n\n soup = BeautifulSoup(r.content, 'html.parser')\n toc = soup.find('div', id='toc', class_='toc')\n\n if toc is None:\n return {'data': 'Page has no Table Of Contents', 'wiki_url': wiki_url}\n\n return {'data': toc.contents, 'wiki_url': wiki_url}\n","sub_path":"wikiapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"295317429","text":"'''Write a program which contains one class named as BankAccount.\r\nBankAccount class contains two instance variables as Name & Amount.\r\nThat class contains one class variable as ROI which is initialise to 10.5.\r\nInside init method initialise all name and amount variables by accepting the values from user.\r\nThere are Four instance methods inside class as Display(), Deposit(), Withdraw(),\r\nCalculateIntrest().\r\nDeposit() method will accept the amount from user and add that value in class instance variable\r\nAmount.\r\nWithdraw() method will accept amount to be withdrawn from user and subtract that amount\r\nfrom class instance variable Amount.\r\nCalculateIntrest() method calculate the interest based on Amount by considering rate of interest\r\nwhich is Class variable as ROI.\r\nAnd Display() method will display value of all the instance variables as Name and Amount.\r\nAfter designing the above class call all instance methods by creating multiple objects.'''\r\n\r\nclass BankAccount:\r\n ROI = 10.5\r\n def __init__(self,custName,custAmount):\r\n self.Name = custName\r\n self.Amount = custAmount\r\n \r\n def Deposit(self):\r\n print(\"Amount Before Deposit: \",self.Amount)\r\n surplus = int(input(\"Enter Amount To be Deposited: \"))\r\n self.Amount = self.Amount + surplus\r\n print(\"Amount After Deposit: \",self.Amount)\r\n \r\n def Withdraw(self):\r\n surplus = int(input(\"Enter Amount To be Withdrawn: \"))\r\n if surplus <= self.Amount:\r\n print(\"Amount Before Withdraw: \",self.Amount)\r\n self.Amount = self.Amount - surplus\r\n print(\"Amount After Withdraw: \",self.Amount)\r\n else:\r\n 
print(\"Insufficient Balance\")\r\n \r\n def CalculateIntrest(self):\r\n time = int(input(\"Enter Time Period: \"))\r\n print(\"The Simple Interest Is: \",(self.Amount * self.ROI * time))\r\n \r\n def Display(self):\r\n print(\"Name Of Customer: \",self.Name)\r\n print(\"Account Balance: \",self.Amount)\r\n\r\n\r\ndef main():\r\n # Creating First Object of Class BankAccount and using it to call Class Instance Methods\r\n Name = input(\"Enter Customer Name: \")\r\n Amount = int(input(\"Enter Amount: \"))\r\n Obj1 = BankAccount(Name,Amount)\r\n Obj1.Display()\r\n Obj1.Deposit()\r\n Obj1.Withdraw()\r\n Obj1.CalculateIntrest()\r\n\r\n print()\r\n \r\n # Creating Second Object of Class BankAccount and using it to call Class Instance Methods\r\n Name = input(\"Enter Customer Name: \")\r\n Amount = int(input(\"Enter Amount: \"))\r\n Obj2 = BankAccount(Name,Amount)\r\n Obj2.Display()\r\n Obj2.Deposit()\r\n Obj2.Withdraw()\r\n Obj2.CalculateIntrest()\r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Assignment_7_Python/Assignment_7_2.py","file_name":"Assignment_7_2.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"360113121","text":"import csv\n\nfrom ofxstatement import statement\nfrom ofxstatement.plugin import Plugin\nfrom ofxstatement.parser import CsvStatementParser\n\nimport locale\n\nfrom pprint import pformat, pprint\n\n\nclass IngRoPlugin(Plugin):\n\t\"\"\"ING Romania Plugin\n\t\"\"\"\n\n\tdef get_parser(self, filename):\n\t\tf = open(filename, 'r', encoding=self.settings.get(\"charset\", \"ISO-8859-2\"))\n\t\tparser = IngRoParser(f)\n\t\treturn parser\n\n\nclass IngRoParser(CsvStatementParser):\n\tdate_format = \"%d %B %Y\"\n\tmappings = {\n\t\t'date': 0,\n\t\t'memo': 1,\n\t\t'amount': 2\n\t}\n\tcurrentRecord = {\n\t\t'date': '',\n\t\t'details': '',\n\t\t'amount': 0.0,\n\t\t'type': 'NONE'\n\t}\n\n\tdef parse(self):\n\t\tstmt = super(IngRoParser, self).parse()\n\t\tstatement.recalculate_balance(stmt)\n\t\treturn stmt\n\n\tdef split_records(self):\n\t\treader = csv.reader(self.fin)\n\t\tnext(reader, None)\n\t\treturn reader\n\n\tdef parse_record(self, line):\n\t\t# print(\"\\n[[[[ parsing record: \" + pformat(line))\n\t\t(date, reserved1, reserved2, details, reserved3, debit_amount, credit_amount) = line\n\t\t# print(\">>>>> date is: \" + date)\n\t\t# print(\">>>>> recorded date is: \" + self.currentRecord['date'])\n\n\t\tdebit_amount = float(debit_amount.replace(\".\", \"\").replace(\",\", \".\")) if debit_amount is not '' else 0.0\n\t\tcredit_amount = float(\n\t\t\tcredit_amount.replace(\".\", \"\").replace(\",\", \".\")) if credit_amount is not '' else 0.0\n\n\t\tif debit_amount > 0:\n\t\t\tstatement_amount = debit_amount\n\t\t\tstatement_type = 'DEBIT'\n\t\telif credit_amount > 0:\n\t\t\tstatement_amount = credit_amount\n\t\t\tstatement_type = 'CREDIT'\n\t\telse:\n\t\t\tstatement_amount = 0.0\n\t\t\tstatement_type = 'NONE'\n\n\t\t# Skip header\n\t\tif date == 'Data':\n\t\t\t# print(\"^^^^^ Skip header\")\n\t\t\treturn None\n\n\t\t# Here we could commit the previous transaction because:\n\t\t# 1. We either start a new transaction (date field is valid)\n\t\t# 2. 
We reached the end of the file (reserved1 field is valid, and date is None)\n\t\t# However, we might not have a previous transaction (this is the first), so check if there is\n\t\t# anything to commit at this point.\n\n\t\tif date is not '':\n\t\t\tstatement_object = None\n\t\t\tif self.currentRecord['date'] is not '':\n\t\t\t\t# print(\"----> Output currentRecord\" + pformat(self.currentRecord))\n\t\t\t\tlocale.setlocale(locale.LC_ALL, 'ro_RO')\n\t\t\t\tstatement_object = super(IngRoParser, self).parse_record([\n\t\t\t\t\tself.currentRecord['date'],\n\t\t\t\t\tself.currentRecord['details'],\n\t\t\t\t\tself.currentRecord['amount']\n\t\t\t\t])\n\t\t\t\tstatement_object.trntype = self.currentRecord['type']\n\n\t\t\t# print(\"##### We started a new record with date: \" + date)\n\t\t\tself.currentRecord['date'] = date\n\t\t\tself.currentRecord['details'] = details\n\t\t\tself.currentRecord['amount'] = statement_amount\n\t\t\tself.currentRecord['type'] = statement_type\n\n\t\t\treturn statement_object\n\n\t\tif reserved1 is not '':\n\t\t\t# We are at the end of the file where the bank/account manager signatures\n\t\t\t# are found in the reserved fields. This means that there's no current record to\n\t\t\t# commit.\n\t\t\t# print(\"----- We are at the end of the file\")\n\t\t\tstatement_object = None\n\t\t\tif self.currentRecord['date'] is not '':\n\t\t\t\t# print(\"----> Output currentRecord\" + pformat(self.currentRecord))\n\t\t\t\tlocale.setlocale(locale.LC_ALL, 'ro_RO')\n\t\t\t\tstatement_object = super(IngRoParser, self).parse_record([\n\t\t\t\t\tself.currentRecord['date'],\n\t\t\t\t\tself.currentRecord['details'],\n\t\t\t\t\tself.currentRecord['amount']\n\t\t\t\t])\n\t\t\t\tstatement_object.trntype = self.currentRecord['type']\n\n\t\t\t# This is a record from the end of the file, where we do not have any record data.\n\t\t\tself.currentRecord['date'] = ''\n\t\t\tself.currentRecord['details'] = ''\n\t\t\tself.currentRecord['amount'] = 0.0\n\t\t\tself.currentRecord['type'] = 'NONE'\n\t\t\treturn statement_object\n\n\t\tif date is '':\n\t\t\t# This line contains extra details for the current transaction\n\t\t\t# print(\"***** Adding details: \" + details)\n\t\t\tself.currentRecord['details'] = self.currentRecord['details'] + \" \" + details\n\t\t\treturn None\n","sub_path":"src/ofxstatement/plugins/ingro.py","file_name":"ingro.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"560837838","text":"# Generators are a mechanism for lazy evaluation of a function that would otherwise return a space-prohibitive\n# or computationally intensive result.\n# They're vital when we need the flexibility of a function but the speed of an iterator\n\nfrom itertools import count\n\n\ndef generate_primes(stop_at=0):\n primes = []\n for n in count(2):\n if 0 < stop_at < n:\n return # raises stop iteration exception\n composite = False\n for p in primes:\n if not n % p:\n composite = True\n break\n elif p ** 2 > n:\n break\n if not composite:\n primes.append(n)\n yield n\n\n# generate primes up to 100\nfor i in generate_primes():\n if i > 100:\n break\n print(i)\n\n\n","sub_path":"generators/is_prime.py","file_name":"is_prime.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"140360203","text":"from pathlib import Path\n\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom ...api_doc import 
ApiDoc\nfrom ...helpers import ExampleContainer, HighlightedSource\nfrom ...metadata import get_component_metadata\nfrom .body import cards as cards_body\nfrom .color import cards as cards_color\nfrom .header_footer import card as card_header_footer\nfrom .image import cards as cards_image\nfrom .layout.columns import cards as cards_columns\nfrom .layout.deck import cards as cards_deck\nfrom .layout.group import cards as cards_group\nfrom .list_group import card as card_list_group\nfrom .outline import cards as cards_outline\nfrom .simple import card as card_simple\nfrom .sizing.css import card as card_size_css\nfrom .sizing.grid import cards as cards_size_grid\nfrom .sizing.utility import cards as cards_size_utility\nfrom .ttl import card as card_ttl\n\nHERE = Path(__file__).parent\n\ncard_simple_source = (HERE / \"simple.py\").read_text()\ncards_body_source = (HERE / \"body.py\").read_text()\ncard_ttl_source = (HERE / \"ttl.py\").read_text()\ncards_image_source = (HERE / \"image.py\").read_text()\ncard_list_group_source = (HERE / \"list_group.py\").read_text()\ncard_header_footer_source = (HERE / \"header_footer.py\").read_text()\ncards_size_grid_source = (HERE / \"sizing\" / \"grid.py\").read_text()\ncards_size_utility_source = (HERE / \"sizing\" / \"utility.py\").read_text()\ncard_size_css_source = (HERE / \"sizing\" / \"css.py\").read_text()\ncards_color_source = (HERE / \"color.py\").read_text()\ncards_outline_source = (HERE / \"outline.py\").read_text()\ncards_group_source = (HERE / \"layout\" / \"group.py\").read_text()\ncards_deck_source = (HERE / \"layout\" / \"deck.py\").read_text()\ncards_columns_source = (HERE / \"layout\" / \"columns.py\").read_text()\n\ncontent = [\n html.H2(\"Cards\", className=\"display-4\"),\n html.P(\n dcc.Markdown(\n \"Bootstrap's cards provide a flexible content container with \"\n \"multiple variants and options.\"\n ),\n className=\"lead\",\n ),\n html.H4(\"Simple example\"),\n html.P(\n dcc.Markdown(\n \"Below is an example of a basic card with mixed content and a \"\n \"fixed width, set using the `style` argument. By default, `Card` \"\n \"has no fixed width, so it'll naturally fill the full width of \"\n \"its parent element. This is easily customized with Bootstraps \"\n \"various sizing options detailed below.\"\n )\n ),\n ExampleContainer(card_simple),\n HighlightedSource(card_simple_source),\n html.H4(\"Content types\"),\n html.P(\n \"Cards support a wide variety of content. Here are some of the \"\n \"building blocks you can use when creating your own cards.\"\n ),\n html.H5(\"Body\"),\n html.P(\n dcc.Markdown(\n \"Use the `CardBody` component whenever you need a padded section \"\n \"within a `Card`. If all of the children of the `Card` are in the \"\n \"body, you can instead set `body=True` on the `Card` to simplify \"\n \"your code slightly.\"\n )\n ),\n ExampleContainer(cards_body),\n HighlightedSource(cards_body_source),\n html.H5(\"Titles, text and links\"),\n html.P(\n dcc.Markdown(\n \"Use the `card-title`, `card-subtitle`, and `card-text` classes \"\n \"to add margins and spacing that have been optimized for cards to \"\n \"titles, subtitles and text respectively.\"\n )\n ),\n html.P(\n dcc.Markdown(\n \"The `CardLink` component can be used like \"\n \"`dash_core_components.Link`, as a regular hyperlink, or \"\n \"as a button by attaching a callback to the `n_clicks` prop. 
\"\n \"`CardLink` will behave like `dcc.Link` by default if a \"\n \"relative path is assigned to `href`, and like a hyperlink if an \"\n \"absolute path is assigned to `href`. This can be overridden \"\n \"using the `external_link` argument. This is useful, for example, \"\n \"when accessing routes on the underlying flask server.\"\n )\n ),\n ExampleContainer(card_ttl),\n HighlightedSource(card_ttl_source),\n html.H5(\"Images\"),\n html.P(\n dcc.Markdown(\n \"Use `CardImg` when adding images to cards. The `top` argument \"\n \"can be used when the image is at the top of the card to remove \"\n \"the border radius from the bottom corners. Similarly the \"\n \"`bottom` argument can be used when the image is at the bottom of \"\n \"the card.\"\n )\n ),\n ExampleContainer(cards_image),\n HighlightedSource(cards_image_source),\n html.H5(\"List groups\"),\n html.P(\n dcc.Markdown(\n \"Create lists of content in a card with a `ListGroup` component \"\n \"by setting `flush=True`.\"\n )\n ),\n ExampleContainer(card_list_group),\n HighlightedSource(card_list_group_source),\n html.H5(\"Header and footer\"),\n html.P(\n dcc.Markdown(\n \"Add optional headers or footers to cards using the `CardHeader` \"\n \"and `CardFooter` components.\"\n )\n ),\n ExampleContainer(card_header_footer),\n HighlightedSource(card_header_footer_source),\n html.H4(\"Sizing\"),\n html.P(\n dcc.Markdown(\n \"As mentioned previously, cards assume no specific width, so they \"\n \"will expand to the width of the parent element unless otherwise \"\n \"specified. You can change this behaviour as needed in one of \"\n \"three different ways.\"\n )\n ),\n html.H5(\"Using grid components\"),\n html.P(\n dcc.Markdown(\n \"Wrap cards in `Row` and `Col` components to control their width \"\n \"and layout. In this example we use the `width` argument of `Col` \"\n \"to make the first card take up a third of the available width, \"\n \"and the second card two thirds. See the \"\n \"[layout documentation](/l/components/layout) for more details on \"\n \"`Row` and `Col`.\"\n )\n ),\n ExampleContainer(cards_size_grid),\n HighlightedSource(cards_size_grid_source),\n html.H5(\"Using Bootstrap utility classes\"),\n html.P(\n dcc.Markdown(\n \"Bootstrap comes with several CSS utility classes built in, \"\n \"including some for sizing. For example, the class `w-50` sets \"\n \"`width:50%`. We can can apply these classes to quickly set the \"\n \"desired width of the cards. See the [Bootstrap documentation]\"\n \"(https://getbootstrap.com/docs/4.3/utilities/sizing/) for more \"\n \"details.\"\n )\n ),\n ExampleContainer(cards_size_utility),\n HighlightedSource(cards_size_utility_source),\n html.H5(\"Using custom CSS\"),\n html.P(\n dcc.Markdown(\n \"Finally, you can use custom CSS to control the size of your \"\n \"cards. In this example we use the `style` argument of `Card` to \"\n \"set inline style arguments. You can also write your own CSS \"\n \"classes that specify `width`, `max-width` etc. and apply them to \"\n \"the card.\"\n )\n ),\n ExampleContainer(card_size_css),\n HighlightedSource(card_size_css_source),\n html.H4(\"Card style\"),\n html.H5(\"Background and color\"),\n html.P(\n dcc.Markdown(\n \"Use the `color` argument of `Card` to set the background and \"\n \"border colors of `Card` to one of Bootstrap's contextual colors. 
\"\n \"When setting a dark color, use `inverse=True` to invert the text \"\n \"colors for better contrast.\"\n )\n ),\n ExampleContainer(cards_color),\n HighlightedSource(cards_color_source),\n html.H4(\"Outline style\"),\n html.P(\n dcc.Markdown(\n \"Use the argument `outline=True` to remove the block colors from \"\n \"the background and header.\"\n )\n ),\n ExampleContainer(cards_outline),\n HighlightedSource(cards_outline_source),\n html.H4(\"Card layout\"),\n html.P(\n dcc.Markdown(\n \"In addition to styling the contents of cards, Bootstrap includes \"\n \"options for laying out a series of cards.\"\n )\n ),\n html.H5(\"Card group\"),\n html.P(\n dcc.Markdown(\n \"Use the `CardGroup` component to render cards as a single \"\n \"attached element with equal width and height columns.\"\n )\n ),\n ExampleContainer(cards_group),\n HighlightedSource(cards_group_source),\n html.H5(\"Card deck\"),\n html.P(\n dcc.Markdown(\n \"The `CardDeck` component will lay cards out with equal width and \"\n \"height, without attaching them to one another like the \"\n \"`CardGroup` component.\"\n )\n ),\n ExampleContainer(cards_deck),\n HighlightedSource(cards_deck_source),\n html.H5(\"Card columns\"),\n html.P(\n dcc.Markdown(\n \"Cards can be organised into [Masonry]\"\n \"(https://masonry.desandro.com/)-like columns using the \"\n \"`CardColumns` component. Cards are ordered top to bottom and \"\n \"left to right.\"\n )\n ),\n ExampleContainer(cards_columns),\n HighlightedSource(cards_columns_source),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardDeck.js\"),\n component_name=\"CardDeck\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardGroup.js\"),\n component_name=\"CardGroup\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardColumns.js\"),\n component_name=\"CardColumns\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/Card.js\"),\n component_name=\"Card\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardHeader.js\"),\n component_name=\"CardHeader\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardBody.js\"),\n component_name=\"CardBody\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardFooter.js\"),\n component_name=\"CardFooter\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardLink.js\"),\n component_name=\"CardLink\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardImg.js\"),\n component_name=\"CardImg\",\n ),\n ApiDoc(\n get_component_metadata(\"src/components/card/CardImgOverlay.js\"),\n component_name=\"CardImgOverlay\",\n ),\n]\n","sub_path":"docs/components_page/components/card/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"527558096","text":"#!/usr/bin/env python3\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nimport dash_table\n\nimport pandas as pd\nimport numpy as np\n\nfrom app import app\n# import test_contract_opportunities\nimport test_contract_manager\nimport test_contract_manager_drilldown\nimport test_contract_manager_bundle\nimport test_contract_manager_drilldown_bundle\n# import test_contract_optimizer\n# import test_contract_optimizer_bundle\n# import test_contract_generator\n# import test_contract_generator_recom\n# import 
test_contract_generator_bundle\nimport test_contract_manager_bundle\nimport test_contract_manager_drilldown_bundle\n# import test_contract_report_generator\n# import test_contract_report_generator_bundle\nimport contract_overview\nimport aco_contract\nimport bundle_contract\n\n\n\n\ndef launch_layout():\n return html.Div([\n\n \n html.Div(\n [\n html.Img(src=app.get_asset_url(\"logo-demo.png\"),style={\"height\":\"20rem\",\"margin-top\":\"5rem\"}),\n html.H1(u\"ValueGen Solution\",style={\"background-color\":\"transparent\",\"font-size\":\"5rem\"}),\n html.Div([\n html.Div(\n [\n dbc.Row(\n [\n dbc.Col(dbc.Button(\"ENTER\", color=\"dark\", className=\"mr-1\", href = \"/vbc-demo/contract-overview/\", style={\"font-family\":\"NotoSans-Black\", \"font-size\":\"1rem\", \"padding\":\"1rem\",\"border-radius\":\"1rem\",\"border\":\"none\",\"box-shadow\":\"0 4px 8px 0 rgba(0, 0, 0, 0.1), 0 6px 20px 0 rgba(0, 0, 0, 0.1)\"}), style={\"border-radius\":\"1rem\",\"width\":\"5rem\"}),\n # dbc.Col(dbc.Button(\"Contract Administrator\", color=\"light\", className=\"mr-1\", href = \"/vbc-demo/contract-manager/\", style={\"font-family\":\"NotoSans-Regular\", \"font-size\":\"1rem\", \"padding\":\"1rem\", \"padding\":\"1rem\", \"border-radius\":\"1rem\",\"border\":\"1px solid #ececf6\",\"box-shadow\":\"0 4px 8px 0 rgba(0, 0, 0, 0.1), 0 6px 20px 0 rgba(0, 0, 0, 0.1)\"}), style={\"border-radius\":\"1rem\",\"width\":\"5rem\"}),\n # dbc.Col(dbc.Button(\"Tele Case Manager\", color=\"light\", className=\"mr-1\", href = \"/vbc-demo/tele-case-manager/\", style={\"font-family\":\"NotoSans-Regular\", \"font-size\":\"1rem\", \"padding\":\"1rem\", \"border-radius\":\"1rem\",\"border\":\"1px solid #ececf6\",\"box-shadow\":\"0 4px 8px 0 rgba(0, 0, 0, 0.1), 0 6px 20px 0 rgba(0, 0, 0, 0.1)\"}), style={\"border-radius\":\"1rem\",\"width\":\"5rem\"}),\n ],\n style={\"background-color\":\"none\", \"font-family\":\"NotoSans-Regular\", \"font-size\":\"1rem\", \"border\":\"none\",\"padding-top\":\"1rem\",\"padding-bottom\":\"1rem\",\"padding-left\":\"20rem\",\"padding-right\":\"20rem\"}\n )\n ]\n )\n\n ],\n style={\"background-color\":\"transparent\", \"border\":\"none\", \"width\":\"1400px\", \"margin\":\"auto\"}\n ),\n ],\n style={\"background-color\":\"transparent\",\"text-align\":\"center\"}\n ),\n html.Div(\n [\n html.P(\"© 2021 Powered by One Health Link \")\n ],\n style={\"magin\":\"auto\",\"position\":\"fixed\",\"bottom\":\"0\",\"width\":\"100%\",\"text-align\":\"center\", \"font-size\":\"1rem\"}\n ),\n \n ],\n style={\"background-color\":\"#fff\",\"height\":\"100vh\"})#, \"background-image\":\"linear-gradient(to bottom, rgba(105, 132, 214,0), rgba(105, 132, 214,1))\"})\n\n\n# Describe the layout/ UI of the app\napp.layout = html.Div(\n [dcc.Location(id=\"url\", refresh=False), html.Div(id=\"page-content\")]\n)\n\n\n# Update page\n@app.callback(Output(\"page-content\", \"children\"), [Input(\"url\", \"pathname\")])\ndef display_page(pathname):\n if pathname == \"/vbc-demo/contract-manager/\":\n return test_contract_manager.layout\n elif pathname == \"/vbc-demo/contract-manager-drilldown/\":\n return test_contract_manager_drilldown.layout\n elif pathname == \"/vbc-demo/contract-manager-bundle/\":\n return test_contract_manager_bundle.layout\n elif pathname == \"/vbc-demo/contract-manager-drilldown-bundle/\":\n return test_contract_manager_drilldown_bundle.layout\n # elif pathname == \"/vbc-demo/contract-optimizer-opportunities/\":\n # return test_contract_opportunities.layout\n # elif pathname == 
\"/vbc-demo/contract-optimizer/\":\n # return test_contract_optimizer.layout\n # elif pathname == \"/vbc-demo/contract-optimizer-bundle/\":\n # return test_contract_optimizer_bundle.layout\n # elif pathname == \"/vbc-demo/contract-generator/\":\n # return test_contract_generator.layout\n # elif pathname == \"/vbc-demo/contract-generator-recommend/\":\n # return test_contract_generator_recom.layout\n # elif pathname == \"/vbc-demo/contract-generator-bundle/\":\n # return test_contract_generator_bundle.layout\n elif pathname == \"/vbc-demo/contract-manager-bundle/\":\n return test_contract_manager_bundle.layout\n elif pathname == \"/vbc-demo/contract-manager-drilldown-bundle/\":\n return test_contract_manager_drilldown_bundle.layout\n # elif pathname == \"/vbc-demo/contract-manager/report-generator/\":\n # return test_contract_report_generator.layout\n # elif pathname == \"/vbc-demo/contract-manager-bundle/report-generator/\":\n # return test_contract_report_generator_bundle.layout\n elif pathname == \"/vbc-demo/contract-overview/\":\n return contract_overview.layout\n elif pathname == \"/vbc-demo/contract-optimizer/aco\":\n return aco_contract.layout\n elif pathname == \"/vbc-demo/contract-optimizer/bundle\":\n return bundle_contract.layout\n \n else:\n return launch_layout()\n\n#####################################3\n \n\nif __name__ == \"__main__\":\n\n app.run_server(host='0.0.0.0',port=8099, debug=False)\n\n \n","sub_path":"launch_page.py","file_name":"launch_page.py","file_ext":"py","file_size_in_byte":6373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"16336395","text":"from __future__ import division, print_function\n\n#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nEyeLinkSession.py\n\nCreated by Tomas Knapen on 2011-04-27.\nCopyright (c) 2011 __MyCompanyName__. 
All rights reserved.\n\"\"\"\n\nimport os, sys, datetime, pickle, re\nimport math\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats \nimport matplotlib\nmatplotlib.use('Agg') \nimport matplotlib.pyplot as pl\nimport pandas as pd\nimport numpy.linalg as LA\nimport bottleneck as bn\nimport glob\nimport seaborn as sn\nimport scipy.signal as signal\nimport sympy\nimport hedfpy\n\nfrom joblib import Parallel, delayed\nimport itertools\nfrom itertools import chain\n\nimport logging, logging.handlers, logging.config\n\nsys.path.append( os.environ['ANALYSIS_HOME'] )\n\nfrom Tools.log import *\n# from Tools.Operators import ArrayOperator\n# from Tools.other_scripts.plotting_tools import *\nfrom Tools.other_scripts import functions_jw as myfuncs\n# from Tools.other_scripts import savitzky_golay as savitzky_golay\n# from Tools.hedfpy.src import EDFOperator, HDFEyeOperator, EyeSignalOperator, CommandLineOperator\n\nfrom fir import FIRDeconvolution\n\nfrom IPython import embed as shell\n\nclass RLSession(object):\n\t\"\"\"Class object of the BistableMotionSession\"\"\"\n\tdef __init__(self, subject, experiment_name, project_directory, version, aliases, pupil_hp, loggingLevel = logging.DEBUG):\n\t\tself.subject = subject\n\t\tself.experiment_name = experiment_name\n\t\tself.aliases = aliases\n\t\tself.version = version\n\t\tself.pupil_hp = pupil_hp \n\n\t\ttry:\n\t\t\tos.mkdir(os.path.join( project_directory, experiment_name ))\n\t\t\tos.mkdir(os.path.join( project_directory, experiment_name, self.subject.initials ))\n\t\texcept OSError:\n\t\t\tpass\n\t\tself.project_directory = project_directory\n\t\tself.base_directory = os.path.join( self.project_directory, self.experiment_name, self.subject.initials )\n\t\t\n\t\tself.create_folder_hierarchy()\n\t\tself.hdf5_filename = os.path.join(self.base_directory, 'processed', self.subject.initials + '.hdf5')\n\t\t\n\t\tself.ho = hedfpy.HDFEyeOperator(self.hdf5_filename)\n\t\t#self.ho = HDFEyeOperator.HDFEyeOperator(self.hdf5_filename)\n\t\t\n\t\t# add logging for this session\n\t\t# sessions create their own logging file handler\n\t\tself.loggingLevel = loggingLevel\n\t\tself.logger = logging.getLogger( self.__class__.__name__ )\n\t\tself.logger.setLevel(self.loggingLevel)\n\t\taddLoggingHandler( logging.handlers.TimedRotatingFileHandler( os.path.join(self.base_directory, 'log', 'sessionLogFile.log'), when = 'H', delay = 2, backupCount = 10), loggingLevel = self.loggingLevel )\n\t\tloggingLevelSetup()\n\t\tfor handler in logging_handlers:\n\t\t\tself.logger.addHandler(handler)\n\t\tself.logger.info('starting analysis in ' + self.base_directory)\n\t\n\tdef create_folder_hierarchy(self):\n\t\t\"\"\"createFolderHierarchy does... 
guess what.\"\"\"\n\t\tthis_dir = self.project_directory\n\t\tfor d in [self.experiment_name, self.subject.initials]:\n\t\t\ttry:\n\t\t\t\tthis_dir = os.path.join(this_dir, d)\n\t\t\t\tos.mkdir(this_dir)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\tfor p in ['raw','processed','figs','log']:\n\t\t\ttry:\n\t\t\t\tos.mkdir(os.path.join(self.base_directory, p))\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\n\tdef import_raw_data(self, edf_files, aliases):\n\t\t\"\"\"import_raw_data loops across edf_files and their respective aliases and copies and renames them into the raw directory.\"\"\"\n\t\tfor edf_file, alias in zip(edf_files, aliases):\n\t\t\tself.logger.info('importing file ' + edf_file + ' as ' + alias)\n\t\t\t#ExecCommandLine('cp \"' + edf_file + '\" \"' + os.path.join(self.base_directory, 'raw', alias + '.edf\"') )\n\t\t\t\n\t\t\tCommandLineOperator.ExecCommandLine('cp \"' + edf_file + '\" \"' + os.path.join(self.base_directory, 'raw', alias + '.edf\"') )\n\n\tdef import_msg_data(self, aliases):\n\t\t\"\"\"import_all_data loops across the aliases of the sessions and converts the respective edf files, adds them to the self.ho's hdf5 file. \"\"\"\n\t\tfor alias in aliases:\n\t\t\t\n\t\t\tself.ho.add_edf_file(os.path.join(self.base_directory, 'raw', alias + '.edf'))\n\t\t\tself.ho.edf_message_data_to_hdf(alias = alias)\n\n\tdef import_gaze_data(self, aliases):\n\t\t\"\"\"import_all_data loops across the aliases of the sessions and converts the respective edf files, adds them to the self.ho's hdf5 file. \"\"\"\n\t\tfor alias in aliases:\n\t\n\t\t\tself.ho.add_edf_file(os.path.join(self.base_directory, 'raw', alias + '.edf'))\t\t\t \n\t\t\tself.ho.edf_gaze_data_to_hdf(alias = alias, \n\t\t\t\t\t\t\t\t\t\t pupil_hp = self.pupil_hp, \n\t\t\t\t\t\t\t\t\t\t pupil_lp = 4, \n\t\t\t\t\t\t\t\t\t\t maximal_frequency_filterbank = 0.05, \n\t\t\t\t\t\t\t\t\t\t minimal_frequency_filterbank = 0.002, \n\t\t\t\t\t\t\t\t\t\t nr_freq_bins_filterbank=20, \n\t\t\t\t\t\t\t\t\t\t tf_decomposition_filterbank='lp_butterworth')\t\t\t\n\t\t\n\tdef remove_HDF5(self):\n\t\tos.system('rm ' + os.path.join(self.base_directory, 'processed', self.subject.initials + '.hdf5') )\n\t\t\n\t\n\tdef adjust_eyelink_message_file(self): \n\t\t\"\"\"for this particular experiment, the sound event message string wass not properly written to the message file. 
Therefore, \n\t\terroneous_message is changed into correct_message \"\"\"\t\n\t\n\t\t#search for positive or negative [-1,1,2] sound events in message string \n\t\terroneous_message = 'MSG\\t([\\d\\.]*)\\ttrial (\\d*) reward ([-+]?\\d*) at : (\\d*.\\d*)'\n\t\tcorrect_message = 'MSG\\t([\\d\\.]*)\\treward ([-+]?\\d*) at (\\d*.\\d*)'\n\n\t\tfor alias in ['RL_train','RL_test']: \n\t\t\t\n\t\t\t#open message file for alias\t\n\t\t\tEDF_message_file = os.path.join(self.base_directory, 'raw', alias + '.msg')\t\t\n\t\t\twith open(EDF_message_file, 'r') as rEDF: \n\t\t\t\tmessage_string = rEDF.read()\n\t\t\t\tshell() \n\t\t\t\t#find all erroneous string values in message \n\t\t\t\terroneous_string_values = re.findall(erroneous_message, message_string) \n\t\t\t\tprint ('there are %i erroneous sound strings in %s BEFORE RE replacement for subject %s'%(len(erroneous_string_values), alias, self.subject.initials))\n\t\t\t\t\n\t\t\t\t#replace all erroneous strings with correct strings, using string values\n\t\t\t\tfor i in range(len(erroneous_string_values)): \n\t\t\t\t\tfill_in_erroneous_message = 'MSG\\t%s\\ttrial %s reward %s at : %s'%(erroneous_string_values[i][0], erroneous_string_values[i][1], erroneous_string_values[i][2], erroneous_string_values[i][3])\n\t\t\t\t\tfill_in_correct_message = 'MSG\\t%s\\treward %s at %s'%(erroneous_string_values[i][0], erroneous_string_values[i][2], erroneous_string_values[i][3])\t\t\t\t\n\t\t\t\t\tmessage_string = message_string.replace(fill_in_erroneous_message, fill_in_correct_message)\n\n\t\t\t\t#check whether there are still erroneous strings after replacement\n\t\t\t\terroneous_string_values = re.findall(re.compile(erroneous_message), message_string) \n\t\t\t\tcorrect_string_values = re.findall(re.compile(correct_message), message_string) \n\t\t\t\tprint ('there are %i INCORRECT RE messages left AFTER string replacement for subject %s'%(len(erroneous_string_values), self.subject.initials))\n\t\t\t\tprint ('there are %i CORRECT RE messages left AFTER string replacement for subject %s'%(len(correct_string_values), self.subject.initials))\n\t\t\t\trEDF.close()\n\t\t\t\n\t\t\t#open message file again to write the corrected message_string to \n\t\t\twith open(EDF_message_file, 'w') as wEDF: \n\t\t\t\twEDF.write(message_string)\n\t\t\t\twEDF.close() \n\n\n\tdef prepocessing_report(self, eye = 'L', downsample_rate=10):\n\t\t\n\t\tfor alias in self.aliases:\n\t\t\t# load times per session:\n\t\t\ttrial_times = self.ho.read_session_data(alias, 'trials')\n\t\t\ttrial_phase_times = self.ho.read_session_data(alias, 'trial_phases')\n\t\t\tsession_start_EL_time = np.array(trial_times['trial_start_EL_timestamp'])[0]\n\t\t\tsession_stop_EL_time = np.array(trial_times['trial_end_EL_timestamp'])[-1]\n\n\t\t\tsample_rate = self.ho.sample_rate_during_period([session_start_EL_time, session_stop_EL_time], alias)\n\n\t\t\tpupil_raw = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil', requested_eye = eye))\n\t\t\tpupil_int = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil_int', requested_eye = eye))\n\n\t\t\tpupil_bp = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil_bp', requested_eye = eye))\n\t\t\tpupil_lp = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 
'pupil_lp', requested_eye = eye))\n\t\t\tpupil_hp = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil_hp', requested_eye = eye))\n\t\t\tpupil_baseline = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil_baseline', requested_eye = eye))\n\t\t\n\t\t\tpupil_bp_clean = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil_bp_clean', requested_eye = eye))\n\t\t\tpupil_lp_clean = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = 'pupil_lp_clean', requested_eye = eye))\n\n\t\t\tx = sp.signal.decimate(np.arange(len(pupil_raw)) / float(sample_rate), downsample_rate, 1)\n\t\t\tpup_raw_dec = sp.signal.decimate(pupil_raw, downsample_rate, 1)\n\t\t\tpup_int_dec = sp.signal.decimate(pupil_int, downsample_rate, 1)\n\n\t\t\tpupil_bp_dec = sp.signal.decimate(pupil_bp, downsample_rate, 1)\n\t\t\tpupil_lp_dec = sp.signal.decimate(pupil_lp, downsample_rate, 1)\n\t\t\tpupil_hp_dec = sp.signal.decimate(pupil_hp, downsample_rate, 1)\n\t\t\tpupil_baseline_dec = sp.signal.decimate(pupil_baseline, downsample_rate, 1)\n\n\t\t\tpupil_bp_clean_dec = sp.signal.decimate(pupil_bp_clean, downsample_rate, 1)\n\t\t\tpupil_bp_clean_dec_z = (pupil_bp_clean_dec - np.mean(pupil_bp_clean_dec))/np.std(pupil_bp_clean_dec)\n\t\t\tpupil_lp_clean_dec = sp.signal.decimate(pupil_lp_clean, downsample_rate, 1)\n\t\t\tpupil_lp_clean_dec_z = (pupil_lp_clean_dec - np.mean(pupil_lp_clean_dec))/np.std(pupil_lp_clean_dec)\n\n\t\t\tymin = pupil_raw.min(); ymax = pupil_raw.max()\n\n\t\t\t#plot interpolated pupil:\n\t\t\tsn.set(style='ticks', font_scale=0.8)\n\t\t\tfig = pl.figure(figsize = (26,10))\n\t\t\ts = fig.add_subplot(411)\n\t\t\ts.set_title(self.subject.initials + ' standard IRF (blinks, saccades and interpolated x y and intercept fitted)', fontsize=12)\n\t\t\tpl.plot(x, pup_raw_dec, 'b'); pl.plot(x, pup_int_dec, 'g')\n\t\t\tpl.ylabel('pupil size'); pl.xlabel('time (s)')\n\t\t\tpl.legend(['raw pupil', 'blink interpolated pupil'])\n\n\t\t\ttps = (list(trial_phase_times[trial_phase_times['trial_phase_index'] == 2]['trial_phase_EL_timestamp']) - session_start_EL_time, list(trial_phase_times[trial_phase_times['trial_phase_index'] == 3]['trial_phase_EL_timestamp']) - session_start_EL_time)\n\t\t\tfor i in range(tps[0].shape[0]):\n\t\t\t\tpl.axvline(x = tps[0][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'r')\n\t\t\t\tpl.axvline(x = tps[1][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'k')\n\t\t\ts.set_ylim(ymin = pup_int_dec.min()-100, ymax = pup_int_dec.max()+100)\n\t\t\ts.set_xlim(xmin = tps[0][0] / float(sample_rate), xmax = tps[1][-1] / float(sample_rate))\n\n\t\t\ts = fig.add_subplot(412)\n\t\t\tpl.plot(x, pupil_bp_dec, 'b'); pl.plot(x, pupil_lp_dec, 'g');\n\t\t\tpl.ylabel('pupil size'); pl.xlabel('time (s)')\n\t\t\tpl.legend(['band_passed', 'lowpass'])\n\t\t\t\n\t\t\ttps = (list(trial_phase_times[trial_phase_times['trial_phase_index'] == 2]['trial_phase_EL_timestamp']) - session_start_EL_time, list(trial_phase_times[trial_phase_times['trial_phase_index'] == 3]['trial_phase_EL_timestamp']) - session_start_EL_time)\n\t\t\tfor i in range(tps[0].shape[0]):\n\t\t\t\tpl.axvline(x = tps[0][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'r')\n\t\t\t\tpl.axvline(x = tps[1][i] / 
float(sample_rate), ymin = ymin, ymax = ymax, color = 'k')\n\t\t\ts.set_xlim(xmin = tps[0][0] / float(sample_rate), xmax = tps[1][-1] / float(sample_rate))\n\n\t\t\ts = fig.add_subplot(413)\n\t\t\tpl.plot(x, pupil_bp_dec, 'b'); pl.plot(x, pupil_hp_dec, 'g');\n\t\t\tpl.ylabel('pupil size'); pl.xlabel('time (s)')\n\t\t\tpl.legend(['band_passed', 'highpass'])\n\n\t\t\ttps = (list(trial_phase_times[trial_phase_times['trial_phase_index'] == 2]['trial_phase_EL_timestamp']) - session_start_EL_time, list(trial_phase_times[trial_phase_times['trial_phase_index'] == 3]['trial_phase_EL_timestamp']) - session_start_EL_time)\n\t\t\tfor i in range(tps[0].shape[0]):\n\t\t\t\tpl.axvline(x = tps[0][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'r')\n\t\t\t\tpl.axvline(x = tps[1][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'k')\n\t\t\ts.set_xlim(xmin = tps[0][0] / float(sample_rate), xmax = tps[1][-1] / float(sample_rate))\n\t\t\t\n\t\t\ts = fig.add_subplot(414)\n\t\t\tpl.plot(x, pupil_bp_clean_dec_z, 'b'); pl.plot(x, pupil_lp_clean_dec_z, 'g');\n\t\t\tpl.ylabel('pupil size'); pl.xlabel('time (s)')\n\t\t\tpl.legend(['cleaned band pass', 'cleaned low pass'], fontsize=12)\n\n\t\t\ttps = (list(trial_phase_times[trial_phase_times['trial_phase_index'] == 2]['trial_phase_EL_timestamp']) - session_start_EL_time, list(trial_phase_times[trial_phase_times['trial_phase_index'] == 3]['trial_phase_EL_timestamp']) - session_start_EL_time)\n\t\t\tfor i in range(tps[0].shape[0]):\n\t\t\t\tpl.axvline(x = tps[0][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'r')\n\t\t\t\tpl.axvline(x = tps[1][i] / float(sample_rate), ymin = ymin, ymax = ymax, color = 'k')\n\t\t\ts.set_xlim(xmin = tps[0][0] / float(sample_rate), xmax = tps[1][-1] / float(sample_rate))\n\t\t\ts.set_ylim([-4,4])\n\t\t\tpl.tight_layout()\n\t\t\t \n\t\t\tpl.savefig(os.path.join(self.base_directory, 'figs', alias + '.pdf'))\n\t\t\t\n\t\t\tpl.close() \n\n\tdef calculate_blink_rate(self, data_type='pupil_bp'): \n\t\t\"\"\"Computes the blink rate (blinks per minute) during the RL_base run and stores it in the subject's hdf5 file. \"\"\"\n\n\t\tfor alias in ['RL_base']: \n\t\t\t \n\t\t\trun_timings = self.ho.read_session_data(alias, 'trials')\n\t\t\tsession_start_EL_time = int(run_timings['trial_start_EL_timestamp'])\n\t\t\tsession_stop_EL_time = int(run_timings['trial_end_EL_timestamp'])\n\t\t\tself.sample_rate = self.ho.sample_rate_during_period([session_start_EL_time, session_stop_EL_time], alias)\n\t\t\trun_duration = (session_stop_EL_time - session_start_EL_time)/self.sample_rate/60 #time in minutes \n\t\t\teyelink_blink_data = self.ho.read_session_data(alias, 'blinks_from_message_file')\n\n\t\t\teyeblinks_during_session = (np.array(eyelink_blink_data['start_timestamp']) > session_start_EL_time ) & (np.array(eyelink_blink_data['end_timestamp']) < session_stop_EL_time)\n\t\t\tblink_rate = np.sum(eyeblinks_during_session) / run_duration\n\n\t\t\twith pd.get_store(self.ho.input_object) as h5_file: \n\t\t\t\th5_file.put(\"/%s\"%('blinks/blink_rate'), pd.Series(blink_rate))\n\n\t\t\tself.logger.info('saved blink rate: %.3f for subject %s '%(blink_rate, self.subject.initials))\n\n\n\t\t\t# eyelink_blink_data['session_start_EL_time'] = pd.Series(session_start_EL_time, index=eyelink_blink_data.index)\n\t\t\t# eyelink_blink_data['session_stop_EL_time'] = pd.Series(session_stop_EL_time, index=eyelink_blink_data.index)\n\t\t\t# blink_file_name = os.path.join(self.base_directory, 'processed', self.subject.initials + '_blink_data.csv')\n\t\t\t# eyelink_blink_data.to_csv(blink_file_name, header=True, index=True, 
index_label=self.subject.initials)\n\n\n\n\tdef events_and_signals_in_time_train(self, data_type = 'pupil_bp', requested_eye = 'L'):\t\t\t\t\n\t\t\"\"\"Concatenates the z-scored pupil signal and the sound, blink and saccade event times across the six RL_train runs, and derives the behavioural indices (choice pairs, prediction errors, performance) used in the analyses below. \"\"\"\n\t\t\n\t\tpupil_data = []\n\t\tsound_times=[]\n\t\tblink_times=[]\n\t\tsaccade_times=[]\n\t\t\n\t\tsession_time=0\n\t\t\n\t\tfor alias in ['RL_train']:\n\n\t\t\t#concatenate pupil data runs within train session \n\t\t\tfirst_trial_in_run = [0,60,120,180,240,300]\n\t\t\trun_duration = 60\n\t\t\tfor idx, ftir in enumerate(first_trial_in_run):\n\t\t\t\t\n\t\t\t\t# load timing info per session:\n\t\t\t\ttrial_times = self.ho.read_session_data(alias, 'trials')\n\t\t\t\tbehavioural_data = self.ho.read_session_data(alias, 'parameters')\n\t\t\t\tsound_events_train = self.ho.read_session_data(alias, 'sounds')\n\t\t\t\tblink_events_train = self.ho.read_session_data(alias, 'blinks_from_message_file')\t\t\t\t\n\t\t\t\tsaccade_events_train = self.ho.read_session_data(alias, 'saccades_from_message_file')\n\n\t\t\t\tsession_start_EL_time = int(np.array(trial_times['trial_start_EL_timestamp'])[ftir])\n\t\t\t\tsession_stop_EL_time = int(np.array(trial_times['trial_end_EL_timestamp'])[ftir+run_duration-1])\n\t\t\t\teye_during_period = self.ho.eye_during_period([session_start_EL_time, session_stop_EL_time], alias)\t\t\t\t\t\n\t\t\t\t\n\t\t\t\tpupil = np.squeeze(self.ho.signal_during_period(time_period = [session_start_EL_time, session_stop_EL_time], alias = alias, signal = data_type, requested_eye = eye_during_period))\n\t\t\t\tpupil_data.append((pupil - np.mean(pupil))/ pupil.std()) #z-score\n\t\t\t\tself.sample_rate = self.ho.sample_rate_during_period([session_start_EL_time, session_stop_EL_time], alias)\n\t\t\t\t\n\t\t\t\t#concatenate event timings for each pupil block \n\t\t\t\tsound_times.append(np.array(sound_events_train['EL_timestamp'][ftir:ftir+run_duration] - session_start_EL_time + session_time)/self.sample_rate)\n\t\t\t\t\n\t\t\t\t# select blinks and saccades that fall within a single recording block. 
\n\t\t\t\tall_blink_times = np.array(blink_events_train['start_timestamp'])\t\n\t\t\t\tblinks_this_run = (all_blink_times < session_stop_EL_time) & (all_blink_times > session_start_EL_time)\n\t\t\t\tblink_times.append(np.array(all_blink_times[blinks_this_run] - session_start_EL_time + session_time)/self.sample_rate)\n\t\t\t\t\n\t\t\t\tall_saccade_times = np.array(saccade_events_train['start_timestamp'])\t\n\t\t\t\tsaccades_this_run = (all_saccade_times < session_stop_EL_time) & (all_saccade_times > session_start_EL_time)\n\t\t\t\tsaccade_times.append(np.array(all_saccade_times[saccades_this_run] - session_start_EL_time + session_time)/self.sample_rate)\n\t\t\t\t \n\t\t\t\tsession_time += session_stop_EL_time - session_start_EL_time\n\n\t\t\t\t\t\t\t\t\n\t\t\tself.sound_times_train = np.concatenate(sound_times)\n\t\t\tself.blink_times_train = np.concatenate(blink_times)\n\t\t\tself.saccade_times_train = np.concatenate(saccade_times)\n\t\t\tself.pupil_data_train = np.concatenate(pupil_data)\n\t\t\t\n\t\t\t#calculate accumulated points per choice-pair\n\t\t\tcurrent_stim = np.array(behavioural_data['current_stim'].astype(int))\n\t\t\tAB = np.array([(c_s == 12) or (c_s == 21) for c_s in current_stim])\n\t\t\tCD = np.array([(c_s == 34) or (c_s == 43) for c_s in current_stim])\n\t\t\tEF = np.array([(c_s == 56) or (c_s == 65) for c_s in current_stim])\n\t\t\tcurrent_reward = np.array(behavioural_data['current_reward'])\n\n\t\t\t#parameter indices \n\t\t\tself.key_indices_train = np.array([(behavioural_data['key_response'] == i) for i in [-1,1]]) #left, right\n\t\t\tself.optimal_response_indices_train = np.array([(behavioural_data['optimal_response_code'] == i) for i in [-1, 1]]) #left, right\n\t\t\tself.correct_response_indices_train = np.array([(behavioural_data['correct_response_code'] == i) for i in [-1, 1]]) #left, right\n\t\t\tself.correct_indices_train = np.array([(behavioural_data['correct'] == i) for i in [0,1]]) #incorrect, correct \n\t\t\t\n\t\t\tself.NRPE_indices = ((self.key_indices_train * self.optimal_response_indices_train) * self.correct_indices_train[0]).sum(axis=0, dtype=bool)\n\t\t\tself.NRPE_indices_AB = self.NRPE_indices * AB \n\t\t\tself.NRPE_indices_CD = self.NRPE_indices * CD\n\t\t\tself.NRPE_indices_EF = self.NRPE_indices * EF \n\t\t\thalf_of_all_trials = int(len(self.NRPE_indices)/2)\n\t\n\t\t\t#calculate percentage correct and its standard error per choice-pair \n\t\t\tpair_mean_correct = np.array([np.mean((self.correct_indices_train).astype(int)[1][c_p]) for c_p in [AB, CD, EF]])\n\t\t\tpair_se_correct = np.array([np.std((self.correct_indices_train).astype(int)[1][c_p])/np.sqrt(np.sum(c_p)) for c_p in [AB, CD, EF]])\n\n\t\t\t#reaction time distributions \n\t\t\tRT = np.array(behavioural_data['RT'])\n\t\t\t\n\t\t\tprint ('saving behavioral data of %s'%alias)\n\t\t\tbehavioral_file_name = os.path.join(self.base_directory, 'processed', self.subject.initials + '_behavioural_data.csv')\n\t\t\tbehavioural_data.to_csv(behavioral_file_name, header=True, index=True, index_label=self.subject.initials, float_format='%.1f')\n\t\t\t\t\t\t\n\t\t\t#plot accumulated points across trials\n\t\t\tsn.set(font_scale=1, style=\"ticks\")\t\n\t\t\tf = pl.figure(figsize=(10,5)) \n\t\t\ts = f.add_subplot(131)\t\t\t\t\n\t\t\tpl.plot(np.cumsum(current_reward[AB]), 'b', label='AB') \n\t\t\tpl.plot(np.cumsum(current_reward[CD]), 'g', label='CD' )\n\t\t\tpl.plot(np.cumsum(current_reward[EF]), 'r', label='EF')\n\t\t\tpl.plot(np.linspace(0,9.6, 120), 'b--', alpha=0.3, label = 'perfect AB' )\n\t\t\tpl.plot(np.linspace(0,8.4, 
120), 'g--', alpha=0.3, label = 'perfect CD' )\n\t\t\tpl.plot(np.linspace(0,7.2, 120), 'r--', alpha=0.3, label = 'perfect EF')\n\t\t\tpl.plot(np.linspace(0,6, 120), 'k:', alpha=0.8, label = 'chance level' )\n\t\t\tlegend = s.legend(loc='best', frameon=True)\n\t\t\tpl.xlabel('Trials')\n\t\t\tpl.ylabel('Accumulated points')\n\t\t\tsn.despine(offset=10)\n\t\t\tpl.tight_layout()\n\n\t\t\ts = f.add_subplot(132)\n\t\t\tpl.bar([0,1,2], pair_mean_correct)\n\t\t\ts.set_xticks([0.5, 1.5, 2.5])\n\t\t\ts.set_xticklabels(['AB', 'CD', 'EF'], rotation=45) \n\t\t\tpl.ylabel('percentage correct') \n\t\t\tpl.tight_layout()\n\t\t\tsn.despine(offset=10)\n\t\t\tpl.title('Subject %s'%self.subject.initials)\n\n\t\t\ts = f.add_subplot(133)\n\t\t\tfor c_p in [AB,CD,EF]: \n\t\t\t\tpl.hist(RT[c_p], alpha=0.5)\n\t\t\tpl.legend(['AB RT', 'CD RT', 'EF RT'])\n\t\t\tpl.tight_layout()\n\t\t\tsn.despine(offset=10)\n\t\t\tpl.xlabel('reaction time')\n\t\t\tpl.ylabel('count')\n\t\t\tpl.savefig(os.path.join(self.base_directory, 'figs', '%s_RL_train_performance.pdf'%self.subject.initials))\n\n\t\t\twith pd.get_store(self.ho.input_object) as h5_file: \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/RT'), pd.DataFrame(np.squeeze(np.array(RT)))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/correct'), pd.DataFrame(np.squeeze(np.array(self.correct_indices_train[1])))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/current_reward_AB'), pd.DataFrame(np.squeeze(np.array(current_reward[AB])))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/current_reward_CD'), pd.DataFrame(np.squeeze(np.array(current_reward[CD])))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/current_reward_EF'), pd.DataFrame(np.squeeze(np.array(current_reward[EF])))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/current_stim'), pd.DataFrame(np.squeeze(np.array(current_stim)))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/mean_percentage_correct'), pd.Series(np.squeeze(np.array(pair_mean_correct)))) \n\t\t\t\th5_file.put(\"/%s\"%('train_behaviour/se_percentage_correct'), pd.Series(np.squeeze(np.array(pair_se_correct))))\n\n\tdef deconvolve_blinks_saccades_and_sounds(self,analysis_sample_rate=10, \n\t\t\t\t\t\t\tinterval =[-0.5, 4.0], \n\t\t\t\t\t\t\tdata_type='pupil_bp_zscore'): \n\t\t\"\"\"first, we deconvolve standard events from the pupil signal \"\"\"\n\n\t\tself.events_and_signals_in_time_train(data_type = data_type)\n\t\tsubsample_ratio = int(self.sample_rate/analysis_sample_rate)\n\t\tinput_signal = self.pupil_data_train[::subsample_ratio]\n\t\t\n\t\tself.logger.info('starting deconvolution for subject %s of data_type %s in interval %s '%(self.subject.initials, data_type, str(interval)))\n\t\t##add regressors \n\t\tblink_times = [self.blink_times_train]\t\t\t\t\t\t\n\t\tsaccade_times = [self.saccade_times_train] \t\n\t\tsound_times = [self.sound_times_train]\n\n\t\tevents=[]\t\t\t\t\t\t\t \t#regressor events: \n\t\tevents.extend(blink_times) \t\t\t \t#[0]\n\t\tevents.extend(saccade_times)\t\t \t#[1]\n\t\tevents.extend(sound_times)\t\t \t\t#[2]\n\t\n\t\tcovariates = {\n\t\t\t'blinks.gain': np.ones(len(events[0])), \n\t\t\t'saccades.gain': np.ones(len(events[1])),\n\t\t\t'sounds.gain': np.ones(len(events[2])),\n\t\t\t}\n\n\t\tfd = FIRDeconvolution(\n\t\t\tsignal = input_signal, \n\t\t\tevents = events,\n\t\t\tevent_names = ['blinks','saccades', 'sounds'], \n\t\t\tsample_frequency = analysis_sample_rate, \n\t\t\tdeconvolution_interval = interval, \n\t\t\tdeconvolution_frequency = analysis_sample_rate,\n\t\t\tcovariates = covariates,\n\t\t\t) 
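\n\n\t\t# FIRDeconvolution builds one set of time-shifted regressors per covariate over the\n\t\t# deconvolution interval; the betas recovered after regression are the event-locked\n\t\t# pupil impulse responses for blinks, saccades and sounds.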
\n\n\t\tfd.create_design_matrix()\n\t\tplot_time = 5000\n\t\tf = pl.figure()\n\t\ts = f.add_subplot(111)\t\t\n\t\ts.set_title('design matrix (%i Hz)'%analysis_sample_rate)\n\t\tpl.imshow(fd.design_matrix[:,:plot_time], aspect = 0.075 * plot_time/fd.deconvolution_interval_size, interpolation = 'nearest', rasterized = True)\n\t\tsn.despine(offset=10)\n\t\tpl.savefig(os.path.join(self.base_directory, 'figs', '%s_RL_train_designmatrix.pdf'%self.subject.initials))\n\n\t\tfd.regress() \n\t\tfd.calculate_rsq()\n\t\tprint('r_squared for subject %s is: %.3f'%( self.subject.initials, fd.rsq))\n\n\t\tbetas = pd.DataFrame(np.zeros((fd.deconvolution_interval_size, len(fd.covariates))), columns=fd.covariates.keys())\n\t\tfor i,b in enumerate(fd.covariates.keys()): \n\t\t\tbetas[b] = np.squeeze(fd.betas_for_cov(covariate=b))\n\n\t\t#save deconvolution data in hdf5\n\t\tfolder_name = 'deconvolved_sound_blinks_saccades'\n\t\twith pd.get_store(self.ho.input_object) as h5_file:\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'time_points'), pd.Series(fd.deconvolution_interval_timepoints))\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'deconvolved_pupil_timecourses'), betas)\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'rsquared'), pd.Series(fd.rsq))\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'keys'), pd.DataFrame(fd.covariates.keys()))\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'residuals'), pd.Series(np.squeeze(fd.residuals)))\n\t\tself.logger.info('Saved deconvolution results for participant %s in folder %s' %(self.subject.initials, folder_name))\n\n\tdef deconvolve_negative_prediction_errors(self, analysis_sample_rate=10, interval = [-0.5,4.0], data_type = 'pupil_bp_zscore'): \n\t\t\"\"\" \"\"\"\n\n\t\tself.events_and_signals_in_time_train(data_type=data_type)\n\t\t\n\t\tfolder_name = 'deconvolved_sound_blinks_saccades'\n\t\twith pd.get_store(self.ho.input_object) as h5_file:\n\t\t\tresiduals = h5_file.get(\"/%s/%s\"%(folder_name, 'residuals'))\n\t\t \n\t\tNRPE_AB_events = [self.sound_times_train[self.NRPE_indices_AB]]\n\t\tNRPE_CD_events = [self.sound_times_train[self.NRPE_indices_CD]]\n\t\tNRPE_EF_events = [self.sound_times_train[self.NRPE_indices_EF]]\n\n\t\tevents=[]\t\t\t\t\t\t\t \t#regressor events: \n\t\tevents.extend(NRPE_AB_events) \t\t\t#[0]\n\t\tevents.extend(NRPE_CD_events)\t\t \t#[1]\n\t\tevents.extend(NRPE_EF_events)\t\t #[2]\n\n\t\tcovariates = {\n\t\t\t'NRPE_AB.gain': np.ones(len(events[0])), \n\t\t\t'NRPE_CD.gain': np.ones(len(events[1])),\n\t\t\t'NRPE_EF.gain': np.ones(len(events[2])),\n\t\t\t}\n\n\t\tfd = FIRDeconvolution(\n\t\t\tsignal = residuals, \n\t\t\tevents = events,\n\t\t\tevent_names = ['NRPE_AB','NRPE_CD', 'NRPE_EF'], \n\t\t\tsample_frequency = analysis_sample_rate, \n\t\t\tdeconvolution_interval = interval, \n\t\t\tdeconvolution_frequency = analysis_sample_rate,\n\t\t\tcovariates = covariates,\n\t\t\t) \n\n\t\tfd.create_design_matrix()\n\t\tplot_time = 5000\n\t\tf = pl.figure()\n\t\ts = f.add_subplot(111)\t\t\n\t\ts.set_title('design matrix (%i Hz)'%analysis_sample_rate)\n\t\tpl.imshow(fd.design_matrix[:,:plot_time], aspect = 0.075 * plot_time/fd.deconvolution_interval_size, interpolation = 'nearest', rasterized = True)\n\t\tsn.despine(offset=10)\n\n\t\tfd.regress() \n\t\tfd.calculate_rsq()\n\t\tprint('r_squared for subject %s is: %.3f'%( self.subject.initials, fd.rsq))\n\n\t\tbetas = pd.DataFrame(np.zeros((fd.deconvolution_interval_size, len(fd.covariates))), columns=fd.covariates.keys())\n\t\tfor i,b in enumerate(fd.covariates.keys()): \n\t\t\tbetas[b] = 
np.squeeze(fd.betas_for_cov(covariate=b))\n\n\t\t#save deconvolution data in hdf5\n\t\tfolder_name = 'deconvolved_negative_prediction_errors'\n\t\twith pd.get_store(self.ho.input_object) as h5_file:\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'time_points'), pd.Series(fd.deconvolution_interval_timepoints))\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'deconvolved_pupil_timecourses'), betas)\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'rsquared'), pd.Series(fd.rsq))\n\t\t\th5_file.put(\"/%s/%s\"%(folder_name, 'residuals'), pd.Series(np.squeeze(fd.residuals)))\n\t\tself.logger.info('Saved NRPE deconvolution results for participant %s in folder %s' %(self.subject.initials, folder_name))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"RLSession.py","file_name":"RLSession.py","file_ext":"py","file_size_in_byte":27291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"588748731","text":"import euler\n\n\nANSWER = 5943040885644\nLIMIT = 10 ** 10\nPRIME_LIST = euler.prime_list(int(LIMIT ** 0.5))\nSQUARES = [pow(n, 2) for n in PRIME_LIST]\nSQUARES_MINUS_PRIMES = [\n square - prime for square, prime in zip(SQUARES, PRIME_LIST)\n]\nSLOW = True\n\n\ndef solve(lst, n, fi_n2):\n result = 0\n items = [(lst, n, fi_n2, 0)]\n length = len(lst)\n while items:\n lst, n, fi_n2, index = items.pop()\n if euler.is_cube(fi_n2):\n result += n\n for i in range(index, length):\n new_n = n * PRIME_LIST[i]\n if new_n >= LIMIT:\n continue\n new_lst, new_fi_n2 = lst.copy(), fi_n2\n new_lst[i] += 1\n if new_lst[i] == 1:\n new_fi_n2 *= SQUARES_MINUS_PRIMES[i]\n else:\n new_fi_n2 *= SQUARES[i]\n items.append((new_lst, new_n, new_fi_n2, i))\n return result\n\n\ndef main():\n total = 0\n for i, prime in enumerate(PRIME_LIST):\n lst = [0 for _ in range(i + 1)]\n lst[-1] = 2\n n = prime ** 2\n fi_n2 = prime ** 3 * (prime - 1)\n total += solve(lst, n, fi_n2)\n return total\n\n\nif __name__ == '__main__':\n print(main())\n","sub_path":"python/problem342.py","file_name":"problem342.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"229339761","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport windowmgr as wm\n\n\ndef findbestlayout(num):\n tmp = int(np.ceil(np.sqrt(num)*2))\n if tmp%2==1:\n return (int((tmp+1)/2), int((tmp-1)/2))\n else:\n return (int(tmp/2), int(tmp/2))\n\nclass BidafFramework():\n def __init__(self, datasource, models, visualizers, settings):\n (h, v) = findbestlayout(len(visualizers))\n self.mgr = wm.WindowMgr(\"BIDAF\", 1400, 1000, h, v, 50, 'vertical')\n self.fig = self.mgr.get_figure()\n self.data_repo = datasource\n self.data_source = datasource\n self.models = []\n self.versions = {}\n self.visualizers = []\n self.callbacks = {}\n self.timer = self.fig.canvas.new_timer(interval = 30)\n self.timer.single_shot = True\n self.timer.add_callback(self.inner_loop)\n self.stopped = True\n self.mgr.install_key_action(\"enter\", self.toggle_run)\n\n for modcl in models:\n mod = modcl()\n mod.set_params(settings)\n mdict = mod.extract_model() \n self.versions[mdict['type']] = mdict['version']\n self.models.append(mod)\n\n hack=0\n d=0.25\n for viscl in visualizers:\n rect = self.mgr.get_next_rect()\n if hack==0:\n rect = (rect[0], rect[1]+d, rect[2], rect[3]-d)\n hack+=1\n elif hack==1:\n rect = (rect[0], rect[1], rect[2], rect[3]+d)\n hack+=1\n vis = viscl(self.fig, rect, self.data_repo, self.callbacks)\n 
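# apply the shared settings, then register the visualizer's screen region with the window manager
            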
vis.set_params(settings)\n self.mgr.register_target(rect, vis)\n self.visualizers.append(vis)\n \n\n def handle_message(self, dict_msg):\n # 0 - init original features names\n # self.data_repo.set_original_feature_names([name for name in dict_msg.keys() if name not in ['time','entity']])\n # 1 - send data to models, collecting the resulting features\n resultlist = [m.handle_data(dict_msg) for m in self.models]\n # 2 - add all results to dictmsg and add it to repo\n for res in resultlist:\n if res is not None:\n dict_msg.update(res)\n # self.data_repo.add(dict_msg)\n # 3 - update old messages and/or generate new model\n newmodels = []\n features_changed = False\n for m in self.models:\n # get new models\n if m.model_version() > self.versions[m.model_type()]:\n newmodels.append(m.extract_model())\n self.versions[m.model_type()] = m.model_version()\n # update new features\n if m.features_changed():\n self.data_repo.update(m.update_features())\n # We need to redraw visualizations\n features_changed = True\n # 4 - Visualize current message\n for vis in self.visualizers:\n vis.handle_data(dict_msg)\n # 5 - visualize models\n for nmod in newmodels:\n for vis in self.visualizers:\n vis.redraw_model(nmod)\n # 6 - visualize updated features (only if a module changed old features)\n features_changed = True ## Checking interaction\n if features_changed:\n for vis in self.visualizers:\n vis.redraw_features()\n # 7 - Now we draw for every message. And clean up.\n # self.data_repo.cleanup()\n #self.fig.canvas.draw()\n #self.fig.canvas.flush_events()\n # Done processing modules\n\n def handle_batch_data(self, dictdata):\n # 0 - init original features names\n # if len(dictdata) > 0:\n # self.data_repo.set_original_feature_names([name for name in dictdata[0].keys() if name not in ['time','entity']])\n # 1 - send data to models, collecting the resulting features\n resultlist = [m.handle_batch_data(dictdata) for m in self.models]\n # 2 - add all results to dictmsg and add it to repo\n # for dict_msg in dictdata:\n # self.data_repo.add(dict_msg)\n for res in resultlist:\n if res is not None:\n self.data_repo.update(res)\n # 3 - generate new model\n newmodels = []\n for m in self.models:\n # get new models\n if m.model_version() > self.versions[m.model_type()]:\n newmodels.append(m.extract_model())\n self.versions[m.model_type()] = m.model_version()\n # 4 - Don't visualize current messages now, redraw_features below\n # 5 - visualize models\n for nmod in newmodels:\n for vis in self.visualizers:\n vis.redraw_model(nmod)\n # 6 - visualize updated features (only if a module changed old features)\n for vis in self.visualizers:\n vis.redraw_features()\n # 7 - Now we draw for every message. And clean up. 
Batch is unlimited.\n        #self.data_repo.cleanup()\n        self.fig.canvas.draw()\n        self.fig.canvas.flush_events()\n        # Done processing modules\n\n    def inner_loop(self):\n        if not self.stopped and self.data_source.available():\n            dict_msg = self.data_source.next()\n            self.handle_message(dict_msg)\n            plt.draw()\n        if not self.stopped: # might have been stopped meanwhile\n            self.timer.start()\n\n    def toggle_run(self):\n        if self.stopped:\n            self.start()\n        else:\n            self.stop()\n\n    def start(self):\n        self.stopped = False\n        self.timer.start()\n\n    def stop(self):\n        self.stopped = True\n        self.timer.stop()\n\n    def run_batch(self):\n        data = self.data_source.all()\n        self.handle_batch_data(data)\n","sub_path":"bidaf/bidaf_framework.py","file_name":"bidaf_framework.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"388452724","text":"# (1,1) -> (N,M)\n# moves allowed: down, right, or diagonally down-right\n# all of the candy in a room is taken when the room is visited\n\n# dp[i][j] == maximum candy collected upon reaching (i, j)\n# dp[i][j] = field[i][j] + max(dp[i-1][j], dp[i][j-1], dp[i-1][j-1])\n\n# import sys\n# input = sys.stdin.readline\n\n# def solution(N, M):\n#     field = [[0] * (M+1)] + [[0] + list(map(int, input().split())) for i in range(N)]\n#     dp = [[0] * (M+1) for i in range(N+1)]\n#     for i in range(1, N+1):\n#         for j in range(1, M+1):\n#             dp[i][j] = field[i][j] + max(dp[i-1][j], dp[i][j-1], dp[i-1][j-1])\n#     print(dp[N][M])\n\n# if __name__ == '__main__':\n#     solution(*map(int, input().split()))\n\n\nimport sys\ninput = sys.stdin.readline\n\ndef solution(N, M):\n    dp = [0] * (M+1)\n    for _ in range(N):\n        line = [0] + list(map(int, input().split()))\n        for i in range(1, M+1):\n            dp[i] = line[i] + max(dp[i], dp[i-1], line[i-1])\n    print(dp[-1])\n\nif __name__ == '__main__':\n    solution(*map(int, input().split()))","sub_path":"python/bojprobs/알고리즘 분류/다이나믹 프로그래밍/boj_11048_이동하기.py","file_name":"boj_11048_이동하기.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"47181353","text":"from tkinter import *\n\n\nclass Window(Frame): # Frame is a class within tkinter\n\n    def __init__(self, master=None): # self refers to this instance of the class\n        Frame.__init__(self, master)\n        self.master = master\n        self.pack()\n        self.create_widgets()\n\n    def create_widgets(self):\n        self.button1 = Button(self, text=\"This is the first button\")\n        self.button1.grid()\n\n        self.button2 = Button(self)\n        self.button2.grid()\n        self.button2.configure(text=\"This will show up text\")\n\n        self.button3 = Button(self)\n        self.button3.grid()\n        self.button3[\"text\"] = \"This will also show text\"\n\n\nroot = Tk()\nroot.title(\"Lazy Buttons\")\nroot.geometry(\"200x85\")\n\napp = Window(root)\n\nroot.mainloop()\n","sub_path":"src/Ex14/object_oriented.py","file_name":"object_oriented.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"202285496","text":"##############################################################################\n# Imports:\n\nfrom flask import render_template\nfrom htmlmin import Minifier\n\n\n##############################################################################\n# Minify functions:\n\ndef minify_data(wsgi, data):\n    if wsgi.config['MINIFY_PAGES']:\n        _setup_minifier(wsgi)\n        data = wsgi.extensions['_minifier'].minify(data)\n    return data\n\n\ndef minify_render(wsgi, template, **kargs):\n    data = 
render_template(template, **kargs)\n    return minify_data(wsgi, data)\n\n\n##############################################################################\n# Private Helpers:\n\ndef _setup_minifier(wsgi):\n    if not hasattr(wsgi, 'extensions'):\n        wsgi.extensions = {}\n\n    if not hasattr(wsgi.extensions, '_minifier'):\n        wsgi.extensions['_minifier'] = Minifier(\n            remove_empty_space=True,\n            remove_all_empty_space=True,\n            reduce_empty_attributes=True\n        )\n","sub_path":"our_place/minifier.py","file_name":"minifier.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"112824517","text":"#!/usr/bin/python\n\"\"\"\n  (C) Copyright 2018-2020 Intel Corporation.\n\n  Licensed under the Apache License, Version 2.0 (the \"License\");\n  you may not use this file except in compliance with the License.\n  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n  Unless required by applicable law or agreed to in writing, software\n  distributed under the License is distributed on an \"AS IS\" BASIS,\n  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  See the License for the specific language governing permissions and\n  limitations under the License.\n\n  GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE\n  The Government's rights to use, modify, reproduce, release, perform, display,\n  or disclose this software are subject to the terms of the Apache License as\n  provided in Contract No. B609815.\n  Any reproduction of computer software, computer software documentation, or\n  portions thereof marked with this legend must also reproduce the markings.\n\"\"\"\nfrom logging import getLogger\nfrom importlib import import_module\nimport re\nimport time\nimport signal\n\nfrom avocado.utils import process\n\nfrom command_utils_base import \\\n    CommandFailure, BasicParameter, NamedParameter, ObjectWithParameters, \\\n    CommandWithParameters, YamlParameters, EnvironmentVariables\nfrom general_utils import check_file_exists, stop_processes, get_log_file, \\\n    run_command, DaosTestError\n\n\nclass ExecutableCommand(CommandWithParameters):\n    \"\"\"A class for a command with parameters.\"\"\"\n\n    # A list of regular expressions for each class method that produces a\n    # CmdResult object. Used by the self.get_output() method to return specific\n    # values from the standard output yielded by the method.\n    METHOD_REGEX = {\"run\": r\"(.*)\"}\n\n    def __init__(self, namespace, command, path=\"\", subprocess=False):\n        \"\"\"Create an ExecutableCommand object.\n\n        Uses Avocado's utils.process module to run a command str provided.\n\n        Args:\n            namespace (str): yaml namespace (path to parameters)\n            command (str): string of the command to be executed.\n            path (str, optional): path to location of command binary file.\n                Defaults to \"\".\n            subprocess (bool, optional): whether the command is run as a\n                subprocess. Defaults to False.\n        \"\"\"\n        super(ExecutableCommand, self).__init__(namespace, command, path)\n        self._process = None\n        self.run_as_subprocess = subprocess\n        self.timeout = None\n        self.exit_status_exception = True\n        self.verbose = True\n        self.env = None\n        self.sudo = False\n\n        # A list of environment variable names to set and export prior to\n        # running the command. Values can be set via the get_environment()\n        # method and included in the command string by the set_environment()\n        # method.\n        self._env_names = []\n\n        # Define a list of executable names associated with the command. 
This\n        # list is used to generate the 'command_regex' property, which can be\n        # used to check on the progress or terminate the command.\n        self._exe_names = [self.command]\n\n    def __str__(self):\n        \"\"\"Return the command with all of its defined parameters as a string.\n\n        Returns:\n            str: the command with all the defined parameters\n\n        \"\"\"\n        value = super(ExecutableCommand, self).__str__()\n        if self.sudo:\n            value = \" \".join([\"sudo -n\", value])\n        return value\n\n    @property\n    def process(self):\n        \"\"\"Getter for process attribute of the ExecutableCommand class.\"\"\"\n        return self._process\n\n    @property\n    def command_regex(self):\n        \"\"\"Get the regular expression to use to search for the command.\n\n        Typical use would include combining with pgrep to verify a subprocess\n        is running.\n\n        Returns:\n            str: regular expression to use to search for the command\n\n        \"\"\"\n        return \"'({})'\".format(\"|\".join(self._exe_names))\n\n    def run(self):\n        \"\"\"Run the command.\n\n        Raises:\n            CommandFailure: if there is an error running the command\n\n        \"\"\"\n        if self.run_as_subprocess:\n            self._run_subprocess()\n            return None\n        return self._run_process()\n\n    def _run_process(self):\n        \"\"\"Run the command as a foreground process.\n\n        Raises:\n            CommandFailure: if there is an error running the command\n\n        \"\"\"\n        command = self.__str__()\n        try:\n            # Block until the command is complete or times out\n            return run_command(\n                command, self.timeout, self.verbose, self.exit_status_exception,\n                \"combined\", env=self.env)\n\n        except DaosTestError as error:\n            # Command failed or possibly timed out\n            raise CommandFailure(error)\n\n    def _run_subprocess(self):\n        \"\"\"Run the command as a sub process.\n\n        Raises:\n            CommandFailure: if there is an error running the command\n\n        \"\"\"\n        if self._process is None:\n            # Start the job manager command as a subprocess\n            kwargs = {\n                \"cmd\": self.__str__(),\n                \"verbose\": self.verbose,\n                \"allow_output_check\": \"combined\",\n                \"shell\": False,\n                \"env\": self.env,\n                \"sudo\": self.sudo,\n            }\n            self._process = process.SubProcess(**kwargs)\n            self._process.start()\n\n            # Determine if the command has launched correctly using its\n            # check_subprocess_status() method.\n            if not self.check_subprocess_status(self._process):\n                msg = \"Command '{}' did not launch correctly\".format(self)\n                self.log.error(msg)\n                raise CommandFailure(msg)\n        else:\n            self.log.info(\"Process is already running\")\n\n    def check_subprocess_status(self, sub_process):\n        \"\"\"Verify command status when called in a subprocess.\n\n        Optional method to provide a means for detecting successful command\n        execution when running the command as a subprocess.\n\n        Args:\n            sub_process (process.SubProcess): subprocess used to run the command\n\n        Returns:\n            bool: whether or not the command progress has been detected\n\n        \"\"\"\n        self.log.info(\n            \"Checking status of the %s command in %s\",\n            self._command, sub_process)\n        return True\n\n    def stop(self):\n        \"\"\"Stop the subprocess command.\n\n        Raises:\n            CommandFailure: if unable to stop\n\n        \"\"\"\n        if self._process is not None:\n            # Send a SIGTERM to stop the subprocess and if it is still\n            # running after 5 seconds send a SIGKILL and report an error\n            signal_list = [signal.SIGTERM, signal.SIGKILL]\n\n            # Turn off verbosity to keep the logs clean as the server stops\n            self._process.verbose = False\n\n            # Send signals while the process is still running\n            state = None\n            while self._process.poll() is None and signal_list:\n                signal_to_send = signal_list.pop(0)\n                msg = \"before 
sending signal {}\".format(signal_to_send)\n                state = self.get_subprocess_state(msg)\n                self.log.info(\n                    \"Sending signal %s to %s (state=%s)\", str(signal_to_send),\n                    self._command, str(state))\n                self._process.send_signal(signal_to_send)\n                if signal_list:\n                    time.sleep(5)\n\n            if not signal_list:\n                if state and (len(state) > 1 or state[0] not in (\"D\", \"Z\")):\n                    # Indicate an error if the process required a SIGKILL and\n                    # either multiple processes were still found running or the\n                    # parent process was in any state except uninterruptible\n                    # sleep (D) or zombie (Z).\n                    raise CommandFailure(\"Error stopping '{}'\".format(self))\n\n            self.log.info(\"%s stopped successfully\", self.command)\n            self._process = None\n\n    def get_subprocess_state(self, message=None):\n        \"\"\"Display the state of the subprocess.\n\n        Args:\n            message (str, optional): additional text to include in output.\n                Defaults to None.\n\n        Returns:\n            list: a list of process states for the process found associated with\n                the subprocess pid.\n\n        \"\"\"\n        state = None\n        if self._process is not None:\n            self.log.debug(\n                \"%s processes still running%s:\", self.command,\n                \" {}\".format(message) if message else \"\")\n            command = \"/usr/bin/ps --forest -o pid,stat,time,cmd {}\".format(\n                self._process.get_pid())\n            result = process.run(command, 10, True, True, \"combined\")\n\n            # Get the state of the process from the output\n            state = re.findall(\n                r\"\\d+\\s+([DRSTtWXZ<NLsl+]+)\\s+\\d+:\\d+:\\d+\\s+.*\", result.stdout)\n        return state\n\n\nclass CommandWithSubCommand(ExecutableCommand):\n    \"\"\"A class for a command with a sub command.\"\"\"\n\n    def __init__(self, namespace, command, path=\"\", subprocess=False):\n        \"\"\"Create a CommandWithSubCommand object.\n\n        Args:\n            namespace (str): yaml namespace (path to parameters)\n            command (str): string of the command to be executed.\n            path (str, optional): path to location of command binary file.\n                Defaults to \"\".\n            subprocess (bool, optional): whether the command is run as a\n                subprocess. Defaults to False.\n        \"\"\"\n        super(CommandWithSubCommand, self).__init__(namespace, command, path)\n\n        # Define the sub-command parameter whose value is used to assign the\n        # sub-command's CommandWithParameters-based class. Use the command to\n        # create uniquely named yaml parameter names.\n        #\n        # This parameter can be specified in the test yaml like so:\n        #   <command>:\n        #     <command>_sub_command: <sub_command>\n        #     <sub_command>:\n        #       <sub_command>_sub_command: <sub_command_sub_command>\n        #\n        self.sub_command = NamedParameter(\n            \"{}_sub_command\".format(self._command), None)\n\n        # Define the class to represent the active sub-command and its specific\n        # parameters. Multiple sub-commands may be available, but only one can\n        # be active at a given time.\n        #\n        # The self.get_sub_command_class() method is called after obtaining the\n        # main command's parameter values, in self.get_params(), to assign the\n        # sub-command's class. This is typically a class based upon the\n        # CommandWithParameters class, but can be any object with a __str__()\n        # method (including a simple str object).\n        #\n        self.sub_command_class = None\n\n    def get_param_names(self):\n        \"\"\"Get a sorted list of the names of the BasicParameter attributes.\n\n        Ensure the sub command appears at the end of the list\n\n        Returns:\n            list: a list of class attribute names used to define parameters\n\n        \"\"\"\n        names = self.get_attribute_names(BasicParameter)\n        names.append(names.pop(names.index(\"sub_command\")))\n        return names\n\n    def get_params(self, test):\n        \"\"\"Get values for all of the command params from the yaml file.\n\n        Calls self.get_sub_command_class() to assign the self.sub_command_class\n        after obtaining the latest self.sub_command definition. 
If the\n self.sub_command_class is assigned to an ObjectWithParameters-based\n class its get_params() method is also called.\n\n Args:\n test (Test): avocado Test object\n \"\"\"\n super(CommandWithSubCommand, self).get_params(test)\n self.get_sub_command_class()\n if isinstance(self.sub_command_class, ObjectWithParameters):\n self.sub_command_class.get_params(test)\n\n def get_sub_command_class(self):\n \"\"\"Get the class assignment for the sub command.\n\n Should be overridden to assign the self.sub_command_class using the\n latest self.sub_command definition.\n\n Override this method with sub_command_class assignment that maps to the\n expected sub_command value.\n \"\"\"\n self.sub_command_class = None\n\n def get_str_param_names(self):\n \"\"\"Get a sorted list of the names of the command attributes.\n\n If the sub-command parameter yields a sub-command class, replace the\n sub-command value with the resulting string from the sub-command class\n when assembling that command string.\n\n Returns:\n list: a list of class attribute names used to define parameters\n for the command.\n\n \"\"\"\n names = self.get_param_names()\n if self.sub_command_class is not None:\n index = names.index(\"sub_command\")\n names[index] = \"sub_command_class\"\n return names\n\n def set_sub_command(self, value):\n \"\"\"Set the command's sub-command value and update the sub-command class.\n\n Args:\n value (str): sub-command value\n \"\"\"\n self.sub_command.value = value\n self.get_sub_command_class()\n\n def _get_result(self):\n \"\"\"Get the result from running the configured command.\n\n Returns:\n CmdResult: an avocado CmdResult object containing the command\n information, e.g. exit status, stdout, stderr, etc.\n\n Raises:\n CommandFailure: if the command fails.\n\n \"\"\"\n result = None\n try:\n result = self.run()\n except CommandFailure as error:\n raise CommandFailure(\n \"<{}> command failed: {}\".format(self.command, error))\n\n return result\n\n\nclass SubProcessCommand(CommandWithSubCommand):\n \"\"\"A class for a command run as a subprocess with a sub command.\n\n Example commands: daos_agent, daos_server\n \"\"\"\n\n def __init__(self, namespace, command, path=\"\", timeout=60):\n \"\"\"Create a SubProcessCommand object.\n\n Args:\n namespace (str): yaml namespace (path to parameters)\n command (str): string of the command to be executed.\n path (str, optional): path to location of command binary file.\n Defaults to \"\".\n timeout (int, optional): number of seconds to wait for patterns to\n appear in the subprocess output. 
Defaults to 60 seconds.\n \"\"\"\n super(SubProcessCommand, self).__init__(namespace, command, path, True)\n\n # Attributes used to determine command success when run as a subprocess\n # See self.check_subprocess_status() for details.\n self.pattern = None\n self.pattern_count = 1\n self.pattern_timeout = BasicParameter(timeout, timeout)\n\n def get_str_param_names(self):\n \"\"\"Get a sorted list of the names of the command attributes.\n\n Exclude the 'pattern_timeout' BasicParameter value from the command\n string as it is only used internally to the class.\n\n Returns:\n list: a list of class attribute names used to define parameters\n for the command.\n\n \"\"\"\n names = self.get_param_names()\n names.remove(\"pattern_timeout\")\n if self.sub_command_class is not None:\n index = names.index(\"sub_command\")\n names[index] = \"sub_command_class\"\n return names\n\n def check_subprocess_status(self, sub_process):\n \"\"\"Verify the status of the command started as a subprocess.\n\n Continually search the subprocess output for a pattern (self.pattern)\n until the expected number of patterns (self.pattern_count) have been\n found (typically one per host) or the timeout (self.pattern_timeout)\n is reached or the process has stopped.\n\n Args:\n sub_process (process.SubProcess): subprocess used to run the command\n\n Returns:\n bool: whether or not the command progress has been detected\n\n \"\"\"\n complete = True\n self.log.info(\n \"Checking status of the %s command in %s with a %s second timeout\",\n self._command, sub_process, self.pattern_timeout.value)\n\n if self.pattern is not None:\n detected = 0\n complete = False\n timed_out = False\n start = time.time()\n\n # Search for patterns in the subprocess output until:\n # - the expected number of pattern matches are detected (success)\n # - the time out is reached (failure)\n # - the subprocess is no longer running (failure)\n while not complete and not timed_out and sub_process.poll() is None:\n output = sub_process.get_stdout()\n detected = len(re.findall(self.pattern, output))\n complete = detected == self.pattern_count\n timed_out = time.time() - start > self.pattern_timeout.value\n\n # Summarize results\n msg = \"{}/{} '{}' messages detected in {}/{} seconds\".format(\n detected, self.pattern_count, self.pattern,\n time.time() - start, self.pattern_timeout.value)\n\n if not complete:\n # Report the error / timeout\n self.log.info(\n \"%s detected - %s:\\n%s\",\n \"Time out\" if timed_out else \"Error\",\n msg,\n sub_process.get_stdout())\n\n # Stop the timed out process\n if timed_out:\n self.stop()\n else:\n # Report the successful start\n self.log.info(\n \"%s subprocess startup detected - %s\", self._command, msg)\n\n return complete\n\n\nclass YamlCommand(SubProcessCommand):\n \"\"\"Defines a sub-process command that utilizes a yaml configuration file.\n\n Example commands: daos_agent, daos_server, dmg\n \"\"\"\n\n def __init__(self, namespace, command, path=\"\", yaml_cfg=None, timeout=60):\n \"\"\"Create a YamlCommand command object.\n\n Args:\n namespace (str): yaml namespace (path to parameters)\n command (str): string of the command to be executed.\n yaml_cfg (YamlParameters, optional): yaml configuration parameters.\n Defaults to None.\n path (str, optional): path to location of daos command binary.\n Defaults to \"\"\n timeout (int, optional): number of seconds to wait for patterns to\n appear in the subprocess output. 
Defaults to 60 seconds.\n \"\"\"\n super(YamlCommand, self).__init__(namespace, command, path, timeout)\n\n # Command configuration yaml file\n self.yaml = yaml_cfg\n\n def get_params(self, test):\n \"\"\"Get values for the daos command and its yaml config file.\n\n Args:\n test (Test): avocado Test object\n \"\"\"\n super(YamlCommand, self).get_params(test)\n if isinstance(self.yaml, YamlParameters):\n self.yaml.get_params(test)\n\n def create_yaml_file(self):\n \"\"\"Create the yaml file with the current yaml file parameters.\n\n This should be called before running the daos command and after all the\n yaml file parameters have been defined. Any updates to the yaml file\n parameter definitions would require calling this method before calling\n the daos command in order for them to have any effect.\n \"\"\"\n if isinstance(self.yaml, YamlParameters):\n self.yaml.create_yaml()\n\n def set_config_value(self, name, value):\n \"\"\"Set the yaml configuration parameter value.\n\n Args:\n name (str): name of the yaml configuration parameter\n value (object): value to set\n\n Returns:\n bool: if the attribute name was found and the value was set\n\n \"\"\"\n status = False\n if isinstance(self.yaml, YamlParameters):\n status = self.yaml.set_value(name, value)\n return status\n\n def get_config_value(self, name):\n \"\"\"Get the value of the yaml configuration parameter name.\n\n Args:\n name (str): name of the yaml configuration parameter from which to\n get the value\n\n Returns:\n object: the yaml configuration parameter value or None\n\n \"\"\"\n value = None\n if isinstance(self.yaml, YamlParameters):\n value = self.yaml.get_value(name)\n\n return value\n\n def _get_result(self):\n \"\"\"Generate the yaml config if defined, then call the parent method.\n\n Returns:\n CmdResult: an avocado CmdResult object containing the command\n information, e.g. 
exit status, stdout, stderr, etc.\n\n        Raises:\n            CommandFailure: if the command fails.\n\n        \"\"\"\n        if self.yaml:\n            self.create_yaml_file()\n\n        return super(YamlCommand, self)._get_result()\n\n\nclass SubprocessManager(object):\n    \"\"\"Defines an object that manages a sub process launched with orterun.\"\"\"\n\n    def __init__(self, command, manager=\"Orterun\"):\n        \"\"\"Create a SubprocessManager object.\n\n        Args:\n            command (YamlCommand): command to manage as a subprocess\n            manager (str, optional): the name of the JobManager class used to\n                manage the YamlCommand defined through the \"job\" attribute.\n                Defaults to \"Orterun\"\n        \"\"\"\n        self.log = getLogger(__name__)\n\n        # Define the JobManager class used to manage the command as a subprocess\n        try:\n            manager_module = import_module(\"job_manager_utils\")\n            manager_class = getattr(manager_module, manager)\n        except (ImportError, AttributeError) as error:\n            raise CommandFailure(\n                \"Invalid '{}' job manager class: {}\".format(manager, error))\n        self.manager = manager_class(command, subprocess=True)\n\n        # Define the list of hosts that will execute the daos command\n        self._hosts = []\n\n    def __str__(self):\n        \"\"\"Get the complete manager command string.\n\n        Returns:\n            str: the complete manager command string\n\n        \"\"\"\n        return str(self.manager)\n\n    @property\n    def hosts(self):\n        \"\"\"Get the hosts used to execute the daos command.\"\"\"\n        return self._hosts\n\n    @hosts.setter\n    def hosts(self, value):\n        \"\"\"Set the hosts used to execute the daos command.\n\n        Args:\n            value (tuple): a tuple of a list of hosts, a path in which to create\n                the hostfile, and a number of slots to specify per host in the\n                hostfile (can be None)\n        \"\"\"\n        self._set_hosts(*value)\n\n    def _set_hosts(self, hosts, path, slots):\n        \"\"\"Set the hosts used to execute the daos command.\n\n        Defined as a private method to enable overriding the setter method.\n\n        Args:\n            hosts (list): list of hosts on which to run the command\n            path (str): path in which to create the hostfile\n            slots (int): number of slots per host to specify in the hostfile\n        \"\"\"\n        self._hosts = hosts\n        self.manager.assign_hosts(self._hosts, path, slots)\n        self.manager.assign_processes(len(self._hosts))\n\n    def get_params(self, test):\n        \"\"\"Get values for all of the command params from the yaml file.\n\n        Use the yaml file parameter values to assign the server command and\n        orterun command parameters.\n\n        Args:\n            test (Test): avocado Test object\n        \"\"\"\n        # Get the parameters for the JobManager command parameters\n        self.manager.get_params(test)\n\n        # Get the values for the job parameters\n        self.manager.job.get_params(test)\n\n    def start(self):\n        \"\"\"Start the daos command.\n\n        Raises:\n            CommandFailure: if the daos command fails to start\n\n        \"\"\"\n        # Create the yaml file for the daos command\n        self.manager.job.create_yaml_file()\n\n        # Start the daos command\n        try:\n            self.manager.run()\n        except CommandFailure:\n            # Kill the subprocess, anything that might have started\n            self.kill()\n            raise CommandFailure(\n                \"Failed to start {}.\".format(str(self.manager.job)))\n\n    def stop(self):\n        \"\"\"Stop the daos command.\"\"\"\n        self.manager.stop()\n\n    def kill(self):\n        \"\"\"Forcibly terminate any sub process running on hosts.\"\"\"\n        regex = self.manager.job.command_regex\n        result = stop_processes(self._hosts, regex)\n        if 0 in result and len(result) == 1:\n            print(\n                \"No remote {} processes killed (none found), done.\".format(\n                    regex))\n        else:\n            print(\n                \"***At least one remote {} process needed to be 
killed! Please \"\n \"investigate/report.***\".format(regex))\n\n def verify_socket_directory(self, user):\n \"\"\"Verify the domain socket directory is present and owned by this user.\n\n Args:\n user (str): user to verify has ownership of the directory\n\n Raises:\n CommandFailure: if the socket directory does not exist or is not\n owned by the user\n\n \"\"\"\n if self._hosts and hasattr(self.manager.job, \"yaml\"):\n directory = self.get_user_file()\n status, nodes = check_file_exists(self._hosts, directory, user)\n if not status:\n raise CommandFailure(\n \"{}: Server missing socket directory {} for user {}\".format(\n nodes, directory, user))\n\n def set_config_value(self, name, value):\n \"\"\"Set the yaml configuration parameter value.\n\n Args:\n name (str): name of the yaml configuration parameter\n value (object): value to set\n\n Returns:\n bool: if the attribute name was found and the value was set\n\n \"\"\"\n status = False\n if self.manager.job and hasattr(self.manager.job, \"set_config_value\"):\n status = self.manager.job.set_config_value(name, value)\n return status\n\n def get_config_value(self, name):\n \"\"\"Get the value of the yaml configuration parameter name.\n\n Args:\n name (str): name of the yaml configuration parameter from which to\n get the value\n\n Returns:\n object: the yaml configuration parameter value or None\n\n \"\"\"\n value = None\n if self.manager.job and hasattr(self.manager.job, \"get_config_value\"):\n value = self.manager.job.get_config_value(name)\n return value\n\n def get_user_file(self):\n \"\"\"Get the file defined in the yaml file that must be owned by the user.\n\n Returns:\n str: file defined in the yaml file that must be owned by the user\n\n \"\"\"\n return self.get_config_value(\"socket_dir\")\n","sub_path":"src/tests/ftest/util/command_utils.py","file_name":"command_utils.py","file_ext":"py","file_size_in_byte":30081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"322712734","text":"from abc import ABCMeta, abstractmethod\n\nfrom firm_result import FirmResult\nfrom stats import Stats\n\n\nclass Firm:\n __metaclass__ = ABCMeta\n\n def __init__(self, id):\n self.workers = set()\n self.id = id\n self.stock = 0\n self.sold = 0\n self.price = 20\n self.money = 100000\n self.efficiency_coefficient = 10\n self.current_salary = 200\n self.sales = 0\n self.salary = 200\n self.profit = 0\n\n def work(self):\n for worker in self.workers:\n self.stock += worker.productivity * self.efficiency_coefficient\n self.money -= worker.salary\n\n def apply_result(self, result):\n \"\"\"\n\n :type result: FirmResult\n \"\"\"\n self.salary = 0\n self.sales = 0\n for worker in result.quit_workers:\n self.remove_worker(worker)\n for worker in result.new_workers:\n self.add_worker(worker, result.salary)\n for worker in self.workers:\n self.salary += worker.salary\n self.sold = result.sold_count\n self.stock -= result.sold_count\n self.sales = self.price * result.sold_count\n self.profit = self.sales - self.salary\n if len(self.workers) > 0:\n self.salary /= len(self.workers)\n self.money += self.price * result.sold_count\n\n def apply_labormarket_result(self, result):\n for worker in result.quit_workers:\n self.remove_worker(worker)\n for worker in result.new_workers:\n self.add_worker(worker, result.salary)\n\n def apply_goodmarket_result(self, result):\n total_salary = 0\n for worker in self.workers:\n total_salary += worker.salary\n self.sold = result.sold_count\n self.stock -= result.sold_count\n 
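# revenue for this round at the current price; profit nets out the wage bill summed above
        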
self.sales = self.price * result.sold_count\n        self.profit = self.sales - total_salary\n        if len(self.workers) > 0:\n            self.salary = total_salary / len(self.workers)\n        self.money += self.price * result.sold_count\n\n    def add_worker(self, worker, salary):\n        worker.employer = self.id\n        worker.salary = salary\n        self.workers.add(worker)\n\n    def fire_worker(self, worker):\n        worker.employer = None\n        worker.salary = 0\n        self.workers.remove(worker)\n\n    def remove_worker(self, worker):\n        self.workers.remove(worker)\n\n    def bankrupt(self):\n        for worker in self.workers:\n            worker.employer = None\n            worker.salary = 0\n\n    @abstractmethod\n    def decide(self, stats):\n        pass\n\n    def decide_price(self, stats):\n        pass\n\n    def decide_salary(self, stats):\n        pass\n\n    def __str__(self):\n        return u\"Firm id: {0:d}. Stock: {1:d} Price: {2:d} Money: {3:d} Workers: {4:d}\" \\\n            .format(self.id, self.stock, self.price, self.money, len(self.workers))\n\n    def __repr__(self):\n        return self.__str__()\n","sub_path":"firm.py","file_name":"firm.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"580181908","text":"import Solution\nimport unittest\n\n\nclass TestSolution(unittest.TestCase):\n\n    def setUp(self):\n        self.solution = Solution.Solution()\n\n    def testPermuteUnique(self):\n\n        v = [[1, 1, 2], [1, 2, 1], [2, 1, 1]]\n        self.assertEqual(v, self.solution.permuteUnique([1, 1, 2]))\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"PermutationsII/TestSolution.py","file_name":"TestSolution.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"109514450","text":"# -*- coding: utf-8 -*-\n# WilcoxonTestPValueTwoTailed\n# mu=70\nimport scipy.stats as st\nfrom pandas import Series\na=Series([71, 69, 67, 68, 73, 72, 71, 71, 68, 72, 69, 72])\np_value=st.wilcoxon(x=a-70, zero_method=\"wilcox\", correction=True).pvalue\na_result=0.05<p_value\nif a_result==True:\n    print(\"P-Value:\", p_value, \"> 0.05\")\n    print(\"Accept null hypothesis\")\nif a_result==False:\n    print(\"P-Value:\", p_value, \"< 0.05\")\n    print(\"Reject null hypothesis\")\n# P-Value: 0.6902117434795202 > 0.05\n# Accept null hypothesis","sub_path":"fig/WilcoxonTestPValueTwoTailed.py","file_name":"WilcoxonTestPValueTwoTailed.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"630410616","text":"from django.conf import settings\nfrom django.db import IntegrityError\nfrom rest_framework import status, generics, mixins\nfrom rest_framework.response import Response\n\nfrom ..models import Like, Item, Comment\nfrom ..serializers.item import ItemDetailSerializer\n\nimport json\n\n\nclass LikeItem(generics.CreateAPIView):\n    def post(self, request):\n        request_data = json.loads(request.body.decode('utf-8'))\n\n        if 'item_id' not in request_data:\n            return Response({'error': \"no item in request\"}, status=status.HTTP_400_BAD_REQUEST)\n\n        item = Item.objects.get(pk=request_data['item_id'])\n        try:\n            like = Like()\n            like.item = item\n            like.owner = request.user\n            like.save()\n\n            serializer = ItemDetailSerializer(item)\n            return Response(serializer.data, status=status.HTTP_200_OK)\n        except IntegrityError:\n            return Response({'error' : \"this item has already been liked by the authenticated user\"}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CommentItem(generics.CreateAPIView):\n    def post(self, request):\n        request_data = json.loads(request.body.decode('utf-8'))\n\n        
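# validate the payload before creating the comment
        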
if 'item_id' not in request_data:\n return Response({'error': \"no item in request\"}, status=status.HTTP_400_BAD_REQUEST)\n\n if 'body' not in request_data:\n return Response({'error': \"no body in request\"}, status=status.HTTP_400_BAD_REQUEST)\n\n item = Item.objects.get(pk=request_data['item_id'])\n\n comment = Comment()\n comment.item = item\n comment.body = request_data['body']\n comment.owner = request.user\n comment.save()\n\n serializer = ItemDetailSerializer(item)\n return Response(serializer.data, status=status.HTTP_200_OK)\n","sub_path":"api/views/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"165434938","text":"from setmenu import SetMenu\nfrom menuitem import MenuItem\n\nmenuItem1 = MenuItem('Chicken Popcorn', 98, 4.1)\nmenuItem2 = MenuItem('Zinger Burger', 258, 4.6)\nmenuItem3 = MenuItem('Chicken Pizza', 298, 4.0)\nmenuItem4 = MenuItem('Lime Krusher', 75, 4.3)\nmenuItem5 = MenuItem('Chicken Popcorn', 60, 4.1)\nmenuItem6 = MenuItem('Chicken Popcorn', 60, 4.1)\n\nprint(menuItem1)\nprint(menuItem1 == menuItem5)\nprint(menuItem5 == menuItem6)\n\nsetmenu = SetMenu([menuItem1, menuItem2, menuItem3, menuItem4, menuItem5, menuItem6])\n\nprint(len(setmenu))\n\nprint(setmenu)\nsortedList = sorted(setmenu.menuitems, reverse=True)\nfor element in sortedList:\n print(element)\n","sub_path":"Outlab 4- Python Advanced/kfctest2.py","file_name":"kfctest2.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"208054146","text":"from fractions import Fraction\nimport math\n\nb = [Fraction(1, 1)]\nprePascal = [1, 1]\nfor i in range(1, 500):\n pascal = [1] + [prePascal[i] + prePascal[i + 1] for i in range(len(prePascal) - 1)] + [1]\n s = 0\n for j in range(len(pascal) - 2):\n s += b[j] * pascal[j]\n b.append(Fraction(-s, pascal[-2]))\n prePascal = pascal\n\n\ndef bernoulli_number(n):\n return b[n]\n\n\nprint(bernoulli_number(103))\n","sub_path":"codewar/2021/Bernoulli_numbers.py","file_name":"Bernoulli_numbers.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"393508328","text":"# STRETCH: implement Linear Search\t\t\t\t\ndef linear_search(arr, target):\n \n for idx, item in enumerate(arr):\n if item == target:\n return idx\n #end of conditional \n #end of for loop \n\n return -1 # not found\n\n\n# STRETCH: write an iterative implementation of Binary Search \ndef binary_search(arr, target):\n\n \"\"\"\n should take in a sorted array. time complexity O(log n)\n \"\"\"\n if len(arr) == 0:\n return -1 # array empty\n \n low = 0\n high = len(arr)-1\n\n while low <= high: # loop until the search range is exhausted\n pivot = (low + high) // 2\n if target == arr[pivot]:\n return pivot\n elif target < arr[pivot]:\n high = pivot - 1 # search the left half\n else:\n low = pivot + 1 # search the right half\n\n return -1 # not found\n\n\n# STRETCH: write a recursive implementation of Binary Search \ndef binary_search_recursive(arr, target, low, high):\n \n \"\"\"\n should take in a sorted array. 
time complexity O(log n)\n uses recursion\n \"\"\"\n\n if len(arr) == 0 or low > high:\n return -1 # empty array or search range exhausted: not found\n\n middle = (low + high) // 2\n\n if target == arr[middle]:\n return middle\n elif target < arr[middle]:\n return binary_search_recursive(arr, target, low, middle - 1)\n else:\n return binary_search_recursive(arr, target, middle + 1, high)\n","sub_path":"project/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"183198282","text":"#########################################################################################################\n# Instructions can be found here: http://mathwithbaddrawings.com/2013/06/16/ultimate-tic-tac-toe/\t\t# \t\t\t\t\t\t\t\t\t\t\t\t#\n#########################################################################################################\n\n\n\nfrom Tkinter import *\nimport tkFileDialog\nimport pickle\n\nwindow = Tk()\nwindow.title(\"Ultimate Tic-Tac-Toe\")\nframe = Frame(window)\nframe.grid(row = 0, column = 0, columnspan = 3, rowspan = 1)\ngameFrame = Frame(window, border = 3, bg = 'black')\ngameFrame.grid(row = 2, column = 0, columnspan = 3, rowspan = 1)\nstatusFrame = Frame(window, border = 3, bg = 'black')\nstatusFrame.grid(row = 3, column = 0, columnspan = 3, rowspan = 1, sticky = N+E+W)\nuserFrame = Frame(window)\nuserFrame.grid(row = 1, column = 0, columnspan = 3, rowspan = 1, sticky = N+E+W)\n\n#####################\n# Game Setup\t\t#\n#####################\n\nturn = 1 \t\t\t# required to check which player is playing\ngridList = []\t\t# list of grid that is yet unplayed\ngridStatus = [0]\t# status of overall grid. 1 - Player 1 won. 2 - Player 2 won. 0 - Neither won.\ndict1 = {}\t\t\t# Player 1 - which grids he selected\ndict2 = {}\t\t\t# Player 2 - which grids he selected\nlastMove = \"\"\t\t# Last move. Essential for saving game.\n\nturnLabel = StringVar()\t\t\t# Label at bottom of game\nstartLabel = StringVar()\t\t# Label for start button\nstartLabel.set(\"New Game\")\nturnLabel.set(\"Click on New Game to start a game!\")\nLabel(frame, text = \"Ultimate Tic-Tac-Toe\", font = \"Arial 50\").pack()\t\t# Title Label\nstatus = Label(statusFrame, textvariable = turnLabel, font = \"Arial 30\", fg = 'white', bg = 'black')\t\t# Status Label\nstatus.pack()\n\n# Buttons\nnew = Button(userFrame, textvariable = startLabel, command = lambda: newGame()).grid(row = 0, column = 0, sticky = E)\nsave = Button(userFrame, text = \"Save Game\", command = lambda: saveGame()).grid(row = 0, column = 1)\nload = Button(userFrame, text = \"Load Game\", command = lambda: loadGame()).grid(row = 0, column = 2)\nGrid.columnconfigure(userFrame, 3, weight = 1)\nButton(userFrame, text = \"Exit\", command = lambda: exitGame()).grid(row = 0, column = 4, sticky = W)\n\n\n# One giant frame for every small TTT game\nfor i in range(1, 10):\n\tglobals()['c%s' %i] = Frame(gameFrame, bg = \"white\", bd = 5)\n\tglobals()['c%s' %i].grid(row = (i-1)/3, column = (i-1)%3)\n\tdict1[i] = []\n\tdict2[i] = []\n\tgridStatus.append(0)\n\nfor i in [c1, c2, c3, c4, c5, c6, c7, c8, c9]:\n\ti.config(highlightthickness = 1, highlightbackground= 'black')\n\n# Import photos into star moon and none variables\nstar = PhotoImage(file = \"star.gif\")\nmoon = PhotoImage(file = \"moon.gif\")\nnone = PhotoImage(file = \"none.gif\")\n\n\n# Every Button is in a frame. 
81 buttons in 81 frames\nfor i in range(1, 10):\n\tfor j in range(1, 10):\n\t\tglobals()['c%s%s'%(i, j)] = Canvas(globals()['c%s'%i])\n\t\tglobals()['c%s%s'%(i, j)].grid(row = (j-1)/3, column = (j-1)%3)\n\t\tglobals()['b%s%s'%(i, j)] = Button(globals()['c%s%s'%(i, j)], image = none, \\\n\t\t\tcommand = lambda i=i, j=j: f(i, j), bd = 2)\n\t\tglobals()['b%s%s'%(i, j)].image = star\n\t\tglobals()['b%s%s'%(i, j)].pack()\n\t\tgridList.append(\"%s %s\"%(i, j))\n\t\tglobals()['b%s%s'%(i, j)].configure(state = \"disabled\")\n\n\n\t\t\n#########################\n# Game functions\t\t#\n#########################\n\ndef f(i, j):\n\tglobal turn, dict1, dict2, gridStatus, lastMove\n\n\t# player 2 star, player 1 moon\n\tif (turn%2) == 0:\n\t\tplayer = 1\n\t\tdict1[i].append(j)\n\t\tturnLabel.set(\"Star's turn\")\n\t\tstatus.configure(fg = \"blue\")\n\telse:\n\t\tplayer = 2\n\t\tdict2[i].append(j)\n\t\tturnLabel.set(\"Moon's turn\")\n\t\tstatus.configure(fg = \"red\")\n\n\t# Remove item from grid list. Store last move.\n\tgridList.remove(\"%s %s\"%(i, j))\n\tlastMove = \"%s %s\"%(i, j)\n\n\t# Activates only the buttons in the frame that is determined by the position of the button.\n\tfor item in gridList:\n\t\t(c, d) = item.split()\n\t\tglobals()['b%s%s'%(c, d)].configure(state = \"active\")\n\n\n\t# Disables the button that is selected and mark it with an image. Player 1 - Star. Player 2 - Moon.\n\tif (turn % 2) == 0:\n\t\tglobals()['b%s%s'%(i, j)].configure(image = moon, state = \"disabled\")\n\t\tglobals()['b%s%s'%(i, j)].image = moon\n\t\tturn += 1\n\telse:\n\t\tglobals()['b%s%s'%(i, j)].configure(image = star, state = \"disabled\")\n\t\tglobals()['b%s%s'%(i, j)].image = star\n\t\tturn += 1\n\n\t# Those that are already selected stays disabled\n\tfor a in range(1, 10):\n\t\tfor b in range(1, 10):\n\t\t\tif gridStatus[j] > 0:\n\t\t\t\tNone\n\t\t\telif a == j:\n\t\t\t\tNone\n\t\t\telse:\n\t\t\t\tglobals()['b%s%s'%(a, b)].configure(state = \"disabled\")\n\n\tgridStatus[i] = checkWin(i, player)\t\t# check if player won the grid\n\tcheckVictory(player)\t\t\t\t\t# check if player won the game\n\n#################################\n#\tChecks if the grid is won\t#\n#################################\n\ndef checkWin(gridNumber, player):\n\twin = 0\n\n\t# There are only 8 ways to win a tic tac toe game. 
These are all the 8 ways.\n\twinCoord = [[1,2,3], [4,5,6], [7,8,9], [1,4,7], [2,5,8], [3,6,9], [1,5,9], [3,5,7]]\n\n\t# If the grid is already won, this function does not do anything\n\tif gridStatus[gridNumber] > 0:\n\t\treturn gridStatus[gridNumber]\n\n\tif player == 1:\n\t\tcolor = \"red\"\n\t\tlist = dict1[gridNumber]\n\telse:\n\t\tcolor = \"blue\"\n\t\tlist = dict2[gridNumber]\t\n\n\tlist.sort()\n\n\t# If all the items in one of the lists in winCoord are inside the respective key of the player, it means the player won the game.\n\tfor item in winCoord:\n\t\tif (item[0] in list) and (item[1] in list) and (item[2] in list):\n\t\t\twin += 1\n\n\t# If game is won, change gridStatus and change frame color\n\tif win > 0:\n\t\tglobals()[\"c%s\"%gridNumber].configure(bg = color)\n\t\treturn player\n\telse:\n\t\treturn 0\n\n#################################\n#\tChecks if the game is won\t#\n#################################\n\ndef checkVictory(player):\n\twin = 0\n\tlist = []\n\twinCoord = [[1,2,3], [4,5,6], [7,8,9], [1,4,7], [2,5,8], [3,6,9], [1,5,9], [3,5,7]]\n\n\tfor i in range(1, 10):\n\t\tif gridStatus[i] == player:\n\t\t\tlist.append(i)\n\n\t# Use same algorithm in checkWin to determine if entire game is won.\n\tfor item in winCoord:\n\t\tif (item[0] in list) and (item[1] in list) and (item[2] in list):\n\t\t\twin += 1\n\n\t# If game is not won, this funciton does nothing\n\tif win > 0:\n\t\tvictory(player)\n\telse:\n\t\tpass\n\n#############################################\n#\tDisable all functions when game is won\t#\n#############################################\n\ndef victory(player):\n\tif player == 1:\n\t\tcolor = \"red\"\n\t\tname = \"Moon\"\n\telse:\n\t\tcolor = \"blue\"\n\t\tname = \"Star\"\n\n\t# Disable all button and declare winner in turn label\n\tfor i in range(1, 10):\n\t\tfor j in range(1, 10):\n\t\t\tglobals()['b%s%s'%(i, j)].configure(state = \"disabled\")\n\t\t\tturnLabel.set(\"%s won!\"%name)\n\t\t\tstatus.configure(fg = color)\n\n\n#########################\n#\tStarts a new game\t#\n#########################\n\ndef newGame():\n\tglobal turn, gridList, gridStatus, dict1, dict2, lastMove\n\tturn = 1\n\tgridList = []\n\tgridStatus = [0]\n\tdict1 = {}\n\tdict2 = {}\n\tlastMove = \"\"\n\n\tturnLabel.set(\"Star's turn\")\n\tstatus.configure(fg = \"blue\")\n\tstartLabel.set(\"Restart\")\n\n\tfor i in range(1, 10):\n\t\tdict1[i] = []\n\t\tdict2[i] = []\n\t\tglobals()[\"c%s\"%i].configure(bg = \"white\")\n\t\tgridStatus.append(0)\n\n\tfor i in range(1, 10):\n\t\tfor j in range(1, 10):\n\t\t\tglobals()['b%s%s'%(i, j)].configure(state = \"active\", image = none)\n\t\t\tgridList.append(\"%s %s\"%(i, j))\n\n\n#############################################\n#\tSaves game into a file using pickle\t\t#\n#############################################\n\ndef saveGame():\n\tglobal turn, gridList, gridStatus, dict1, dict2, lastMove\n\n\tfilename = tkFileDialog.asksaveasfilename()\n\tf = open(filename, 'wb')\n\tpickle.dump((turn, gridList, gridStatus, dict1, dict2, lastMove), f)\n\tf.close()\n\n#################################\n#\tLoad game state from file\t#\n#################################\n\ndef loadGame():\n\tglobal turn, gridList, gridStatus, dict1, dict2, lastMove\n\n\tnewGame()\n\n\tfilename = tkFileDialog.askopenfilename()\n\tf = open(filename, 'rb')\n\tturn, gridList, gridStatus, dict1, dict2, lastMove = pickle.load(f)\n\tf.close()\n\n\tfor i, v in dict1.iteritems():\n\t\tfor j in v:\n\t\t\tglobals()['b%s%s'%(i, j)].configure(image = moon, state = 
\"disabled\")\n\t\t\tglobals()['b%s%s'%(i, j)].image = moon\n\n\tfor i, v in dict2.iteritems():\n\t\tfor j in v:\n\t\t\tglobals()['b%s%s'%(i, j)].configure(image = star, state = \"disabled\")\n\t\t\tglobals()['b%s%s'%(i, j)].image = star\n\n\tif (turn%2) == 0:\n\t\tplayer = 1\n\t\tturnLabel.set(\"Moon's turn\")\n\t\tstatus.configure(fg = \"red\")\n\telse:\n\t\tplayer = 2\n\t\tturnLabel.set(\"Star's turn\")\n\t\tstatus.configure(fg = \"blue\")\n\n\tfor i in range(1, 10):\n\t\tif gridStatus[i] == 1:\n\t\t\tglobals()[\"c%s\"%i].configure(bg = \"red\")\n\t\tif gridStatus[i] == 2:\n\t\t\tglobals()[\"c%s\"%i].configure(bg = \"blue\")\n\n\n\t(i, j) = lastMove.split()\n\tj = int(j)\n\n\tfor item in gridList:\n\t\t(c, d) = item.split()\n\t\tglobals()['b%s%s'%(c, d)].configure(state = \"active\")\n\n\tfor a in range(1, 10):\n\t\tfor b in range(1, 10):\n\t\t\tif gridStatus[j] > 0:\n\t\t\t\tNone\n\t\t\telif a == j:\n\t\t\t\tNone\n\t\t\telse:\n\t\t\t\tglobals()['b%s%s'%(a, b)].configure(state = \"disabled\")\n\n\tcheckVictory(player)\n\n\n#########################################\n#\tExits game. Destroy Tkinter window\t#\n#########################################\n\ndef exitGame():\n\twindow.destroy()\n\nwindow.mainloop()\n\n\n\n\n\n","sub_path":"Ultimate_TTT.py","file_name":"Ultimate_TTT.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"2298568","text":"\ndef piglatinify(word):\n \"\"\"\n word : a single word\n return : the word converted into piglatin\n \n rules : starts with a vowel --> append \"ay\"\n otherwise --> move first to last and append \"ay\"\n \"\"\"\n \n if len(word) == 0:\n return word\n \n # if there's a period, strip it out and\n # set append_period so that we know to add it back in at the end\n append_period = False\n if word[-1] == '.':\n append_period = True\n word = word[:-1]\n \n capitalize = False\n if word[0].isupper():\n word=word[0].lower() + word[1:]\n capitalize = True\n \n \n first = word[0]\n if first in \"aeiou\":\n result = word + \"ay\"\n elif first:\n result = word[1:] + first + \"ay\"\n\n # if we stripped out a period at the top, add it back in\n if append_period:\n result = result + \".\"\n \n if capitalize:\n result = result.capitalize()\n return result\n\n\ndef sentence_to_piglatin(sentence):\n rlist = []\n for word in sentence.split():\n new_word = piglatinify(word)\n rlist.append(new_word)\n # rlist = rlist + [new_word] <-- same as line above\n \n return \" \".join(rlist)\n\n\nencoded_sentence = sentence_to_piglatin(\"This is a sentence.\")\nprint(encoded_sentence)\n","sub_path":"classcode/piglatin/piglatin.py","file_name":"piglatin.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"529011863","text":"def choice():\r\n option=input('''\r\n Would you like to proceed?\r\n Choose your option by entering the number.\r\n \r\n 1 Yes\r\n 2 No\r\n \r\n '''\r\n )\r\n \r\n if option == \"1\":\r\n print(\"You will proceed\")\r\n elif option == \"2\":\r\n print(\"You will not proceed\")\r\n else:\r\n print(\"Incorrect option\")\r\n# Is there any way to pause this? Require the user to press enter in order to contiune? 
\r\n choice()\r\n\r\n\r\nchoice()\r\n \r\n \r\n","sub_path":"Function.question.py","file_name":"Function.question.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"216388456","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport dolfin as df\n\n\n# get solution, values known at Lagrange nodes, compute at \"xi\"\ndef get_sol(p, W, u, xi):\n u_sol = u[0] * basis1d(p, 0, xi)\n for k in range(1, p + 1):\n u_sol += u[k] * basis1d(p, k, xi)\n\n u_sol_fenics = df.Function(W)\n u_sol_fenics.assign(u_sol)\n\n return u_sol_fenics\n\n\n# get solution derivative, values known at Lagrange nodes, compute at \"xi\"\ndef get_sol_deriv(p, W, u, xi):\n du_sol = u[0] * basis1d_deriv(p, 0, xi)\n for k in range(1, p + 1):\n du_sol += u[k] * basis1d_deriv(p, k, xi)\n\n du_sol_fenics = df.Function(W)\n du_sol_fenics.assign(du_sol)\n\n return du_sol_fenics\n\n\n# 1d Lagrange basis\ndef basis1d(p, basis_id, xi):\n if p == 0:\n return zerobasis1d(basis_id, xi)\n elif p == 1:\n return linearbasis1d(basis_id, xi)\n elif p == 2:\n return quadraticbasis1d(basis_id, xi)\n elif p == 3:\n return cubicbasis1d(basis_id, xi)\n elif p == 4:\n return biquadraticbasis1d(basis_id, xi)\n\n\n# Derivative of 1d Lagrange basis\ndef basis1d_deriv(p, basis_id, xi):\n if p == 1:\n return linearbasis1d_deriv(basis_id, xi)\n elif p == 2:\n return quadraticbasis1d_deriv(basis_id, xi)\n elif p == 3:\n return cubicbasis1d_deriv(basis_id, xi)\n\n\n# Constant 1d basis, reference interval (0,1)\n# noinspection PyUnusedLocal\ndef zerobasis1d(basis_id, xi):\n if basis_id == 0:\n return 1\n\n\n# Linear 1d Lagrange basis, reference interval (0,1)\ndef linearbasis1d(basis_id, xi):\n lambda1 = 1 - xi\n lambda2 = xi\n\n if basis_id == 0:\n return lambda1\n elif basis_id == 1:\n return lambda2\n\n\n# Derivative of the linear basis\n# noinspection PyUnusedLocal\ndef linearbasis1d_deriv(basis_id, xi):\n if basis_id == 0:\n return -1\n elif basis_id == 1:\n return +1\n\n\n# Quadratic 1d Lagrange basis, reference interval (0,1)\ndef quadraticbasis1d(basis_id, xi):\n lambda1 = 1 - xi\n lambda2 = xi\n\n if basis_id == 0:\n return lambda1 * (2 * lambda1 - 1)\n elif basis_id == 1:\n return 4 * lambda1 * lambda2\n elif basis_id == 2:\n return lambda2 * (2 * lambda2 - 1)\n\n\n# Derivative of the quadratic basis\ndef quadraticbasis1d_deriv(basis_id, xi):\n lambda1 = 1 - xi\n lambda2 = xi\n\n if basis_id == 0:\n return -(4 * lambda1 - 1)\n elif basis_id == 1:\n return +4 * (lambda1 - lambda2)\n elif basis_id == 2:\n return +(4 * lambda2 - 1)\n\n\n# Cubic 1d Lagrange basis, reference interval (0,1)\ndef cubicbasis1d(basis_id, xi):\n lambda1 = 1 - xi\n lambda2 = xi\n\n if basis_id == 0:\n return 1. / 2. * lambda1 * (3 * lambda1 - 2) * (3 * lambda1 - 1)\n elif basis_id == 1:\n return 9. / 2. * lambda1 * lambda2 * (3 * lambda1 - 1)\n elif basis_id == 2:\n return 9. / 2. * lambda1 * lambda2 * (3 * lambda2 - 1)\n elif basis_id == 3:\n return 1. / 2. * lambda2 * (3 * lambda2 - 2) * (3 * lambda2 - 1)\n\n\n# Derivative of the cubic basis\ndef cubicbasis1d_deriv(basis_id, xi):\n lambda1 = 1 - xi\n lambda2 = xi\n\n if basis_id == 0:\n return -1. / 2. * (27 * lambda1 ** 2 - 18 * lambda1 + 2)\n elif basis_id == 1:\n return +9. / 2. * (3 * lambda1 ** 2 - 6 * lambda1 * lambda2 - lambda1 + lambda2)\n elif basis_id == 2:\n return -9. / 2. * (3 * lambda2 ** 2 - 6 * lambda1 * lambda2 + lambda1 - lambda2)\n elif basis_id == 3:\n return +1. / 2. 
* (27 * lambda2 ** 2 - 18 * lambda2 + 2)\n\n\n# Biquadratic 1d Lagrange basis, reference interval (0,1)\ndef biquadraticbasis1d(basis_id, xi):\n lambda1 = 1 - xi\n lambda2 = xi\n\n if basis_id == 0:\n return +1. / 3. * lambda1 * (4 * lambda1 - 1) * (4 * lambda2 - 1) * (2 * lambda2 - 1)\n elif basis_id == 1:\n return -16. / 3. * lambda1 * lambda2 * (4 * lambda1 - 1) * (2 * lambda2 - 1)\n elif basis_id == 2:\n return +4 * lambda1 * lambda2 * (4 * lambda1 - 1) * (4 * lambda2 - 1)\n elif basis_id == 3:\n return -16. / 3. * lambda1 * lambda2 * (4 * lambda2 - 1) * (2 * lambda1 - 1)\n elif basis_id == 4:\n return +1. / 3. * lambda2 * (4 * lambda2 - 1) * (4 * lambda1 - 1) * (2 * lambda1 - 1)\n\n# End of file\n","sub_path":"src/lagrange.py","file_name":"lagrange.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"498890486","text":"# -*- coding: utf-8 -*-\nimport ConfigParser\n\nimport MySQLdb\n\n\nclass IncrManager(object):\n\n def __init__(self):\n cf = ConfigParser.ConfigParser()\n cf.read(\"db_config.ini\")\n\n self.conn = MySQLdb.Connect(\n host = cf.get(\"baseconf\", \"host\"),\n port = cf.getint(\"baseconf\", \"port\"),\n user = cf.get(\"baseconf\", \"user\"),\n passwd = cf.get(\"baseconf\", \"password\"),\n db = cf.get(\"baseconf\", \"dbname\"),\n charset = 'utf8'\n )\n self.cursor_incr = self.conn.cursor()\n self.cursor_item = self.conn.cursor()\n\n self.count = 0\n\n def upsert_incr(self, skuid, comStrIncrs, priceIncrs):\n\n # Fetch the category info for this SKU\n sql_item = \"select category1, category2, category3, category4 from spider_item where skuid=%s \" % (skuid)\n self.cursor_item.execute(sql_item)\n categorys = self.cursor_item.fetchone()\n if categorys is None:\n return False\n\n sql = \"replace into analyze_comment_incr \" \\n \"(skuid, \" \\n \"incr_3h, incr_6h, incr_12h, incr_24h, incr_48h, incr_72h, \" \\n \"price_incr_1d, price_incr_2d, price_incr_3d, price_incr_7d, price_incr_10d, price_incr_15d, \" \\n \"category1, category2, category3, category4, upsert_time) \" \\n \"values ('%s', %s, %s, %s, %s, %s, %s, \" \\n \"%f, %f, %f, %f, %f, %f, \" \\n \"'%s', '%s', '%s', '%s', now()) \"\n\n list_incrs = list()\n list_incrs.append(skuid)\n for ele in comStrIncrs:\n list_incrs.append(ele)\n for ele in priceIncrs:\n list_incrs.append(float(ele))\n for ele in categorys:\n list_incrs.append(ele)\n\n sql = sql % (tuple(list_incrs))\n self.cursor_incr.execute(sql)\n\n self.count += 1\n if self.count >= 20:\n self.conn.commit()\n self.count = 0\n return True\n\n def close(self):\n self.conn.commit()\n self.cursor_item.close()\n self.cursor_incr.close()\n self.conn.close()\n\n\n","sub_path":"analyze/incr_manager.py","file_name":"incr_manager.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"230399424","text":"\"\"\"\nI/O multiplexing: use select to communicate with multiple clients\nKey code\n\"\"\"\nfrom socket import *\nfrom select import select\n\n# Create the listening socket that select will monitor\ns = socket()\ns.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\ns.bind(('0.0.0.0', 9999))\ns.listen(3)\n\n# Set up the lists of monitored I/O objects\nrlist = [s]\nwlist = []\nxlist = []\n\nwhile True:\n # Wait for I/O events to occur\n rs, ws, xs = select(rlist, wlist, xlist)\n # Walk the three returned lists to see which I/O is ready\n for r in rs:\n # If the listening socket is ready, accept the new connection\n if r is s:\n c, addr = r.accept()\n print(\"Connect from\", addr)\n rlist.append(c) # keep adding new connections to the watch list\n else:\n data = r.recv(1024)\n if not data:\n rlist.remove(r)\n r.close()\n continue\n print(data.decode())\n # r.send(b'OK')\n 
wlist.append(r) # handle this I/O actively via the write list\n\n for w in ws:\n w.send(b'OK,Thanks')\n wlist.remove(w)\n\n for x in xs:\n pass\n","sub_path":"Mr.左/mothon02/代码/Concurrent_并发编程/day05/select_server.py","file_name":"select_server.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"274235145","text":"import sasi.sa.session as sa_session\nfrom sasi.dao.results.sa_result_dao import SA_Result_DAO\nimport sasi.viz.results.map.mapserver as results_ms\n\nimport re\nimport os\nimport copy\n\ndef get_dao():\n\tsession = sa_session.get_session()\n\treturn SA_Result_DAO(session=session)\n\n\ndef get_choice_facet(id_field=None, value_field=None, label_field=None, filters=None, aggregate_func='sum'):\n\tdao = get_dao()\n\n\t# We use the current PID to create unique labels.\n\tpid = \"%s\" % os.getpid()\n\tid_label = '%s_id' % pid\n\tlabel_label = '%s_label' % pid\n\tvalue_label = '%s_value' % pid\n\n\t# Get aggregates for choices.\n\taggregates = dao.get_aggregates(\n\t\t\tfields=[{\n\t\t\t\t'id': value_field, \n\t\t\t\t'label': value_label,\n\t\t\t\t'aggregate_funcs': [aggregate_func],\n\t\t\t\t}],\n\t\t\tgrouping_fields=[{\n\t\t\t\t'id': id_field,\n\t\t\t\t'label': id_label,\n\t\t\t\tlabel_field: {'id': label_field, 'label': label_label}\n\t\t\t\t}],\n\t\t\tfilters=filters \n\t\t\t)\n\n\t# Assemble facet choices from aggregates.\n\tchoices = []\n\tfor leaf in aggregates.get('children', {}).values():\n\t\tchoices.append({\n\t\t\t\"id\": leaf['id'],\n\t\t\t\"label\": leaf['label'],\n\t\t\t\"count\": leaf['data'][0]['value']\n\t\t\t})\n\n\tchoices.sort(key=lambda o:o['label'])\n\n\t# Get total for value field.\n\ttotal = aggregates['data'][0]['value']\n\n\t# Assemble facet.\n\tfacet = {\n\t\t\t'choices': choices,\n\t\t\t'value_total': total\n\t\t\t}\n\n\treturn facet\n\n\ndef get_numeric_facet(value_field=None, grouping_field=None, base_filters=[], filters=[]):\n\n\t# Set properties on grouping field.\n\tgrouping_field['as_histogram'] = True\n\tgrouping_field['all_values'] = True\n\tgrouping_field.setdefault('num_buckets', 25)\n\n\t# Get filtered and unfiltered aggregates.\n\taggregates = get_aggregates(\n\t\t\tvalue_fields = [value_field],\n\t\t\tgrouping_fields = [grouping_field],\n\t\t\tfilters=filters,\n\t\t\twith_unfiltered=True,\n\t\t\tbase_filters=base_filters\n\t\t\t)\n\n\t# Assemble histograms\n\tbase_histogram = []\n\tfiltered_histogram = []\n\n\tfor node in aggregates.get('children', {}).values():\n\n\t\t# Get min/max from label.\n\t\tbucket_label = node['label']\n\t\tbucket_min = \"\"\n\t\tbucket_max = \"\"\n\t\tm = re.match('(.*) to (.*)', bucket_label)\n\t\tif m:\n\t\t\tbucket_min = float(m.group(1))\n\t\t\tbucket_max = float(m.group(2))\n\n\t\tbase_bucket = {\n\t\t\t\t'bucket': node['label'],\n\t\t\t\t'min': bucket_min,\n\t\t\t\t'max': bucket_max,\n\t\t\t\t'count': node['data'][0]['value']\n\t\t\t\t}\n\t\tbase_histogram.append(base_bucket)\n\n\t\tfiltered_bucket = base_bucket.copy()\n\t\tfiltered_bucket['count'] = node['data'][1]['value']\n\t\tfiltered_histogram.append(filtered_bucket)\n\n\tfor h in [base_histogram, filtered_histogram]:\n\t\th.sort(key=lambda b: b['min'])\n \n\t# Assemble facet\n\tfacet = {\n\t\t\t'base_histogram': base_histogram,\n\t\t\t'filtered_histogram': filtered_histogram,\n\t\t\t}\n\n\treturn facet\n\n\ndef get_map(wms_parameters=None, filters=[], result_field=None):\n\tdao = get_dao()\n\n\t# Generate map image for the given parameters.\n\tmap_image = 
results_ms.get_map_image_from_wms(wms_parameters=wms_parameters, result_field=result_field, result_dao=dao, filters=filters)\n\n\t# Return the image.\n\treturn map_image\n\ndef get_totals(value_field=None, base_filters=[], filters=[]):\n\tvalue_field = {'id': value_field, 'label': 'value_field', 'aggregate_funcs': ['sum']}\n\n\t# Get filtered and unfiltered aggregates.\n\taggregates = get_aggregates(\n\t\t\tvalue_fields = [value_field],\n\t\t\tfilters=filters,\n\t\t\twith_unfiltered=True,\n\t\t\tbase_filters=base_filters\n\t\t\t)\n\n\t# Assemble totals\n\ttotals = {\n\t\t\t'filtered_total': aggregates['data'][0]['value'],\n\t\t\t'unfiltered_total': aggregates['data'][1]['value'],\n\t\t\t}\n\n\treturn totals\n\n\ndef get_aggregates(value_fields=None, grouping_fields=[], filters=[], with_unfiltered=False, base_filters=[]):\n\tdao = get_dao()\n\n\tfor vf in value_fields: \n\t\tvf.setdefault('label', \"{}--label\".format(vf.get('id')))\n\n\taggregates = dao.get_aggregates(\n\t\t\tfields=value_fields,\n\t\t\tgrouping_fields=grouping_fields,\n\t\t\tfilters=filters)\n\n\tif with_unfiltered:\n\t\tunfiltered_value_fields = copy.deepcopy(value_fields)\n\t\tfor vf in unfiltered_value_fields:\n\t\t\tvf['label'] += '--unfiltered'\n\n\t\tunfiltered_aggregates = dao.get_aggregates(\n\t\t\t\tfields=unfiltered_value_fields,\n\t\t\t\tgrouping_fields=grouping_fields,\n\t\t\t\tfilters=base_filters\n\t\t\t\t)\n\n\t\t# Make path dicts for each tree.\n\t\tfiltered_path_dict = {}\n\t\tunfiltered_path_dict = {}\n\t\tupdate_path_dict(aggregates, tuple(), filtered_path_dict)\n\t\tupdate_path_dict(unfiltered_aggregates, tuple(), unfiltered_path_dict)\n\n\t\t# Add unfiltered data to filtered data.\n\t\tfor path, filtered_node in filtered_path_dict.items():\n\t\t\tunfiltered_node = unfiltered_path_dict.get(path)\n\t\t\tfor d in unfiltered_node['data']:\n\t\t\t\tfiltered_node['data'].append(d)\n\t\n\n\treturn aggregates\n\n# Helper function to make a dictionary of path:leaf pairs for a given tree node.\ndef update_path_dict(node, path, path_dict):\n\tcur_path = path + (node.get('id'),)\n\n\tpath_dict[cur_path] = node\n\n\tif node.has_key('children'):\n\t\tfor c in node['children'].values():\n\t\t\tupdate_path_dict(c, cur_path, path_dict)\n\n\n\n\t\n","sub_path":"results/results_services.py","file_name":"results_services.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"534975123","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom test.page.base_page import BasePage\nfrom utils.config import DATA_PATH\nfrom utils.common.log import logger\n\n\nclass FileManagementPage(BasePage):\n\n def __init__(self, driver):\n super(FileManagementPage, self).__init__(driver)\n\n @classmethod\n def get_upload_files(cls, version):\n cls.path_list = []\n version_upload_file = os.path.join(DATA_PATH, ''.join([version, '_upload']))\n version_file_paths = os.listdir(version_upload_file)\n if version == 'pico':\n for files_path in version_file_paths:\n if '4.5' in files_path:\n file_path = \"\\\\\".join([version_upload_file, files_path])\n logger.debug(\"get version file from: {}\".format(file_path))\n for file in os.listdir(file_path):\n cls.path_list.append(\"\\\\\".join([file_path, file]))\n if version == 'femto':\n pass\n return 
cls.path_list\n","sub_path":"test/page/SmallCellMgtPage/file_mgt_page.py","file_name":"file_mgt_page.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"31663337","text":"def check_netconf_args(self, result):\n ' Check invalid netconf args '\n need_cfg = True\n same_flag = True\n delete_flag = False\n result['target_host_info'] = []\n if self.host_name:\n if ((len(self.host_name) > 32) or (len(self.host_name) < 1)):\n self.module.fail_json(msg='Error: The len of host_name is out of [1 - 32].')\n if (self.vpn_name and (self.is_public_net != 'no_use')):\n if (self.is_public_net == 'true'):\n self.module.fail_json(msg='Error: Do not support vpn_name and is_public_net at the same time.')\n conf_str = CE_GET_SNMP_TARGET_HOST_HEADER\n if self.domain:\n conf_str += '<domain></domain>'\n if self.address:\n if (not check_ip_addr(ipaddr=self.address)):\n self.module.fail_json(msg=('Error: The host address [%s] is invalid.' % self.address))\n conf_str += '<address></address>
'\n if self.notify_type:\n conf_str += '<notifyType></notifyType>'\n if self.vpn_name:\n if ((len(self.vpn_name) > 31) or (len(self.vpn_name) < 1)):\n self.module.fail_json(msg='Error: The len of vpn_name is out of [1 - 31].')\n conf_str += '<vpnInstanceName></vpnInstanceName>'\n if self.recv_port:\n if ((int(self.recv_port) > 65535) or (int(self.recv_port) < 0)):\n self.module.fail_json(msg='Error: The value of recv_port is out of [0 - 65535].')\n conf_str += '<portNumber></portNumber>'\n if self.security_model:\n conf_str += '<securityModel></securityModel>'\n if self.security_name:\n if ((len(self.security_name) > 32) or (len(self.security_name) < 1)):\n self.module.fail_json(msg='Error: The len of security_name is out of [1 - 32].')\n conf_str += '<securityName></securityName>'\n if self.security_name_v3:\n if ((len(self.security_name_v3) > 32) or (len(self.security_name_v3) < 1)):\n self.module.fail_json(msg='Error: The len of security_name_v3 is out of [1 - 32].')\n conf_str += '<securityNameV3></securityNameV3>'\n if self.security_level:\n conf_str += '<securityLevel></securityLevel>'\n if (self.is_public_net != 'no_use'):\n conf_str += '<isPublicNet></isPublicNet>'\n if self.interface_name:\n if ((len(self.interface_name) > 63) or (len(self.interface_name) < 1)):\n self.module.fail_json(msg='Error: The len of interface_name is out of [1 - 63].')\n find_flag = False\n for item in INTERFACE_TYPE:\n if (item in self.interface_name.lower()):\n find_flag = True\n break\n if (not find_flag):\n self.module.fail_json(msg='Error: Please input full name of interface_name.')\n conf_str += '<interface-name></interface-name>'\n conf_str += CE_GET_SNMP_TARGET_HOST_TAIL\n recv_xml = self.netconf_get_config(conf_str=conf_str)\n if ('<data/>' in recv_xml):\n if (self.state == 'present'):\n same_flag = False\n else:\n delete_flag = False\n else:\n xml_str = recv_xml.replace('\r', '').replace('\n', '').replace('xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"', '').replace('xmlns=\"http://www.huawei.com/netconf/vrp\"', '')\n root = ElementTree.fromstring(xml_str)\n target_host_info = root.findall('snmp/targetHosts/targetHost')\n if target_host_info:\n for tmp in target_host_info:\n tmp_dict = dict()\n for site in tmp:\n if (site.tag in ['nmsName', 'domain', 'address', 'notifyType', 'vpnInstanceName', 'portNumber', 'securityModel', 'securityName', 'securityNameV3', 'securityLevel', 'isPublicNet', 'interface-name']):\n tmp_dict[site.tag] = site.text\n result['target_host_info'].append(tmp_dict)\n if result['target_host_info']:\n for tmp in result['target_host_info']:\n same_flag = True\n if ('nmsName' in tmp.keys()):\n if (tmp['nmsName'] != self.host_name):\n same_flag = False\n else:\n delete_flag = True\n if ('domain' in tmp.keys()):\n if (tmp['domain'] != self.domain):\n same_flag = False\n if ('address' in tmp.keys()):\n if (tmp['address'] != self.address):\n same_flag = False\n if ('notifyType' in tmp.keys()):\n if (tmp['notifyType'] != self.notify_type):\n same_flag = False\n if ('vpnInstanceName' in tmp.keys()):\n if (tmp['vpnInstanceName'] != self.vpn_name):\n same_flag = False\n if ('portNumber' in tmp.keys()):\n if (tmp['portNumber'] != self.recv_port):\n same_flag = False\n if ('securityModel' in tmp.keys()):\n if (tmp['securityModel'] != self.security_model):\n same_flag = False\n if ('securityName' in tmp.keys()):\n if (tmp['securityName'] != self.security_name):\n same_flag = False\n if ('securityNameV3' in tmp.keys()):\n if (tmp['securityNameV3'] != self.security_name_v3):\n same_flag = False\n if ('securityLevel' in tmp.keys()):\n if (tmp['securityLevel'] != self.security_level):\n same_flag = False\n if ('isPublicNet' in tmp.keys()):\n if (tmp['isPublicNet'] != self.is_public_net):\n same_flag = False\n if ('interface-name' in tmp.keys()):\n if 
(tmp.get('interface-name') is not None):\n if (tmp['interface-name'].lower() != self.interface_name.lower()):\n same_flag = False\n else:\n same_flag = False\n if same_flag:\n break\n if (self.state == 'present'):\n need_cfg = True\n if same_flag:\n need_cfg = False\n else:\n need_cfg = False\n if delete_flag:\n need_cfg = True\n result['need_cfg'] = need_cfg","sub_path":"Data Set/bug-fixing-5/027cd16b1ade78ef0c69b5af233e83445085d340--fix.py","file_name":"027cd16b1ade78ef0c69b5af233e83445085d340--fix.py","file_ext":"py","file_size_in_byte":6740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"295294169","text":"links = []\nfile = open('links.txt','w')\n\ndef getLinks(start,end):\n for i in range(start,end+1):\n leagueUrl = 'https://us.soccerway.com/matches/2018/08/10/england/premier-league/manchester-united-fc/leicester-city-fc/'\n file.write( leagueUrl +str(i)+'/' + \"\\n\")\n \n#Brasil Serie A\ngetLinks(2988916,2989295)\n#Brasil Serie B\ngetLinks(2987874,2988253)\n","sub_path":"Brazil Serie A 3.0 Goals/linkGenerator.py","file_name":"linkGenerator.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"494461847","text":"\"\"\" Reading and post-processing functions for NSTX correlation reflectometry output. Raw data given by Dr. Ahmed Diallo.\n\nProgram by Lei Shi, 05/16/2014\n\nmodules needed: h5py, numpy, scipy\n\"\"\"\n\nimport h5py as h5\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nfrom ....math.funcs import band_pass_box\nfrom ..analysis import phase, magnitude\n\n\nclass NSTX_Error(Exception):\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass NSTX_REF_Loader:\n \"\"\" Loader class which contains all the reading and post-processing methods\n \"\"\"\n\n def __init__(this,filename):\n \"\"\"initialize with a hdf5 filename, should be a raw data output from NSTX reflectometry measurement.\n\n\n \"\"\"\n this.filename = filename\n f = h5.File(filename,'r')\n this.t0 = f['mydata'][0]['T0']\n this.dt = f['mydata'][0]['DT']\n this.freq = f['mydata'][0]['FREQUENCY']*1e-9 #Change to GHz\n this.nt = len(f['mydata'][0]['INPHASE'])\n f.close()\n def getI(this):\n \"\"\"returns the inphase component of the reflectometry signal\n \"\"\"\n f = h5.File(this.filename,'r')\n this.I = f['mydata'][0]['INPHASE']\n f.close()\n return this.I\n\n def getQ(this):\n \"\"\"returns the out of phase component of the reflectometry signal\n \"\"\"\n f= h5.File(this.filename,'r')\n this.Q = f['mydata'][0]['QUADRATURE']\n f.close()\n return this.Q\n\n def getT(this):\n \"\"\"returns the time array with the same shape as I and Q\n \"\"\"\n\n this.T = this.t0 + this.dt*np.arange(this.nt)\n return this.T\n\n def signal(this,tstart,tend):\n \"\"\" returns the complex signal for a chosen time period, and the corresponding time array.\n Inputs:\n tstart,tend: double, the start and end of the chosen time period in seconds.\n Outputs:\n output1: the complex signal,with original resolution\n output2: the corresponding time array\n \"\"\"\n\n try:\n if(tstart< this.t0 or tend > this.T[-1]):\n raise NSTX_Error('Reading raw signal error: time period outside original data.')\n except AttributeError:\n this.getT()\n if(tstart< this.t0 or tend > this.T[-1]):\n raise NSTX_Error('Reading raw signal error: time period outside original data.')\n\n nstart = int( (tstart-this.t0)/this.dt )\n nend = int( 
(tend-this.t0)/this.dt )\n\n try:\n I = this.I[nstart:nend+1]\n except AttributeError:\n this.getI()\n I = this.I[nstart:nend+1]\n\n try:\n Q = this.Q[nstart:nend+1]\n except AttributeError:\n this.getQ()\n Q = this.Q[nstart:nend+1]\n\n try:\n T = this.T[nstart:nend+1]\n except AttributeError:\n this.getT()\n T = this.T[nstart:nend+1]\n\n return (I + 1j * Q, T)\n\n\nclass FFT_result:\n \"\"\"Contains returned arrays from fft analysis\n\n Attributes:\n origin: original time series data\n shift_fft: array after fft, and shifted so that zero frequency is located in middle\n t: time array corresponds to original data\n f: frequency array corresponds to fft data\n \"\"\"\n\n def __init__(this, origin,fft,t,f):\n this.origin = origin\n this.fft = fft\n this.t = t\n this.f = f\n\nclass Analyser:\n \"\"\" Contains all the Post-process methods\n \"\"\"\n\n def __init__(this, nstx_loaders):\n \"\"\" Initialize with an NSTX_REF_loader array\n \"\"\"\n this.loaders = nstx_loaders\n\n\n def phase(this, time_arr, tol = 1e-5, **params):\n \"\"\"Calculate the extended phase curve in a given time.\n\n The purpose of extending the phase range to (-infi,+infi) is to avoid jumps from +pi -> -pi or the other way around on the normal [-pi,pi) range. In this case, the phase curve looks much smoother and more meaningful.\n The method we are using is first calculate the phase for each time step in the normal [-pi,pi) range, then, calculate the phase change for each time interval : dphi. For dphi>pi, we pick dphi-2*pi as the new phase change; and for dphi < -pi, we pick dphi+2*pi. In other words, we minimize the absolute value of the phase change. This treatment is valid if time step is small compared to plasma changing time scale, so the change of reflected phase shouldn't be very large.\n\n Arguments:\n time_arr: ndarray double, the time (real time in experimental record, unit: second) array on which we acquire the phase.\n keyword list:\n 1)Loader is specified by either of the following ways:\n loader_num : loader = this.loaders[loader_num]\n frequency : check if abs(loader.freq-frequency)/frequency dph, and 2 -> dph+2*pi, therefore, this expression is valid for all 3 cases.\n phase_mod = dph_new.cumsum() # numpy.ndarray.cumsum method returns the accumulated array, since we are accumulating the whole dph_new array, the phase we got is relative to the initial phase at the start of the experiment.\n phase_interp = interp1d(T[1:-1],phase_raw[0]+phase_mod) # note that the time array now needs to be shorten by 1.\n return (phase_interp(time_arr),phase_interp,phase_mod,dph_new)\n\n\n def amp(this, time_arr, tol = 1e-5, **params):\n \"\"\"calculates the amplitude of the fluctuating signal\n Since amplitude is much simpler than phase, we can simply calculate sqrt(I**2 + Q**2) where I,Q are in-phase and out-of-phase components.\n \"\"\"\n\n if('loader_num' in params.keys()):\n loader = this.loaders[params['loader_num']]\n else:\n loader_found = False\n for l in this.loaders:\n if(np.abs(params['frequency']-l.freq)/float(l.freq) < tol):\n loader = l\n loader_found = True\n break\n if(not loader_found):\n raise Exception('fft initialization error: no matching frequency data')\n T = loader.getT()\n S = loader.getI()+loader.getQ()*1j #get the complex signal\n amp = np.abs(S)\n amp_interp = interp1d(T,amp)\n return amp_interp(time_arr)\n\n\n def fft(this,tol = 1e-5, **params):\n \"\"\"OUT OF DATE. 
WILL BE UPDATED SOON.\n\n FFT analysis in time.\n arguments:\n keyword list:\n 1)Time steps can be given by either of the following ways:\n tstart,tend,nt: time steps = np.linspace(tstart,tend,nt)\n tstart,dt,nt: time step = tstart + np.arange(nt)*dt\n 2)Loader is specified by either of the following ways:\n loader_num : loader = this.loaders[loader_num]\n frequency : check if abs(loader.freq-frequency)/frequency/sqrt(<|M(w)|^2>)\n\n cross_correlation function r(w0,w1) is defined as:(see ref.[1])\n r(w0,w1) = < M(w0)M(w1) >/ sqrt(<|M(w0)|^2> <|M(w1)|^2>)\n\n where M(w) is the complex received signal for channel with frequency w, <...> denotes the ensemble average, which in this case, is the average over all time steps.\n\n arguments: tstart, tend: start and end time for calculation, unit: second.\n\n Returns: tuple of two components:\n ( 1D array (n), contains all the self correlation results;\n 2D array (n,n), where row (n0,:) is the cross-correlation of channel n0 with respect to all n channels. The diagonal terms should always be 1. )\n\n Reference:\n [1] Two-dimensional simulations of correlation reflectometry in fusion plasmas, E.J. Valeo, G.J. Kramer and R. Nazikian, Plasma Phys. Control. Fusion 44(2002)L1-L10\n \"\"\"\n\n nf = len(this.loaders)\n\n #first load all the signals from the loaders\n\n M = []\n\n for i in range(nf):\n loader = this.loaders[i]\n M.append(loader.signal(tstart,tend))\n\n M = np.array(M)\n M_bar = np.average(M,axis = 1)\n M2_bar = np.average(M*np.conj(M),axis = 1)\n self = M_bar/np.sqrt(M2_bar)\n\n cross = np.zeros((nf,nf)) + 1j* np.zeros((nf,nf))\n\n for f0 in np.arange(nf):\n M0 = M[f0,:]\n for f1 in np.arange(nf):\n if (f1>=f0):\n M1 = M[f1,:]\n cross_bar = np.average(M0 * np.conj(M1))\n denominator = np.sqrt(M2_bar[f0]*M2_bar[f1])\n cross[f0,f1] = cross_bar / denominator\n cross[f1,f0] = np.conj(cross[f0,f1])\n else:\n pass\n\n return (self,cross)\n\n def Coherent_over_time(this,start, end, step, window, loader_num = 'all'):\n \"\"\"The coherent signal (also called 'self_correlation' before) is defined in function Self_and_Cross_Correlation.\n Arguments:\n loader_num: int, the index of loader to use. 
If not given, default to be string 'all', such that all the channels will be calculated and returned\n start, end, step: double, units: second, the start and end time, and the time step to calculate each time coherent signal.\n window: double, units: second, the length of time to carry out the ensemble average\n\n Return:\n 2D double array,if loader_num is specified,then the time series of coherent signals from the corresponding channel is returned, if not, results for all channels are returned.\n \"\"\"\n if(loader_num != 'all'):\n loaders = [this.loaders[loader_num]]\n else:\n loaders = this.loaders\n\n if(start < window/2):\n start = window/2\n\n t_arr = np.arange(start,end,step)\n\n NL = len(loaders)\n NT = len(t_arr)\n coh_sig = np.zeros((NL,NT)) + 1j* np.zeros((NL,NT))\n\n for i in np.arange(NL):\n\n loader = loaders[i]\n\n I = loader.getI()\n Q = loader.getQ()\n sig = I+ 1j*Q\n\n for j in np.arange(NT):\n t = t_arr[j]\n\n left_bdy = t-window/2\n right_bdy = t+window/2\n\n n_left = int((left_bdy - loader.t0)/loader.dt)\n n_right = int((right_bdy - loader.t0)/loader.dt)\n\n M = sig[n_left:n_right]\n\n M_bar = np.average(M)\n M2_bar = np.average(M*np.conj(M))\n\n coh_sig[i,j] = M_bar/np.sqrt(M2_bar)\n\n return coh_sig\n\n def Cross_Correlation_by_fft(this,start,end,nt,loader_nums = 'all'):\n \"\"\"Another way to calculate the cross correlation between channels. Assume f(t) and g(t) are signals from two channels, and F(w), G(w') are the corresponding Forier transform of them. Then the cross correlation (not normalized) is[1]:\n \\gamma(tau) = FT(F*(w)G(w))\n\n a proper normalization would be (|F|*|G|)^-1 where |F| = sqrt(integral F*(w)F(w)dw)\n\n Arguments:\n start,end,nt: double; time inteval chosen to carry out the cross correlation. The time series will be determined as t_arr = np.linspace(start,end,nt)\n loader_num: list of int (default to be a string 'all');the loaders used in calculating the cross correlation. if given, need to be a list of int. Otherwise, by default, all the channels in the analyser will be used.\n [1] Observation of ion scale fluctuations in the pedestal region during the edge-localized-mode cycle on the National Spherical torus Experiment. A.Diallo, G.J.Kramer, at. el. Phys. 
Plasmas 20, 012505(2013)\n \"\"\"\n\n if(loader_nums == 'all'):\n loader_nums = np.arange(len(this.loaders))\n\n NL = len(loader_nums)\n\n cross_corr = np.zeros((NL,NL)) + np.zeros((NL,NL))*1j\n\n F = []#a list of forier transforms of each channel signal\n F2 = []# list of square of F\n F_norm = []#list of normalization term related to F\n for i in loader_nums:\n f = this.fft(tstart = start,tend = end,nt = nt,loader_num = i,component = 'Cplx').fft\n f2 = np.conj(f)*f\n f_norm = np.sqrt(np.average(f2))\n F.append(f)\n F2.append(np.conj(f)*f)\n F_norm.append(f_norm)\n\n for i in range(NL):\n f = F[i]\n f2 = F2[i]\n f_norm = F_norm[i]\n for j in range(NL):\n if(j == i): # if on the diagonal\n gamma_f = f2/f_norm**2\n gamma_t = np.fft.ifft(gamma_f)\n cross_corr[i,i] = gamma_t[0]\n elif(j>i): #if in upper triangle region, need to calculate this term\n g = F[j]\n g_norm = F_norm[j]\n gamma_f = np.conj(f)*g/(np.conj(f_norm)*g_norm)\n gamma_t = np.fft.ifft(gamma_f)\n cross_corr[i,j] = gamma_t[0]\n else: #if in lower triangle region, use the Hermitian property of the cross_correlation matrix\n cross_corr[i,j] = np.conj(cross_corr[j,i])\n return cross_corr\n\n\n def Phase_Correlation(this,time_arr,loader_nums = 'all'):\n \"\"\"Calculate the time translated cross correlation of the phase fluctuations between channels.\n\n Arguments:\n time_arr: double ndarray, contains all the time steps for calculation, (units: second)\n loader_nums: (optional) the channel numbers chosen for cross correlation. default to use all the channels in Analyser.\n\n Output: 3D array: shape (NL,NL,NT), NL = len(loader_nums) is the number of chosen channels, NT = len(time_arr) is the length of time series. The component (i,j,k) is the cross correlation between channel i and channel j. k <= [(NT-1)/2] and >= [-(NT-1)/2] denotes the time displacement between these two channels. Our convention is that i is delayed k*dT time compared to j. If k<0, it means that i is putting ahead of j.\n\n \"\"\"\n\n if(loader_nums == 'all'):\n loader_nums = np.arange(len(this.loaders))\n NL = len(loader_nums)\n NT = len(time_arr)\n corr = np.zeros((NL,NL,NT))\n phase = np.array([ this.phase(time_arr,loader_num = i)[0] for i in loader_nums ])\n phase_fluc = phase - np.mean(phase,axis = 1)[:,np.newaxis]\n for i in range(NL):\n for j in range(NL):\n for k in np.arange(NT)+np.floor(-(NT-1)/2):\n if k<0: # i is ahead of j by k step\n p1 = phase_fluc[i,-k:-1]\n p2 = phase_fluc[j,0:k-1]\n corr[i,j,k] = np.mean(p1*p2)/np.sqrt(np.mean(p1**2)*np.mean(p2**2)) #cross correlation is normalized to the averaged intensity of the two phase.\n else: # i is delayed compared to j by k step\n p1 = phase_fluc[i,0:-k-1]\n p2 = phase_fluc[j,k:-1]\n corr[i,j,k] = np.mean(p1*p2)/np.sqrt(np.mean(p1**2)*np.mean(p2**2))\n\n return corr\n\n\n\n\n\n\ndef band_pass_filter(sig,dt,freq_low,freq_high):\n \"\"\" Band passing filter for NSTX reflectometry time series signals\n This function is dedicated to filter the time series of complex signals read from NSTX datafiles\n The raw signal is passed through an ideal band pass filter, realised by chopping low and high frequency components from FFT array, and then inversely FFT back to time domain. In order to obtain the most relavant information, the phase signal (obtained by accumulating 'phase shifts' over time, see description of function 'phase' in sdp.diagnostic.Reflectometry.analysis) is filtered as well as the magnitude signal of the given complex signal. 
The logic here is that to the first order, density flucutations cause phase shifts in reflected signals, magnitude modulations are somewhat higher order effects (or due to other more complicated interactions). We keep the magnitude modulations in the same frequency band for more complete assessment. The averaged magnitude is used as the unperturbed magnitude.\n\n Inputs:\n sig: array-like, complex, the time series of the raw reflected signal\n dt: float, sampling time step, using this to compute the frequency in proper unit\n freq_low: float, in Hz, lower limit of the passing band, any component in lower frequency will be erased in filtered signal.\n freq_high: float, in Hz, higher limit of the passing band.\n\n Return:\n filtered_sig: array-like, complex, same shape as sig. The reconstructed filtered signal. Notice that phase and magnitude are filtered and reconstructed separately, and then combined to get the complex signal. It is NOT the same as directly filter the complex input signal with a given frequency band.\n\n \"\"\"\n #get the phase and magnitude series.\n pha = phase(sig)[0]\n mag = magnitude(sig)\n #averaged magnitude will be used for reconstruction of the signal\n mean_mag = np.mean(mag)\n\n #get the fft frequency array\n n = len(sig)\n freqs = np.fft.fftfreq(n,dt)\n idx_low,idx_high = np.searchsorted(freqs[:n/2+1],[freq_low,freq_high]) #note that only first half of the frequency array is holding positive frequencies. The rest are negative ones.\n\n #get the fft result for pahse and magnitude\n pha_spect = np.fft.fft(pha) #Full fft is used here for filtering and inverse fft\n filtered_pha_spect = band_pass_box(pha_spect,idx_low,idx_high)\n\n mag_spect = np.fft.fft(mag)\n filtered_mag_spect= band_pass_box(mag_spect,idx_low,idx_high)\n\n #reconstruct filtered phase and magnitude time sequence\n filtered_pha = np.fft.ifft(filtered_pha_spect)\n filtered_mag = np.fft.ifft(filtered_mag_spect) + mean_mag # We want to stack the magnitude fluctuation on top of the averaged magnitude\n\n return filtered_mag * np.exp(1j * filtered_pha) # sig = mag* exp(i*phi)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"src/python2/sdp/diagnostic/fwr/nstx/nstx.py","file_name":"nstx.py","file_ext":"py","file_size_in_byte":21529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"593896309","text":"# author:joketeng time:2018/11/29\r\nm = input()\r\npattern = input()\r\nres = ''\r\nflag = 0\r\nif '+' in m:\r\n flag = 1\r\nfor i in pattern:\r\n if i.isalpha() and i.upper() not in m:\r\n if flag == 0 or (i.islower() and flag == 1):\r\n res += i\r\n elif not i.isalpha() and i not in m:\r\n res += i\r\nprint(res)\r\n","sub_path":"pat-simple/1033.py","file_name":"1033.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"83296528","text":"\n\nfrom xai.brain.wordbase.nouns._maturity import _MATURITY\n\n#calss header\nclass _MATURITIES(_MATURITY, ):\n\tdef __init__(self,): \n\t\t_MATURITY.__init__(self)\n\t\tself.name = \"MATURITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"maturity\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_maturities.py","file_name":"_maturities.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"618123212","text":"import math\nn = int(input())\np = 0\n\nwhile 2 ** p < n:\n\tp = p + 1\n\nsize = 2 ** p\n\nwhile n != 0:\n\tif n 
>= size:\n\t\tn = n - size\n\telse:\n\t\tsize = size / 2\n\nprint(2 ** p, p - int(math.log(size, 2)))","sub_path":"cokolada.py","file_name":"cokolada.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"417937700","text":"import time\nimport sys\n\nimport vicon_core_api\nfrom vicon_core_api.client import RPCError\n\ntry:\n from vicon_core_api import Client\nexcept ImportError:\n print(\"vicon_core_api not found. Please ensure this package is installed. It can be found in the Shogun install directory\")\n\ntry:\n from shogun_live_api import CameraCalibrationServices\n from shogun_live_api.interfaces import SubjectServices, CaptureServices\nexcept ImportError:\n print(\"shogun_live_api not found. Please ensure this package is installed. It can be found in the Shogun install directory\")\n\n# connect to Shogun Live API\ndef shogun_connect(host_name):\n try:\n client = Client(host_name)\n time.sleep(1)\n\n return client\n except RPCError:\n print(\"Cannot connect to Shogun Live, please ensure Shogun Live is running and run the script again\")\n sys.exit()\n\n#export XCP from Shogun Live\ndef export_xcp(client, export_path):\n calibration_services = CameraCalibrationServices(client)\n\n result = calibration_services.export_camera_calibration(export_path)\n\n if result:\n print('Succesfully exported camera calibration to: ' + export_path)\n\n#export VSK from Shogun Live\ndef export_vsk(client, subject_name, subject_path):\n subject_services = SubjectServices(client)\n\n result = subject_services.export_subject(subject_name, subject_path, True)\n\n if result:\n print(\"Successfully exported prop \" + subject_name + \" to: \" + subject_path)\n\n#import VSK into Shogun Live\ndef import_vsk(client, subject_name, subject_path):\n subject_services = SubjectServices(client)\n rigid_object_type = SubjectServices.ESubjectType.ERigidObject\n\n result = subject_services.import_subject(subject_path, subject_name, rigid_object_type)\n\n if result:\n print(\"Successfully imported\", subject_name, \"from:\", subject_path)\n\n#get capture folder path from Shogun Live\ndef get_capture_folder(client):\n try:\n capture_services = CaptureServices(client)\n\n cap_folder = capture_services.capture_folder()[1]\n\n return(cap_folder)\n except RPCError:\n print(\"Cannot connect to Shogun Live, please ensure Shogun Live is running and run the script again\")\n sys.exit()\n","sub_path":"update_camera_offset/liveapi_utils.py","file_name":"liveapi_utils.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"551448861","text":"# -*- coding: utf-8 -*-\nfrom helpers import *\nfrom helpers_cntk import *\nlocals().update(importlib.import_module(\"PARAMETERS\").__dict__)\n\n\n################################################\n# MAIN\n################################################\n# If classifier is set to svm, then no need to run any training iterations\nmakeDirectory(workingDir)\nif classifier == 'svm':\n rf_maxEpochs = 0\n\n# Load data\nlutLabel2Id = readPickle(lutLabel2IdPath)\nlutId2Label = readPickle(lutId2LabelPath)\nimgDictTest = readPickle(imgDictTestPath)\nimgDictTrain = readPickle(imgDictTrainPath)\nimgDictAug = readPickle(imgDictAugPath)\n\n# Generate list of active learning images if provided\nimgDictAl = {}\nfor subdir in list(imgDictTrain.keys()):\n imgDictAl[subdir] = getFilesInDirectory(pathJoin(imgAlDir,subdir), 
\".jpg\")\nwritePickle(imgDictAlPath, imgDictAl)\n\n# Generate cntk test and train data, i.e. (image, label) pairs and write\n# them to disk since in-memory passing is currently not supported by cntk\ndataTest = getImgLabelList(imgDictTest, imgOrigDir, lutLabel2Id)\ndataTrain = getImgLabelList(imgDictTrain, imgOrigDir, lutLabel2Id)\ndataAug = getImgLabelList(imgDictAug, imgAugDir, lutLabel2Id)\ndataAl = getImgLabelList(imgDictAl, imgAlDir, lutLabel2Id)\nprint(\"Adding {} augmented images to the training set.\".format(len(dataAug)))\ndataTrain += dataAug\nprint(\"Adding {} active learninig images to the training set.\".format(len(dataAl)))\ndataTrain += dataAl\n\n# Optionally add duplicates to balance dataset.\n# Note: this should be done using data point weighting (as is done for svm training), rather than using explicit duplicates.\nif rf_boBalanceTrainingSet:\n dataTrain = cntkBalanceDataset(dataTrain)\n\n# Print training statistics\nprint(\"Statistics training data:\")\ncounts = collections.Counter(getColumn(dataTrain,1))\nfor label in range(max(lutLabel2Id.values())+1):\n print(\" Label {:10}({}) has {:4} training examples.\".format(lutId2Label[label], label, counts[label]))\n\n# Train model\n# Note: Currently CNTK expects train/test splits to be provided as actual file, rather than in-memory\nprintDeviceType()\nwriteTable(cntkTestMapPath, dataTest)\nwriteTable(cntkTrainMapPath, dataTrain)\nmodel = train_model(cntkPretrainedModelPath, cntkTrainMapPath, cntkTestMapPath, rf_inputResoluton,\n rf_maxEpochs, rf_mbSize, rf_maxTrainImages, rf_lrPerMb, rf_momentumPerMb, rf_l2RegWeight,\n rf_dropoutRate, rf_boFreezeWeights)\nmodel.save(cntkRefinedModelPath)\nprint(\"Stored trained model at %s\" % cntkRefinedModelPath)\n\nprint(\"DONE. Showing DNN accuracy vs training epoch plot.\")\nplt.show() # Accuracy vs training epochs plt","sub_path":"3_refineDNN.py","file_name":"3_refineDNN.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"595057499","text":"import discord\nfrom discord.ext import commands\n\nclass testing(commands.Cog):\n \"\"\"\n Testing\n \"\"\"\n def __init__(self, bot):\n \n self.bot = bot\n\n @commands.group(name=\"testing\", invoke_without_command=True)\n async def testing(self, ctx):\n \"\"\"Testing Command\"\"\"\n\n await ctx.send_help(ctx.command)\n \n @testing.command(name=\"testing\")\n async def test(self, ctx):\n \"\"\"Test Command\"\"\"\n await ctx.send('Testing Works')\n\ndef setup(bot):\n bot.add_cog(testing(bot))\n","sub_path":"testing/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"441762133","text":"import http.client\nimport requests\nimport json\nfrom flask import Flask, render_template, url_for, request, redirect\nfrom datetime import datetime\nfrom lxml import html\nimport threading\n\n\n\napp = Flask(__name__)\n\n\n\none=0\nbtc=0\neth=0\nltc=0\nsched = Scheduler()\nsched.start()\ndef cek(): \n global one,btc,eth,ltc \n res = requests.get('https://www.paribu.com/ticker')\n data = res.json()\n btc = data['BTC_TL']['highestBid']\n eth = data['ETH_TL']['highestBid']\n ltc = data['LTC_TL']['highestBid'] \n page = requests.get('https://www.bloomberght.com/doviz/euro')\n tree = html.fromstring(page.content)\n prices = tree.xpath('//*[@id=\"euro\"]/span/small[2]/text()')\n pricesf=prices[0].replace(\",\", \".\")\n eur= float(pricesf)\n one2 = 
eur * 42.43\n one ='{:.2f}'.format(one2)\n btc ='{:.2f}'.format(btc)\n eth ='{:.2f}'.format(eth)\n ltc ='{:.2f}'.format(ltc)\n\nsched.add_interval_job(cek, seconds = 5)\n\n\n\n\n\n@app.route('/') \ndef index():\n return render_template('index.html',one=one, btc=btc, eth=eth, ltc=ltc)\n\nif __name__ == \"__main__\":\n\n app.run(debug=True) \n \n","sub_path":"OneCoin/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"551798677","text":"class Solution(object):\n def subarraySum(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n pre_sum = {}\n pre_sum[0] = 1\n res = 0\n sum_i = 0\n for i in range(len(nums)):\n sum_i += nums[i]\n sum_temp = sum_i - k\n if sum_temp in pre_sum:\n res += pre_sum[sum_temp]\n pre_sum[sum_i] = pre_sum.get(sum_i, 0) + 1\n return res","sub_path":"560_和为K的子数组.py","file_name":"560_和为K的子数组.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"79063116","text":"\"\"\"\nQUESTION:\nGiven a binary tree, return all root-to-leaf paths.\n\nFor example, given the following binary tree:\n\n 1\n / \\\n2 3\n \\\n 5\nAll root-to-leaf paths are:\n\n[\"1->2->5\", \"1->3\"]\nANSWER:\nBFS, DFS\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param {TreeNode} root\n # @return {string[]}\n def binaryTreePaths(self, root):\n def dfs(root, cur):\n if not root.left and not root.right:\n ans.append(cur+str(root.val))\n return\n if root.left:\n dfs(root.left,cur+str(root.val)+'->')\n if root.right:\n dfs(root.right,cur+str(root.val)+'->')\n if not root:\n return []\n ans = []\n dfs(root,'')\n return ans\n #BFS\n def binaryTreePaths_2(self, root):\n if not root:\n return []\n ans = []\n queue = [[root,str(root.val)]]\n while queue:\n top = queue.pop(0)\n if not top[0].left and not top[0].right:\n ans += [top[1]]\n if top[0].left:\n queue.append([top[0].left,top[1]+'->'+str(top[0].left.val)])\n if top[0].right:\n queue.append([top[0].right,top[1]+'->'+str(top[0].right.val)])\n return ans\n\nif __name__ == '__main__':\n print","sub_path":"Python/binary-tree-paths.py","file_name":"binary-tree-paths.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"433735625","text":"import sys\nimport os\nimport pylab\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import factorial as fact\n##########################################################################################################\n# Check the operating system of the machine\n##########################################################################################################\n\nsys_exec = str(sys.executable) # location of the python.exe file\nsys_os = str(sys.platform) # os of the machine\n\nwindows = 'win32'\nlinux1 = 'linux'\nlinux2 = 'linux2'\nmac = 'darwin'\n\nif (sys_os == linux1) or (sys_os == linux2):\n print('Linux machine')\nelif sys_os == windows:\n print('Windows machine')\nelse:\n print('Mac machine')\n\nprint('Current working directory: '+os.getcwd())\n##########################################################################################################\n# Let the experiment be tossing a coin to analyse the fairness of the coin 
with p := probability that the \n# result is heads in a given toss of the coin. The coin is tossed n times and let k be the number of times \n# the coin is facing heads. Therefore, the random variable of the experiment follows a Binomial distribution.\n# Goal: Find the posterior probability of the unknown from the given information/data.\n\n# We need to compute the likelihood (numerator).\n# Let's assume that the coin is fair (prior,\n# i.e. a subjective belief). The denominator is the probability of the given\n# data, which is 1 in this example. It is in general very hard to compute.\n##########################################################################################################\n\n##########################################################################################################\n# list of the number of trials performed in 4 different experiments\nn = [1, 10, 50, 100]\n\n# list of the number of heads observed (number of successes) in the\n# corresponding experiment\nk = [0, 3, 21, 39]\n\n# step used to build the list of values of p (probability of success)\nx = 0.01\n\n# empty list for filling prob values\ntheta = []\n\n# for loop to create list of values of p (99 values; 98 sub-intervals)\nfor i in range(99):\n theta.append(x*(i+1))\n\n# Compute Likelihood times prior\n# empty list for filling posterior values\npost = []\n\n# nested 'for loop' to compute posterior probabilities for n = 1,10,50,100 with k = 0,3,21\n# and 39, the corresponding number of heads up. Posterior prob stored in a 2D array (4,99)\nfor i in range(len(n)):\n post.append([])\n for j in range(len(theta)):\n # binomial likelihood (coefficient n!/(k!(n-k)!)) times the flat prior 0.5\n post[i].append(0.5*(fact(n[i])*theta[j]**k[i]*(1-theta[j])**(n[i]-k[i]))/float(fact(k[i])*fact(n[i]-k[i])))\n\n# normalise the densities\n\n# array filled with zeros for the total sum of posterior probabilities for each experiment\ntot_sum = pylab.zeros(4)\nfor i in range(len(n)):\n for j in range(len(theta)):\n tot_sum[i]+=post[i][j]\n\ndx = theta[1]-theta[0]\n\n# empty list for filling area of unnormalised densities\ncoeff = []\n# empty list for filling normalised densities\npost_n = []\nfor i in range(len(n)):\n coeff.append(tot_sum[i]*dx)\n post_n.append(np.asarray(post[i])/coeff[i]) # as an array so the element-wise division works\n print(sum(post_n[i]*dx))\n\n\n# convert the list objects to numpy arrays\npost = np.asarray(post)\npost_n = np.asarray(post_n)\n\n\n########################################################################################################## \n# Plotting figures\n##########################################################################################################\n\n# Sub-plots of posterior of different experiments\n\nfig, axs = plt.subplots(2, 2)\nfig.suptitle('Posterior')\n# strings that are labels \nn_str = ['n = 1', 'n = 10', 'n = 50', 'n = 100']\n\n# labels of top panels\nfor i in range(2):\n axs[0,i].text(0.6,0.75,n_str[i], fontsize = 12,transform = axs[0,i].transAxes)\n\n# plot top panels\naxs[0, 0].plot(theta, post[0], 'r-o')\naxs[0, 1].plot(theta, post[1], 'g-o')\n\n# labels of bottom panels\nfor i in range(2):\n axs[1,i].text(0.6, 0.75,n_str[2+i], fontsize = 12,transform = axs[1,i].transAxes)\n\n\n# plot bottom panels\naxs[1, 0].plot(theta, post[2], 'b-o')\naxs[1, 1].plot(theta, post[3], 'k-o')\n\n# label x and y axis \nfor ax in axs.flat:\n ax.set(xlabel=r'$\\theta$', ylabel=r'Density p($\\theta$|D)')\n\nplt.savefig(r'Posterior vs theta')\n\n# Sub-plots of normalised posterior of different experiments\nfig, axs1 = plt.subplots(2, 2)\nfig.suptitle('Normalised Posterior')\n\n# labels of top panels\nfor i in range(2):\n axs1[0,i].text(0.6,0.75,n_str[i], fontsize = 
12,transform = axs1[0,i].transAxes)\n\n# plot top panels\naxs1[0, 0].plot(theta, post_n[0], 'r-o')\naxs1[0, 1].plot(theta, post_n[1], 'g-o')\n\n# labels of bottom panels\nfor i in range(2):\n axs1[1,i].text(0.6, 0.75,n_str[2+i], fontsize = 12,transform = axs1[1,i].transAxes)\n\n# plot bottom panels\naxs1[1, 0].plot(theta, post_n[2], 'b-o')\naxs1[1, 1].plot(theta, post_n[3], 'k-o')\n\n# label x and y axis \nfor ax in axs1.flat:\n ax.set(xlabel=r'$\\theta$', ylabel=r'Normalised Density p($\\theta$|D)/c')\n\nplt.savefig(r'Normalised Posterior vs theta')\nplt.show()\n","sub_path":"comp_task_1/comp_task_1.py","file_name":"comp_task_1.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"396292776","text":"import binascii\nimport os\nimport cv2\nimport csv\nimport pickle\nimport time\nimport argparse\nimport requests\nimport numpy as np\nimport pandas as pd\nimport imutils\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom time import gmtime, strftime, localtime\nfrom flask import Flask, jsonify, request, render_template, Response, redirect, url_for, send_from_directory\n# import the necessary packages\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nfrom imutils import paths\nfrom source.face_recognition import recognize_faces\nfrom source.utils import draw_rectangles, read_image, prepare_image\nfrom source.model_training import create_mlp_model\n\n\n\napp = Flask(__name__)\n\napp.config.from_object('config')\nUPLOAD_FOLDER = os.path.basename('uploads')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# keep a single Flask app; a second Flask(\"Flask Image Gallery\") instance here would discard the config loaded above\napp.config['IMAGE_EXTS'] = [\".png\", \".jpg\", \".jpeg\", \".gif\", \".tiff\"]\n\n\ndef encode(x):\n return binascii.hexlify(x.encode('utf-8')).decode()\n\ndef decode(x):\n return binascii.unhexlify(x.encode('utf-8')).decode()\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n\n@app.route('/recognize', methods=['POST'])\ndef detect():\n file = request.files['image']\n\n # Read image\n image = read_image(file)\n\n # Recognize faces\n classifier_model_path = \"models\" + os.sep + \"finalrecognizer.pickle\"\n label_encoder_path = \"models\" + os.sep + \"finalle.pickle\"\n faces = recognize_faces(image, classifier_model_path, label_encoder_path,\n detection_api_url=app.config[\"DETECTION_API_URL\"])\n\n return jsonify(recognitions=faces)\n\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n file = request.files['image']\n\n # Read image\n image = read_image(file)\n\n # Recognize faces\n classifier_model_path = \"models\" + os.sep + \"finalrecognizer.pickle\"\n label_encoder_path = \"models\" + os.sep + \"finalle.pickle\"\n faces = recognize_faces(image, classifier_model_path, label_encoder_path,\n detection_api_url=\"http://127.0.0.1:3000/\")\n\n # Draw detection rects\n draw_rectangles(image, faces)\n\n # Prepare image for html\n to_send = prepare_image(image)\n\n return render_template('stillphoto.html', face_recognized=len(faces) > 0, num_faces=len(faces), image_to_show=to_send,\n init=True)\n\n\n\n@app.route('/static')\ndef static_page():\n return render_template('stillphoto.html')\n\n\n\n@app.route('/realtime')\ndef realtime():\n return 
render_template('realtime.html')\n\nvideo = cv2.VideoCapture(0)\n\ndef gen(video):\n\n cleaner = pd.read_csv('attendance-system.csv')\n cleaner.to_csv('attendance-system.csv', index=False)\n\n # construct the argument parser and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-d\", \"--detector\", default=\"face_detection_model\",\n help=\"path to OpenCV's deep learning face detector\")\n ap.add_argument(\"-m\", \"--embedding-model\", default=\"models/openface_nn4.small2.v1.t7\",\n help=\"path to OpenCV's deep learning face embedding model\")\n ap.add_argument(\"-r\", \"--recognizer\", default=\"models/finalrecognizer.pickle\",\n help=\"path to model trained to recognize faces\")\n ap.add_argument(\"-l\", \"--le\", default=\"models/finalle.pickle\",\n help=\"path to label encoder\")\n ap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n help=\"minimum probability to filter weak detections\")\n args = vars(ap.parse_args())\n\n # load our serialized face detector from disk\n print(\"[INFO] loading face detector...\")\n protoPath = os.path.sep.join([args[\"detector\"], \"deploy.prototxt\"])\n modelPath = os.path.sep.join([args[\"detector\"],\n \"res10_300x300_ssd_iter_140000.caffemodel\"])\n detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n\n # load our serialized face embedding model from disk\n print(\"[INFO] loading face recognizer...\")\n embedder = cv2.dnn.readNetFromTorch(args[\"embedding_model\"])\n\n # load the actual face recognition model along with the label encoder\n recognizer = pickle.loads(open(args[\"recognizer\"], \"rb\").read())\n le = pickle.loads(open(args[\"le\"], \"rb\").read())\n\n # initialize the video stream, then allow the camera sensor to warm up\n\n # start the FPS throughput estimator\n fps = FPS().start()\n faces_list = []\n proba_list = []\n proba = 0\n count = 0\n now = datetime.now()\n dictionaryin = {}\n dictionaryout = {}\n\n unknown_counter = 0\n\n # loop over frames from the video file stream\n while True:\n # grab the frame from the threaded video stream\n success, image = video.read()\n\n frame = image\n # resize the frame to have a width of 600 pixels (while\n # maintaining the aspect ratio), and then grab the image\n # dimensions\n frame = imutils.resize(frame, width=600)\n (h, w) = frame.shape[:2]\n\n dt_string = now.strftime(\"%d/%m/%Y\")\n hr_string = strftime(\"%H:%M:%S\", localtime())\n\n # construct a blob from the image\n imageBlob = cv2.dnn.blobFromImage(\n cv2.resize(frame, (300, 300)), 1.0, (300, 300),\n (104.0, 177.0, 123.0), swapRB=False, crop=False)\n\n # apply OpenCV's deep learning-based face detector to localize\n # faces in the input image\n detector.setInput(imageBlob)\n detections = detector.forward()\n\n # loop over the detections\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with\n # the prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections\n if confidence > args[\"confidence\"]:\n # compute the (x, y)-coordinates of the bounding box for\n # the face\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # extract the face ROI\n face = frame[startY:endY, startX:endX]\n (fH, fW) = face.shape[:2]\n\n # ensure the face width and height are sufficiently large\n if fW < 20 or fH < 20:\n continue\n\n # construct a blob for the face ROI, then pass the blob\n # through our face embedding model to obtain the 128-d\n # quantification of the face\n 
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,\n (96, 96), (0, 0, 0), swapRB=True, crop=False)\n embedder.setInput(faceBlob)\n vec = embedder.forward()\n\n # perform classification to recognize the face\n preds = recognizer.predict_proba(vec)\n j = np.argmax(preds)\n proba = preds[j]\n name = le.classes_[j]\n img_counter = 0\n\n # draw the bounding box of the face along with the\n # associated probability\n text = \"{}: {:.2f}%\".format(name, proba * 100)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n (0, 255, 255), 2)\n cv2.putText(frame, text, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 255), 2)\n\n # print(le.classes_)\n\n if proba >= 0.70:\n faces_list.append(name)\n proba_list.append(proba)\n count = count + 1\n\n if name == \"Mridulata\":\n if proba >= 0.70:\n cv2.putText(frame, \"WELCOME MRIDULATA!!!\", (40, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n\n if name == \"Smrity\":\n if proba >= 0.90:\n cv2.putText(frame, \"WELCOME SMRITY!!!\", (40, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n\n if name == \"Saloni\":\n if proba >= 0.90:\n cv2.putText(frame, \"WELCOME SALONI!!!\", (40, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n\n if name == \"Sujata\":\n if proba >= 0.90:\n cv2.putText(frame, \"WELCOME SUJATA!!!\", (40, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n\n if name == \"Unknown\":\n if proba >= 0.90:\n unknown_dir = \"images/unknown\"\n test = datetime\n date_string = time.strftime(\"%Y-%m-%d-%H:%M\")\n\n unknowns_name = unknown_dir + os.sep + date_string + \".jpg\"\n cv2.imwrite(unknowns_name, frame)\n unknown_counter += 1\n\n if count == 20:\n\n d = defaultdict(list)\n for key, value in zip(faces_list, proba_list):\n d[key].append(value)\n occurence = dict(d)\n thisset = set(occurence)\n for x in thisset:\n occurance_individual = len(occurence[x])\n occurence[x] = sum(item for item in occurence[x])\n\n a = sum(occurence.values())\n\n for x in thisset:\n occurence[x] = occurence[x] / a\n\n attendance = {word for word, prob in occurence.items() if prob >= 0.3}\n # students = max(occurence, key=occurence.get)\n students = list(attendance)\n\n headers = ['Date', 'Name', 'Time Sign In', 'Time Sign Out']\n\n def write_csv(data):\n\n with open('attendance-system.csv', 'a') as outfile:\n outfile.truncate()\n file_is_empty = os.stat('attendance-system.csv').st_size == 0\n writer = csv.writer(outfile, lineterminator='\\n', )\n if file_is_empty:\n writer.writerow(headers)\n\n writer.writerow(data)\n\n # time.sleep(1)\n current_hour = datetime.now().second\n fps.stop()\n waktu = fps.elapsed()\n\n if waktu >= 0 and waktu <= 15:\n print('Attendance system Open for sign in')\n for a in students:\n write_csv([dt_string, a, hr_string, ''])\n\n records = pd.read_csv('attendance-system.csv') # Records dictionaryin for notification\n deduped = records.drop_duplicates(['Name'], keep='first')\n deduped = deduped.drop(columns=['Time Sign Out'])\n dictionaryin = deduped.set_index('Name').T.to_dict('list')\n\n elif waktu >= 30 and waktu <= 45:\n\n for a in students:\n write_csv([dt_string, a, '', hr_string])\n print('Attendance system Open for sign out')\n\n records = pd.read_csv('attendance-system.csv') # Records dictionaryout for notification\n signed_out = records.loc[records['Time Sign In'].notna()]\n deduped_out = signed_out.drop_duplicates(['Name'], keep='first')\n deduped_out = deduped_out.drop(columns=['Time Sign In'])\n dictionaryout = deduped_out.set_index('Name').T.to_dict('list')\n 
else:\n print('Attendance system closed until next course')\n\n print(dt_string, hr_string, students)\n\n faces_list.clear()\n proba_list.clear()\n count = 0\n\n\n\n # update the FPS counter\n fps.update()\n\n ret, jpeg = cv2.imencode('.jpg', frame)\n frame = jpeg.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n\n fps.stop()\n\n\n\n print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n\n\n\n@app.route('/video_feed')\ndef video_feed():\n global video\n return Response(gen(video),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\ncv2.destroyAllWindows()\n\n\n\n@app.route('/images')\ndef home():\n root_dir = \"/home/mridulata/face-recognition-app-tutorial/images/unknown\"\n \n image_paths = []\n for root,dirs,files in os.walk(root_dir):\n for file in files:\n if any(file.endswith(ext) for ext in app.config['IMAGE_EXTS']):\n image_paths.append(encode(os.path.join(root,file)))\n return render_template('images.html', paths=image_paths)\n\n\n@app.route('/cdn/<path:filepath>')\ndef download_file(filepath):\n dir,filename = os.path.split(decode(filepath))\n return send_from_directory(dir, filename, as_attachment=False)\n\n\n@app.route('/view')\ndef view():\n filename = 'attendance-system.csv'\n data = pd.read_csv(filename, header=0)\n myData = list(data.values)\n return render_template('view.html', myData=myData)\n\n\n@app.route('/showdata')\ndef showdata():\n\n results = []\n\n # csv.DictReader needs a file object (not a DataFrame), and this GET-only route should not gate on POST\n with open('attendance-system.csv') as user_csv:\n reader = csv.DictReader(user_csv)\n\n for row in reader:\n results.append(dict(row))\n\n fieldnames = [key for key in results[0].keys()]\n\n return render_template('showdata.html', results=results, fieldnames=fieldnames, len=len)\n\n\nif __name__==\"__main__\":\n \n app.run(host='0.0.0.0', port=5000, threaded=True)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"178280760","text":"\"\"\"Bottle model classes\n\"\"\"\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom utility.base_classes import CustomModel\n\nfrom beer.models import Beer\nfrom cellar.models import Cellar\nfrom location.models import Store\n\n\n# Packaging types with volume information\nclass Packaging(CustomModel):\n \"\"\"Container types\n\n name -- Simple package description, i.e. 
Bottle 12 oz\n volume -- package volume in fluid ounces\n \"\"\"\n name = models.CharField(max_length=50)\n volume = models.DecimalField(max_digits=6, decimal_places=2)\n\n\nclass Bottle(CustomModel):\n \"\"\"Information about specific bottles\"\"\"\n qrcode = models.CharField(\n max_length=64,\n unique=True\n )\n beer = models.ForeignKey(Beer)\n cellar = models.ForeignKey(Cellar)\n\n store = models.ForeignKey(\n Store,\n blank=True,\n null=True\n )\n purchase_date = models.DateField(\n blank=True,\n null=True\n )\n purchase_price = models.DecimalField(\n blank=True,\n null=True,\n max_digits=6,\n decimal_places=2\n )\n\n packaging = models.ForeignKey(Packaging)\n bottle_number = models.IntegerField(\n blank=True,\n null=True\n )\n batch_number = models.CharField(\n max_length=20,\n blank=True,\n null=True\n )\n\n year = models.PositiveSmallIntegerField(\n blank=True,\n null=True\n )\n month = models.PositiveSmallIntegerField(\n blank=True,\n null=True\n )\n day = models.PositiveSmallIntegerField(\n blank=True,\n null=True\n )\n\n def current_cellar(self):\n transactions = self.transactiondetail_set\n c_list = transactions.filter(item_io=1).order_by('-created')\n\n return c_list[0]\n\n def __str__(self):\n return '{name} {code_start}...{code_end}'.format(\n code_start=self.qrcode[:4],\n code_end=self.qrcode[-4:],\n name=self.beer.name\n )\n\n\nclass TastingNote(CustomModel):\n \"\"\"Bottle tasting notes\"\"\"\n bottle = models.ForeignKey(Bottle)\n score = models.PositiveSmallIntegerField(\n default=10,\n validators=[\n MaxValueValidator(10),\n MinValueValidator(1)\n ]\n )\n\n tasting_notes = models.TextField(max_length=1000, blank=True, null=True)\n","sub_path":"apps/bottle/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"515272991","text":"'''makeCutList'''\n\n\"\"\"\nMake a pre-edited file reference that can be inputted back into auto-editor.\n\"\"\"\n\nimport os\nimport json\n\ndef readCutList(jsonFile, version, log) -> list:\n with open(jsonFile, 'r') as f:\n data = json.load(f)\n\n if(data['presets']['version'] != version):\n log.warning('This json file was generated using a different version of auto-editor.')\n\n INPUT_FILE = data['timeline']['media_file']\n\n if(not os.path.isfile(INPUT_FILE)):\n log.error('Could not locate file: ' + INPUT_FILE)\n\n speeds = data['presets']['speeds']\n\n chunks = data['timeline']['chunks']\n\n return INPUT_FILE, chunks, speeds\n\n\ndef makeCutList(vidFile, out, version, chunks, speeds, log):\n\n if(not out.endswith('.json')):\n log.error('Output extension must be .json')\n\n data = {}\n data['presets'] = {\n 'version': version,\n 'speeds': speeds,\n }\n data['timeline']= {\n 'media_file': os.path.abspath(vidFile),\n 'chunks': chunks,\n }\n\n with open(out, 'w') as outfile:\n json.dump(data, outfile, indent=4)\n","sub_path":"auto_editor/makeCutList.py","file_name":"makeCutList.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"249420053","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nChapter 2: Basics of UNIX commands\r\n\r\nhightemp.txt is a file that stores records of the highest temperatures in Japan\r\nin tab-separated format with the columns \"prefecture\", \"location\", \"℃\" and \"date\".\r\nWrite a program that performs the following processing and run it with\r\nhightemp.txt as the input file.\r\nAlso run the same processing with a UNIX command and check the program's output.\r\n\r\n15. Output the last N lines\r\nReceive a natural number N by means such as a command-line argument, and display\r\nonly the last N lines of the input. Use the tail command for verification.\r\n\"\"\"\r\nimport argparse\r\nimport io\r\n\r\n\r\ndef tail(file, lines):\r\n \"\"\"\r\n Display the given number of lines from the end of the specified file\r\n\r\n :param str file: path of the target file\r\n :param int lines: number of lines to output\r\n \"\"\"\r\n # with open(file, mode=\"r\", encoding=\"utf8\") as read_file:\r\n with open(file, mode=\"rb\") as read_file:\r\n buf = b\"\"\r\n line = 0\r\n size = read_file.seek(0, io.SEEK_END)\r\n for i in range(1, size):\r\n read_file.seek(-1 * i, io.SEEK_END)\r\n b = read_file.read(1)\r\n if b == b\"\\n\":\r\n print(buf.decode('utf8'))\r\n buf = b\"\"\r\n line += 1\r\n if line >= lines:\r\n break\r\n elif b != b\"\\r\":\r\n buf = b + buf\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('lines', type=int)\r\n parser.add_argument('file')\r\n args = parser.parse_args()\r\n tail(args.file, args.lines)\r\n\r\n","sub_path":"nlp100_015.py","file_name":"nlp100_015.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"131706538","text":"'''\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nIf you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.\n\nNote that you cannot sell a stock before you buy one.\n\nExample :\nInput: [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\n Not 7-1 = 6, as selling price needs to be larger than buying price.\n'''\n\n__date__ = '2018-7-13'\n\n# way 1\nclass Solution_1(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if len(prices) < 2:\n return 0\n max_profit = 0\n buy = prices[0]\n for price in prices[1:]:\n buy = min(buy, price)\n max_profit = max(max_profit, price - buy)\n return max_profit\n\n# way 2\nclass Solution_2(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if prices == []:\n return 0\n min_val = prices[0]\n max_res = 0\n for i in prices[1:]:\n if i < min_val:\n min_val = i\n max_res = max(max_res,i-min_val)\n return max_res","sub_path":"121. Best Time to Buy and Sell Stock.py","file_name":"121. 
Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"305844900","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nclass GD(object):\n ins = None\n\n def __init__(self):\n self.params = (\n ('moduleId', '12073'),\n ('struts.portlet.action', '/app/yglcAction!listProduct.action'),\n )\n self.session = requests.session()\n self.data = [\n ('SFZS', 'Y'),\n ('TZBZMC', 'RMB'),\n ('pageSize', '12'),\n ('qxrUp', ''),\n ('yqnhsylDown', 'Y'),\n ('channelIds[]', 'yxl94'),\n ('page', '1'),\n ]\n\n def process(self):\n res_list = []\n for page in range(1, 100):\n tmp_data = self.data\n tmp_data.pop(-1)\n tmp_data.append(('page', str(page)))\n # print tmp_data\n res = self.session.post('http://www.cebbank.com/eportal/ui', params=self.params, data=tmp_data)\n if 'cxtj' in res.text:\n break\n soup = BeautifulSoup(res.text, 'html.parser')\n links = soup.find_all('div', 'lccp_main_content_tx')\n # print len(links)\n for link in links:\n for li in link.find_all('li'):\n res_list.append(' '.join(li.text.split()))\n return res_list\n\n @classmethod\n def run(cls):\n if cls.ins is None:\n cls.ins = cls()\n cls.ins.process()\n\n\nif __name__ == \"__main__\":\n GD.run()\n","sub_path":"money/gd.py","file_name":"gd.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"441567459","text":"# aschannels stand for auto-star channels\nimport discord\nimport bot_config\nimport settings\nimport functions\nfrom discord.ext import commands\nfrom typing import Union\n\n\nclass AutoStarChannels(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(\n name='aschannels', aliases=['asc', 'as', 'a'],\n description=\"Manage AutoStar Channels\",\n brief=\"Manage AutoStar Channels\", invoke_without_command=True\n )\n @commands.guild_only()\n async def aschannels(self, ctx, aschannel: discord.TextChannel = None):\n get_asemojis = \\\n \"\"\"SELECT * FROM asemojis WHERE aschannel_id=$1\"\"\"\n\n conn = self.bot.db.conn\n\n if aschannel is None:\n get_aschannels = \\\n \"\"\"SELECT * FROM aschannels WHERE guild_id=$1\"\"\"\n\n async with self.bot.db.lock:\n async with conn.transaction():\n aschannels = await conn.fetch(\n get_aschannels, ctx.guild.id\n )\n\n if len(aschannels) == 0:\n await ctx.send(\"You don't have any AutoStarChannels.\")\n return\n\n message = \"\"\n for asc in aschannels:\n channel = self.bot.get_channel(asc['id'])\n async with self.bot.db.lock:\n async with conn.transaction():\n s_emojis = await conn.fetch(\n get_asemojis, asc['id']\n )\n emoji_str = await functions.pretty_emoji_string(\n s_emojis, ctx.guild\n )\n if channel is None:\n message += f\"Deleted Channel {asc['id']} {emoji_str}\\n\"\n else:\n message += f\"<#{asc['id']}> {emoji_str}\\n\"\n\n embed = discord.Embed(\n title=\"AutoStar Channels\",\n description=message,\n color=bot_config.COLOR\n )\n\n await ctx.send(embed=embed)\n else:\n get_aschannel = \\\n \"\"\"SELECT * FROM aschannels WHERE id=$1\"\"\"\n\n async with self.bot.db.lock:\n conn = self.bot.db.conn\n async with conn.transaction():\n sasc = await conn.fetchrow(\n get_aschannel, aschannel.id\n )\n s_emojis = await conn.fetch(\n get_asemojis, aschannel.id\n )\n\n if sasc is None:\n await ctx.send(\"That is not an AutoStar Channel!\")\n return\n\n emoji_str = await functions.pretty_emoji_string(\n s_emojis, ctx.guild\n )\n\n message = (\n 
f\"**emojis:** {emoji_str}\\n\"\n f\"**minChars:** {sasc['min_chars']}\\n\"\n f\"**requireImage:** {sasc['require_image']}\\n\"\n f\"**deleteInvalid:** {sasc['delete_invalid']}\"\n )\n\n embed = discord.Embed(\n title=f\"Settings for {aschannel.name}\",\n description=message,\n color=bot_config.COLOR\n )\n\n await ctx.send(embed=embed)\n\n @aschannels.command(\n name='add', aliases=['a'],\n description='Sets a channel as an AutoStarChannel',\n breif='Add an AutoStarChannel'\n )\n @commands.has_permissions(manage_channels=True)\n @commands.guild_only()\n async def add_aschannel(self, ctx, channel: discord.TextChannel):\n await settings.add_aschannel(self.bot, channel)\n await ctx.send(\n f\"Created AutoStarChannel {channel.mention}\"\n )\n\n @aschannels.command(\n name='remove', aliases=['r', 'delete', 'del', 'd'],\n description=\"Remove an AutoStarChannel\",\n brief=\"Remove an AutoStarChannel\"\n )\n @commands.has_permissions(manage_channels=True)\n @commands.guild_only()\n async def remove_aschannel(\n self, ctx, channel: Union[discord.TextChannel, int]\n ):\n channel_id = channel.id if isinstance(channel, discord.TextChannel)\\\n else channel\n await settings.remove_aschannel(self.bot, channel_id, ctx.guild.id)\n await ctx.send(\n f\"Removed AutoStar Channel {channel}\"\n )\n\n @aschannels.command(\n name='addEmoji', aliases=['ae'],\n description=\"Add an emoji for the bot to automatically react\"\n \" to messages with.\",\n brief='Add an emoji to the AutoStar Channel'\n )\n @commands.has_permissions(manage_messages=True)\n @commands.guild_only()\n async def add_asemoji(\n self, ctx, aschannel: discord.TextChannel,\n emoji: Union[discord.Emoji, str]\n ):\n if type(emoji) is str:\n if not functions.is_emoji(emoji):\n await ctx.send(\n \"I don't recoginize that emoji. 
If it\"\n \" is a custom emoji, it must be in this server.\"\n )\n return\n emoji_name = emoji if type(emoji) is str else str(emoji.id)\n await settings.add_asemoji(\n self.bot, aschannel, emoji_name\n )\n await ctx.send(f\"Added {emoji} to {aschannel.mention}\")\n\n @aschannels.command(\n name='removeEmoji', aliases=['re'],\n description=\"Remove autostar emoji\",\n brief='Remove autostar emoji'\n )\n @commands.has_permissions(manage_messages=True)\n @commands.guild_only()\n async def remove_asemoji(\n self, ctx, aschannel: discord.TextChannel,\n emoji: Union[discord.Emoji, str]\n ):\n emoji_name = emoji if type(emoji) is str else str(emoji.id)\n await settings.remove_asemoji(\n self.bot, aschannel, emoji_name\n )\n await ctx.send(f\"Removed {emoji} from {aschannel.mention}\")\n\n @aschannels.command(\n name='requireImage', aliases=['ri'],\n description=\"Wether or not messages sent here are\"\n \"required to have an image.\",\n brief=\"Wether or not an image is required\"\n )\n @commands.has_permissions(manage_messages=True)\n @commands.guild_only()\n async def set_require_image(\n self, ctx, aschannel: discord.TextChannel, value: bool\n ):\n await settings.change_aschannel_settings(\n self.bot.db, aschannel.id, require_image=value\n )\n await ctx.send(f\"Set requireImage to {value} for {aschannel.mention}\")\n\n @aschannels.command(\n name='minChars', aliases=['mc'],\n description='The minimum required characters for a message'\n 'in the AutoStar Channel',\n brief='Set the minimum characters for a message'\n )\n @commands.has_permissions(manage_messages=True)\n @commands.guild_only()\n async def set_min_chars(\n self, ctx, aschannel: discord.TextChannel, value: int\n ):\n await settings.change_aschannel_settings(\n self.bot.db, aschannel.id, min_chars=value\n )\n await ctx.send(f\"Set minChars to {value} for {aschannel.mention}\")\n\n @aschannels.command(\n name='deleteInvalid', aliases=['di'],\n description='Wether or not to delete messages if they don\\'t meet'\n 'the requirements',\n brief='Wether or not to delete invalid messages'\n )\n @commands.has_permissions(manage_messages=True)\n @commands.guild_only()\n async def set_delete_invalid(\n self, ctx, aschannel: discord.TextChannel, value: bool\n ):\n await settings.change_aschannel_settings(\n self.bot.db, aschannel.id, delete_invalid=value\n )\n await ctx.send(f\"Set deleteInvalid to {value} for {aschannel.mention}\")\n\n\ndef setup(bot):\n bot.add_cog(AutoStarChannels(bot))\n","sub_path":"cogs/aschannels.py","file_name":"aschannels.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"46564698","text":"import os\nimport sys\nimport glob\nimport numpy as np\nfrom skimage import io\nfrom sklearn import datasets\n\n#定数\nIMAGE_SIZE =40\nCOLOR_BYTE =3\nCATEGORY_NUM =6\n\ndef load_handimage(path):\n #ファイル一覧の取得\n files = glob.glob(os.path.join(path, '*/*.png'))\n\n #イメージとラベル領域を確保\n images = np.ndarray((len(files),IMAGE_SIZE, IMAGE_SIZE, COLOR_BYTE), dtype=np.uint8)\n labels = np.ndarray(len(files), dtype=np.int)\n\n #イメージとラベルを読み込み\n for idx, file in enumerate(files):\n image = io.imread(file)\n images[idx]=image\n\n label=os.path.split(os.path.dirname(file))[-1]\n labels[idx]=int(label)\n\n #scikit-learnの他のデータセットの形式に合わせる\n flat_data= images.reshape((-1,IMAGE_SIZE*IMAGE_SIZE*COLOR_BYTE))\n images=flat_data.view()\n return datasets.base.Bunch(data=flat_data,\n target=labels.astype(np.int),\n target_names=np.arange(CATEGORY_NUM),\n 
images=images,\n DESCR=None)\n\nfrom sklearn import svm, metrics\n\nif __name__ == '__main__':\n argvs=sys.argv\n train_path=argvs[1]\n test_path=argvs[2]\n\n # load the training data\n train = load_handimage(train_path)\n\n # method: linear SVM\n classifier = svm.LinearSVC()\n\n # train\n classifier.fit(train.data,train.target)\n\n # load the test data\n test= load_handimage(test_path)\n\n # test\n predicted = classifier.predict(test.data)\n\n # results\n print(\"Accuracy:\\n%s\" % metrics.accuracy_score(test.target, predicted))\n\n\n\n\n","sub_path":"機械学習入門実践編/trial_handsign_SVM.py","file_name":"trial_handsign_SVM.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"313802719","text":"# Note: I use Spyder on my own laptop. Did not use the lab computer.\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import odeint\r\n\r\nL = float(input('Please enter a value for the inductance, L: '))\r\nC = float(input('Please enter a value for the capacitance, C: '))\r\n\r\nw0 = 1/np.sqrt(L*C) # omega_0\r\nfreq = np.linspace(0.1*w0/(2*np.pi), w0/np.pi, 100) # given frequency range\r\nm = [5, 1, 0.2] # given values of m\r\nyAf = np.array([0, 0]) # initial charge and current.\r\ntAf = np.arange(0, 60, np.pi/w0/20) # time array\r\nV0 = 1 # given value of v0\r\n\r\nR1 = 2*m[0]*np.sqrt(L/C) # value of resistance at m = 5\r\nR2 = 2*m[1]*np.sqrt(L/C) # value of resistance at m = 1\r\nR3 = 2*m[2]*np.sqrt(L/C) # value of resistance at m = 0.2\r\nR_all = [R1, R2, R3]\r\n\r\n\r\ndef dIdtAF(yAf, tAf, w, R):\r\n # This function takes the array with initial charge and current, as well as\r\n # the time array, the omega (angular frequency), and resistance, as\r\n # parameters. We will use these to solve the differential equation below,\r\n # dI, by using the odeint function.\r\n q = yAf[0] # charge\r\n I = yAf[1] # current = dq/dt\r\n dI = V0*np.cos(w*tAf)/L - q/(L*C) - I*R/L # the differential equation\r\n return [I, dI] # return the current and the result(s) of the DE above\r\n\r\n\r\nres1 = [] # empty list for each resistor/resistance which I'll be appending to\r\nres2 = [] # could have preassigned them using np.zeros() or such, but the prof\r\nres3 = [] # said the change is minuscule and unnecessary in this case.\r\n\r\nfor i in R_all: # iterating over the 3 resistances for respective m\r\n for j in freq: # iterating over all the frequencies\r\n w = 2*np.pi*j # angular frequency, omega\r\n call = odeint(dIdtAF, yAf, tAf, args=(w, i)) # calling odeint to\r\n # solve differential equation above\r\n y = call[:, 1] # for readability, assigning y to the result returned.\r\n imax = np.max(y[200:]) # taking the max of the values after steady\r\n # state reached. Using the values after 200 because by doing so, we\r\n # neglect the first few values where the solution might not be steady.\r\n if i == R_all[0]:\r\n res1.append(imax) # appending to corresponding resistance\r\n if i == R_all[1]:\r\n res2.append(imax)\r\n if i == R_all[2]:\r\n res3.append(imax)\r\n\r\nplt.figure(1) # creating a figure\r\nplt.subplot(311) # subplots for each resistance\r\nplt.plot(freq, res1) # plotting each resonance for corresponding R with freq\r\nplt.xlabel(\"Frequency, Hz\")\r\nplt.ylabel(\"Current, I(t)\")\r\nplt.title(\"L={}H,C={}F,R={}Ohms\".format(L, C, R_all[0]))\r\n# Here I assumed the units are in H, F, and Ohms. We were not told if they were\r\n# mH, nH etc. 
which is why I made this general assumption.\r\nplt.grid()\r\n\r\nplt.subplot(312)\r\nplt.plot(freq, res2)\r\nplt.xlabel(\"Frequency, Hz\")\r\nplt.ylabel(\"Current, I(t)\")\r\nplt.title(\"L={}H,C={}F,R={}Ohms\".format(L, C, R_all[1]))\r\nplt.grid()\r\n\r\nplt.subplot(313)\r\nplt.plot(freq, res3)\r\nplt.xlabel(\"Frequency, Hz\")\r\nplt.ylabel(\"Current, I(t)\")\r\nplt.title(\"L={}H,C={}F,R={}Ohms\".format(L, C, R_all[2]))\r\nplt.tight_layout() # allows us to see the titles clearly of each subplot\r\nplt.grid()\r\nplt.savefig('resonance.pdf')\r\nplt.show()\r\n\r\n# =============================================================================\r\n# Here, we answer question 3. For readability, use another for loop. Could have\r\n# done it all in one nested loop, but again, that looks messy and decreases\r\n# readability, as the TAs said. Hence, I'll stick to this as it's cleaner.\r\n\r\nI_1 = [] # empty lists for each resistance that I'll be appending to.\r\nI_2 = []\r\nI_3 = []\r\nfor i in R_all: # iterating over the resistances\r\n call = odeint(dIdtAF, yAf, tAf, args=(w0, i)) # calling odeint to solve\r\n # DE. However, now we keep w0 fixed, instead of using w for each frequency\r\n # The steps below are the same as before.\r\n y = call[:, 1]\r\n if i == R_all[0]:\r\n I_1.append(y)\r\n if i == R_all[1]:\r\n I_2.append(y)\r\n if i == R_all[2]:\r\n I_3.append(y)\r\n\r\nplt.figure(2)\r\nplt.subplot(311)\r\nplt.plot(tAf, I_1[0]) # Instead of frequency, now we plot time on the x-axis.\r\nplt.xlabel(\"Time(s)\")\r\nplt.ylabel(\"Current, I(t)\")\r\nplt.title(\"L={}H,C={}F,R={}Ohms\".format(L, C, R_all[0]))\r\nplt.grid()\r\n\r\nplt.subplot(312)\r\nplt.plot(tAf, I_2[0])\r\nplt.xlabel(\"Time(s)\")\r\nplt.ylabel(\"Current, I(t)\")\r\nplt.title(\"L={}H,C={}F,R={}Ohms\".format(L, C, R_all[1]))\r\nplt.grid()\r\n\r\nplt.subplot(313)\r\nplt.plot(tAf, I_3[0])\r\nplt.xlabel(\"Time(s)\")\r\nplt.ylabel(\"Current, I(t)\")\r\nplt.title(\"L={}H,C={}F,R={}Ohms\".format(L, C, R_all[2]))\r\nplt.tight_layout() # allows us to see the titles clearly of each subplot.\r\nplt.grid()\r\nplt.savefig('transients.pdf')\r\nplt.show()\r\n","sub_path":"3rd Year/Phys 210/Project 3/oscillations.py","file_name":"oscillations.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"319189672","text":"\"\"\"Repeating a beat in a loop.\"\"\"\n\n__author__ = \"730292529\"\n\n\n# Begin your solution here...\nBEAT: str = input(\"What beat do you want to repeat? \") \nREPEAT: int = int(input(\"How many times do you want to repeat it? 
\"))\ni: int = 0\n\nwhile i < REPEAT: \n print((BEAT + \" \") * (REPEAT - 1) + BEAT) \n i = i + 1 \n REPEAT = i \nwhile REPEAT < 0: \n print(str(\"No beat...\"))\n REPEAT = i + 1 \nwhile REPEAT == 0: \n print(str(\"No beat...\"))\n REPEAT = i + 1 ","sub_path":"exercises/ex02/repeat_beat.py","file_name":"repeat_beat.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"519945701","text":"# -*- coding: utf-8 -*-\nimport functools\nimport json\nimport logging\nimport copy\nfrom datetime import datetime\nimport werkzeug\nfrom dateutil import parser, relativedelta\nimport pytz\nimport os\nimport subprocess\n\nfrom odoo.addons.kg_pos.controllers.kg_api_pos_reports import parse_date, \\\n json_response, invalid_response, validate_token\nfrom odoo import http\nfrom odoo.http import request\n\nAPI_POS_TAX_ONLINE = '/kg/api/pos/order/tax-online'\nAPI_ROUTES = [\n API_POS_TAX_ONLINE,\n]\nHTTP_METHOD_OPTION = \"OPTIONS\"\n\n\nclass KgApiTaxOnlineExport(http.Controller):\n\n @http.route(API_ROUTES, type='http', auth=\"none\", methods=[HTTP_METHOD_OPTION], csrf=False)\n def http_options(self, **payload):\n # api with http method OPTIONS is required from javascript web client\n return json_response({}, http_method=HTTP_METHOD_OPTION)\n\n @http.route('/kg/api/pos/order/tax-online', auth='none',\n cors=\"*\", type='http', methods=['GET'], csrf=False)\n @validate_token\n def get_pos_order_tax_report(self, company_id=1, date=None, format_output=\"txt\"):\n try:\n company_id = int(company_id)\n except Exception as ex:\n return invalid_response('Parameter company_id is invalid!', str(ex))\n try:\n pos_date = parse_date(date)\n except Exception as ex:\n return invalid_response('Parameter date is invalid!', str(ex))\n\n # Laporan pajak online utk transaksi pos order\n data = self.get_data(http.request.env, company_id, pos_date)\n company = http.request.env['res.company'].search([('id', '=', company_id)])\n if company.region_name == 'bali' or format_output == 'bali':\n result = self.generate_for_bali(data)\n elif company.region_name == 'surabaya' or format_output == 'sby':\n result = self.generate_for_jakarta(data, format_output=format_output, show_tax_amount=True)\n elif company.region_name == 'jakarta' or format_output in ('txt', 'jkt'):\n result = self.generate_for_jakarta(data, format_output=format_output, show_tax_amount=False)\n else:\n return invalid_response(\n 'user_company_config_error',\n 'Region user belum di definisikan, silahkan ke konfigurasi organisasi user anda ',\n 412)\n\n return json_response({\n 'result': result\n })\n\n @staticmethod\n def generate_for_jakarta(\n data,\n format_output='txt',\n show_tax_amount=False,\n txt_file=None,\n worksheet=None, row_no_start_from=0\n ):\n counter_item = 0\n row_no = row_no_start_from\n prev_order_id = 0\n result = []\n for rec in data:\n tax_amount = rec.get('tax_amount', 0)\n if rec.get('price_subtotal', 0) > 0 and tax_amount > 0:\n # Reset counter if order id changed\n if rec.get('order_id', 0) != prev_order_id:\n prev_order_id = rec.get('order_id', 0)\n counter_item = 1\n else:\n counter_item += 1\n ref = rec.get('pos_name', '').split('/')\n bill_no = ref[-1] # ambil nomor counter pos bill (element terakhir dari array ref)\n\n receipt_no = rec.get('config_name', '') + ' ' + bill_no\n transaction_id = receipt_no + str(counter_item) + 'Z'\n working_date = datetime.strftime(parser.parse(rec.get('working_date')), '%Y%m%d%H%M%S')\n tax_code = 
rec.get('tax_code')\n description = rec.get('product_name', '').replace('\"', '')\n # description = rec.get('tax_category', '').replace('\"', '')\n price_subtotal = rec.get('price_subtotal', 0)\n amount = int_to_str(price_subtotal)\n flag = '1' if tax_amount != 0 else '0'\n\n service_amount = int(rec.get('service_amount', 0))\n tax_for_service, tax_for_item = calculate_tax(price_subtotal, service_amount, tax_amount)\n\n row_data = KgApiTaxOnlineExport.format_row_and_write_to_file(\n row_no, working_date,\n transaction_id, receipt_no, amount, description, flag, tax_code,\n tax_amount=tax_for_item,\n worksheet=worksheet, txt_file=txt_file,\n show_tax_amount=show_tax_amount, format_output=format_output)\n\n result.append(row_data)\n\n if service_amount > 0:\n transaction_id = receipt_no + str(counter_item) + 'V'\n tax_code = 'ATV'\n description = 'Service Charge'\n amount = int_to_str(service_amount)\n flag = '1' if service_amount != 0 else '0'\n row_no += 1\n row_data = KgApiTaxOnlineExport.format_row_and_write_to_file(\n row_no, working_date,\n transaction_id, receipt_no, amount, description, flag, tax_code,\n tax_amount=tax_for_service,\n worksheet=worksheet, txt_file=txt_file,\n show_tax_amount=show_tax_amount, format_output=format_output)\n\n result.append(row_data)\n\n row_no += 1\n\n return result\n\n @staticmethod\n def format_row_and_write_to_file(\n row_no, working_date,\n transaction_id, receipt_no, amount, description, flag, tax_code, tax_amount,\n worksheet=None, txt_file=None, show_tax_amount=None, format_output=None\n ):\n if format_output != 'txt' or worksheet:\n # format not text --> expect dictionary, or output to excel\n row_data = KgApiTaxOnlineExport.jakarta_dict(\n show_tax_amount,\n transaction_id, receipt_no, tax_code, working_date, description, amount, flag,\n int_to_str(tax_amount))\n else:\n # if text file and not excel (worksheet None)\n row_data = KgApiTaxOnlineExport.jakarta_row(\n show_tax_amount,\n transaction_id, receipt_no, tax_code, working_date, description, amount, flag,\n int_to_str(tax_amount))\n if txt_file:\n txt_file.write(row_data + \"\\n\")\n elif worksheet:\n KgApiTaxOnlineExport.write_excel(row_no, row_data, worksheet)\n return row_data\n\n @staticmethod\n def jakarta_dict(show_tax_amount, transaction_id, receipt_no, tax_code, working_date, description,\n amount, flag, tax_amount):\n return {\n 'trx_id': transaction_id,\n 'receipt_no': receipt_no,\n 'tax_code': tax_code,\n 'trx_date2': working_date,\n 'description': description,\n 'amount': amount,\n 'flag': flag,\n 'tax_amount': tax_amount\n }\n\n @staticmethod\n def write_excel(row_no, row_data, worksheet):\n worksheet.write(row_no, 0, row_data.get('trx_id'))\n worksheet.write(row_no, 1, row_data.get('receipt_no'))\n worksheet.write(row_no, 2, row_data.get('tax_code'))\n worksheet.write(row_no, 3, row_data.get('trx_date2'))\n worksheet.write(row_no, 4, row_data.get('description'))\n worksheet.write(row_no, 5, row_data.get('amount'))\n worksheet.write(row_no, 6, row_data.get('flag'))\n worksheet.write(row_no, 7, row_data.get('tax_amount'))\n\n @staticmethod\n def jakarta_row(\n show_tax_amount, transaction_id, receipt_no, tax_code, working_date, description,\n amount, flag, tax_amount):\n\n row_data = '\"' + transaction_id + '\"|'\n row_data += '\"' + receipt_no + '\"|'\n row_data += '\"' + tax_code + '\"|'\n row_data += '\"' + working_date + '\"|'\n row_data += '\"' + description + '\"|'\n row_data += '\"' + amount + '\"|'\n row_data += '\"' + flag + '\"'\n if show_tax_amount:\n # keep the pipe-delimited format when the tax amount is appended\n row_data += '|\"' + tax_amount + '\"'\n return row_data\n\n @staticmethod\n def generate_for_bali(data, txt_file=None):\n prev_order_id = 0\n product_list = \"\"\n first_part = \"\"\n last_part = \"\"\n result = []\n price_subtotal = 0\n service_amount = 0\n tax_amount = 0\n price_subtotal_incl = 0\n for rec in data:\n if rec.get('order_id', 0) == prev_order_id or prev_order_id == 0:\n product_list = product_list + \"|\" if product_list else product_list\n product_list += \"{product_name}^{quantity}^{price}\".format(\n product_name=rec.get('product_name', '').replace('\"', '').replace(\"|\", \"\"),\n quantity=int_to_str(rec.get('qty', '')),\n price=int_to_str(rec.get('price_unit', ''))\n )\n else:\n # the order id changed:\n # write the row data for the previous order id\n if tax_amount > 0:\n row_data = first_part + product_list + ';' + last_part\n result.append(row_data)\n # reset variables\n product_list = \"\"\n counter_item = 1\n prev_order_id = rec.get('order_id', 0)\n price_subtotal = 0\n service_amount = 0\n tax_amount = 0\n price_subtotal_incl = 0\n\n # prepare the data for the new order id\n ref = rec.get('pos_name', '').split('/')\n bill_no = ref[-1] # take the POS bill counter number (the last element of the ref array)\n\n receipt_no = rec.get('config_name', '') + ' ' + bill_no\n # transaction_id = receipt_no + str(counter_item) + 'Z'\n tax_code = rec.get('tax_code')\n\n price_subtotal += round(rec.get('price_subtotal', ''), 0)\n service_amount += round(rec.get('service_amount', ''), 0)\n tax_amount += int(rec.get('tax_amount', '0'))\n price_subtotal_incl += round(rec.get('price_subtotal_incl', ''), 0)\n\n working_date = datetime.strftime(parser.parse(rec.get('working_date')), '%Y%m%d')\n date_order = parser.parse(rec.get('date_order'))\n # date_combine => date = working date, time = pos order time\n date_combine = parser.parse(\"{working_date} {time}\".format(\n working_date=working_date,\n time=datetime.strftime(date_order, '%H:%M:%S')))\n\n timezone_bali = 'Asia/Makassar'\n utc_timestamp = pytz.utc.localize(date_combine, is_dst=False)\n bali_datetime = utc_timestamp.astimezone(pytz.timezone(timezone_bali))\n\n first_part = datetime.strftime(bali_datetime, '%Y/%m/%d') + ';'\n first_part += receipt_no + ';'\n first_part += receipt_no + ';'\n first_part += tax_code + ';'\n first_part += datetime.strftime(bali_datetime, '%m/%d/%Y %H:%M') + ';'\n first_part += rec.get('config_name', '') + ';'\n first_part += int_to_str(price_subtotal) + ';' # subtotal without (before) tax/service\n first_part += '1;' if tax_amount else '0;'\n first_part += 'VER 2' + ';'\n first_part += int_to_str(price_subtotal) + ';'\n first_part += '0' + ';'\n first_part += int_to_str(service_amount) + ';'\n first_part += int_to_str(tax_amount) + ';'\n first_part += int_to_str(price_subtotal_incl) + ';'\n\n last_part = datetime.strftime(bali_datetime, '%d/%m/%Y %H:%M:%S') + ' +0800' + ';'\n last_part += ';'\n last_part += ';'\n\n # write the row data for the last order\n if tax_amount > 0:\n row_data = first_part + product_list + ';' + last_part\n result.append(row_data)\n if txt_file:\n txt_file.write(row_data + \"\\n\")\n\n return result\n\n @staticmethod\n def get_data(env, company_id, pos_date):\n query = \"\"\"\n select s.id as session_id, po.id as order_id, s.working_date, pol.id as line_id\n , c.name as config_name, po.name as pos_name, po.date_order\n , coalesce(tc.tax_code, 'ATM') as tax_code\n , coalesce(tc.tax_category, 'Makan/Minum') as tax_category\n , pt.name as product_name\n , pol.qty, pol.price_unit\n , pol.price_subtotal, 
pol.price_subtotal_incl\n , service_amount\n , tax_amount\n From\n pos_session s\n left join pos_config c on c.id = s.config_id\n left join pos_order po on po.session_id = s.id\n left join pos_order_line pol on pol.order_id = po.id\n left join product_product pp on pp.id = pol.product_id\n left join product_template pt on pt.id = pp.product_tmpl_id\n left join tax_category tc on tc.id = pt.tax_category_id\n where \n -- s.state = 'closed' and\n s.working_date = '{pos_date}' and \n po.company_id = {company_id} and\n po.state != 'draft' and po.state != 'cancel' \n and po.department_id is null and po.employee_id is null \n order by s.id, po.id, pol.id\n \"\"\".format(company_id=company_id, pos_date=pos_date)\n env.cr.execute(query)\n pos_trx_summary = env.cr.dictfetchall()\n return pos_trx_summary\n\n\ndef int_to_str(var):\n return str(var).replace(\".0\", \"\")\n\n\ndef calculate_tax(price_subtotal, service_amount, tax_amount):\n tax_for_service = 0\n if service_amount == 0:\n tax_for_item = tax_amount\n else:\n pct_tax = float(tax_amount) / float(price_subtotal + service_amount)\n tax_for_item = round(pct_tax * price_subtotal, 0)\n tax_for_service = tax_amount - tax_for_item\n return tax_for_service, tax_for_item\n\n","sub_path":"local/kg_pos/controllers/kg_tax_online_export.py","file_name":"kg_tax_online_export.py","file_ext":"py","file_size_in_byte":13889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"250529675","text":"\"\"\"dfz URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom news import views\nimport xadmin\nxadmin.autodiscover()\n\nfrom xadmin.plugins import xversion\nxversion.register_models()\n\nurlpatterns = [\n url(r'^xadmin/', include(xadmin.site.urls)),\n url(r'^qiniu/',include('DjangoQiniu.urls')),\n url(r'^ueditor/',include('DjangoUeditor.urls')),\n url(r'^$',views.index),\n url(r'^search/$',views.search,name='search'),\n url(r'^news/',include('news.urls',namespace='news')),\n url(r'^course/',include('course.urls',namespace='course')),\n url(r'^payinfo/',include('payinfo.urls',namespace='payinfo')),\n url(r'account/',include('frontuser.urls',namespace='account'))\n]\n","sub_path":"dfz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"12194093","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Created on 2020-03-01 by Asmita Gautam\nAssignment 07: Graphical analysis with python\n\nGraphical analysis for earthquake data for last 30 days\nModified to add header and comments on 2020-03-08\n\"\"\"\n\n\"\"\"\nImporting the required modules\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport scipy.stats as stats\n\n#data = np.genfromtxt('all_month.csv')\n\"\"\"\ngenfromtxt does not work because the data types (dtype) differ between columns,\nbut the data type within each column is the same, so the file can be read through pandas\n\"\"\"\n\n# Read the given data table\ndf = pd.read_table('all_month.csv', header=0, sep=',')\n\n# Plot histogram of the magnitude of earthquakes\nf1 = plt.figure()\nplt.hist(df['mag'].dropna(), bins=10, range=[0,10])\nplt.xlabel('Magnitude')\nplt.ylabel('Probability')\nf1.text(0.5, -0.05, \"Fig 1. Histogram of the magnitude of earthquakes\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()\n\n## Plot histogram by changing the bin width\n# (plt.subplots creates its own figure, so no separate plt.figure() call is needed here)\nf1a, (ax1,ax2,ax3) = plt.subplots(3, sharex=True)\nax1.hist(df[\"mag\"], bins=5, range=[0,10])\nax2.hist(df[\"mag\"], bins=50, range=[0,10]) # plots to show difference of bins\nax3.hist(df[\"mag\"], bins=100, range=[0,10]) \nplt.xlabel('Magnitude')\nplt.ylabel('Probability')\nf1a.text(0.5, -0.05, \"Fig 1A. Histogram changing the bin width\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()\n\n#KDE\nf2=plt.figure()\nmag=df[\"mag\"].dropna()\nkde = stats.gaussian_kde(df[\"mag\"].dropna())\nspacing = np.linspace(0,10, num=500) #start,stop, and the number of points/samples between them\nkde.covariance_factor = lambda : .25 # bandwidth adjustment\nkde._compute_covariance()\nplt.plot(spacing,kde(spacing))\nplt.xlabel('Magnitude')\nplt.ylabel('Frequency')\nf2.text(0.5, -0.05, \"Fig 2. KDE plot of the earthquake\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()\n\n#latitude versus longitude for all earthquakes\nf3=plt.figure()\nplt.scatter(df['longitude'], \n y=df['latitude'], \n s=2, c='blue') #setting point size and color\nplt.ylabel('Latitude')\nplt.xlabel('Longitude')\nf3.text(0.5, -0.05, \"Fig 3. Earthquake distribution\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()\n\n# normalized cumulative distribution plot of earthquake depth \nf4=plt.figure()\nsort_depth = np.sort(df['depth'].dropna())\nprob = np.linspace(0,1,len(sort_depth))\nplt.plot(sort_depth, prob)\nplt.xlabel('Depth (km)')\nplt.ylabel('Probability')\nf4.text(0.5, -0.05, \"Fig 4. 
CDF of Earthquake depth\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()\n\n# Scatter plot of earthquake mag vs depth\nf5=plt.figure()\nplt.scatter(df['mag'],df['depth'],s=5)\nplt.xlabel('Magnitude')\nplt.ylabel('Depth (km)')\nf5.text(0.5, -0.05, \"Fig 5. Earthquake Magnitude vs Depth\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()\n\n# Quantile plot of earthquake mag\nf6=plt.figure()\nstats.probplot(df['mag'].dropna(), dist=\"norm\", plot=plt)\nplt.xlabel('Normal Quantiles')\nplt.ylabel('Data Quantiles')\nf6.text(0.5, -0.05, \"Fig 6. Q-Q Plot of Earthquake Magnitudes\", fontsize=15, \n ha=\"center\", va=\"center\")\nplt.show()","sub_path":"untitled6.py","file_name":"untitled6.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"419693478","text":"from model.group import Group\nfrom model.info_contact import Infos\nimport random\n\n\ndef test_add_contact_in_group(app, orm):\n if len(orm.get_group_list()) == 0:\n app.group.create(Group(name=\"test\"))\n groups = orm.get_group_list()\n group = random.choice(groups)\n # locate the position of the chosen group; 'break' (rather than an early\n # 'return', which would silently end the test here) keeps the test running\n index = 0\n for m in groups:\n if m.id == group.id:\n break\n index = index+1\n if len(orm.get_contacts_not_in_group(group)) == 0:\n app.contact.create(Infos(firstname=\"Firstname\"))\n contacts = orm.get_contacts_not_in_group(group)\n contact = random.choice(contacts)\n old_contacts = orm.get_contacts_in_group(group)\n app.contact.add_contact_in_group(index+1, contact)\n new_contacts = orm.get_contacts_in_group(group)\n old_contacts.append(contact)\n assert sorted(old_contacts, key=Infos.id_or_max) == sorted(new_contacts, key=Infos.id_or_max)","sub_path":"home_works/test/test_add_contact_in_group.py","file_name":"test_add_contact_in_group.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"7256275","text":"#!/usr/bin/env python\n##\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom config import config\n\n__author__ = 'Kord Campbell'\n__website__ = 'http://www.tinyprobe.com'\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nimport oauth_client as oauth2\n\n\n# Github OAuth Implementation\nclass GithubAuth(object):\n def __init__(self, github_server, github_redirect_uri, scope, github_client_id=config.get('github_client_id'),\n github_client_secret=config.get('github_client_secret')):\n # load github settings from config.py\n self.oauth_settings = {\n 'client_id': github_client_id,\n 'client_secret': github_client_secret,\n 'access_token_url': 'https://%s/login/oauth/access_token' % github_server,\n 'authorization_url': 'https://%s/login/oauth/authorize' % github_server,\n 'redirect_url': '%s' % github_redirect_uri,\n 'scope': '%s' % scope\n }\n\n # get our auth url and return to login handler\n def get_authorize_url(self):\n oauth_client = oauth2.Client(\n self.oauth_settings['client_id'],\n self.oauth_settings['client_secret'],\n 
self.oauth_settings['authorization_url']\n )\n\n authorization_url = oauth_client.authorization_url(\n redirect_uri=self.oauth_settings['redirect_url'],\n params={'scope': self.oauth_settings['scope']}\n )\n return authorization_url\n\n def get_access_token(self, code):\n oauth_client = oauth2.Client(\n self.oauth_settings['client_id'],\n self.oauth_settings['client_secret'],\n self.oauth_settings['access_token_url']\n )\n\n data = oauth_client.access_token(code, self.oauth_settings['redirect_url'])\n\n access_token = data.get('access_token')\n\n return access_token\n\n\nclass GithubRequest(object):\n def __init__(self, access_token, github_client_id=config.get('github_client_id'),\n github_client_secret=config.get('github_client_secret')):\n self.access_token = access_token\n self.oauth_settings = {\n 'client_id': github_client_id,\n 'client_secret': github_client_secret,\n # NOTE: interpolating the access token into this URL looks unintended;\n # an OAuth server hostname is presumably what was meant here\n 'access_token_url': 'https://%s/login/oauth/access_token' % access_token,\n }\n\n def get_user_info(self):\n return self.make_request('user')\n\n def fetch_user_repos(self):\n return self.make_request('user/repos', params={'per_page': 1000})\n\n def get_issues_list(self, repo_name, repo_own):\n return self.make_request('repos/' + repo_own + '/' + repo_name + '/issues', params={'per_page': 1000})\n\n def create_issue(self, owner, repo, issue):\n body = json.dumps(issue)\n return self.make_request('repos/' + owner + '/' + repo + '/issues', method='POST', body=body)\n\n def get_oauth(self):\n return oauth2.Client(\n self.oauth_settings['client_id'],\n self.oauth_settings['client_secret'],\n self.oauth_settings['access_token_url']\n )\n\n def make_request(self, endpoint, body=None, method='GET', params=None):\n oauth_client = self.get_oauth()\n (headers, body) = oauth_client.request(\n 'https://api.github.com/' + endpoint,\n access_token=self.access_token,\n token_param='access_token',\n method=method,\n body=body,\n params=params\n )\n return json.loads(body)\n","sub_path":"github/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"35822866","text":"from builtins import range\nfrom builtins import object\n#copyright @Fernando Benites\n\nfrom ..base import MLClassifierBase\n\nimport numpy.core.umath as umath\nimport scipy.sparse\nimport numpy\n\nclass Neuron(object):\n def __init__(self,startpoint,label):\n #vector must be in complement form\n self.vc = startpoint\n# ones = numpy.ones(startpoint.shape)\n# self.vc=numpy.concatenate((startpoint, ones - startpoint))\n self.label=label\n\nclass MLARAM(MLClassifierBase):\n \"\"\"Multi-label ARAM classifier. See http://dx.doi.org/10.1109/ICDMW.2015.14\n\n Parameters\n ----------\n\n vigilance : vigilance parameter for adaptive resonance theory networks; controls how large a hyperbox can be (1 keeps hyperboxes small, i.e., no compression; 0 allows them to span the whole range). Normally set between 0.8 and 0.999; it is dataset dependent. 
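(For illustration, vigilance=0.95 would yield finer-grained prototypes than vigilance=0.85; these are hypothetical values within the recommended range.) 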
It is responsible for the creation of the prototypes, therefore training of the network.\n threshold : controls how many prototypes participate in the prediction, can be changed at the testing phase.\n tneurons : if the network should inherit neurons (prototypes) from another network\n tdebug : set debug mode\n \"\"\"\n BRIEFNAME = \"ML-ARAM\"\n\n def __init__(self, vigilance=0.9, threshold=0.02, tneurons=None):\n super(MLARAM, self).__init__()\n\n if tneurons is not None:\n self.neurons = tneurons\n else:\n self.neurons = []\n self.labels = []\n self.vigilance = vigilance\n self.threshold = threshold\n\n self.allneu = \"\"\n self.online = 1\n self.alpha = 0.0000000000001\n self.copyable_attrs += [\"neurons\", \"labels\", \"vigilance\", \"threshold\", \"allneu\", \"online\", \"alpha\"]\n\n def reset(self):\n self.labels = []\n self.neurons = []\n\n #@profile\n def fit(self, X, y):\n\n labdict = {}\n if len(X[0].shape) == 1:\n ismatrix = 0\n else:\n ismatrix = 1\n xma = X.max()\n xmi = X.min()\n if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:\n X = numpy.multiply(X - xmi, 1 / (xma - xmi))\n\n if len(self.neurons) == 0:\n ones = numpy.ones(X[0].shape)\n self.neurons.append(Neuron(numpy.concatenate((X[0], ones - X[0]), ismatrix), y[0]))\n startc = 1\n labdict[y[0].nonzero()[0].tostring()] = [0]\n else:\n startc = 0\n newlabel = 0\n ones = numpy.ones(X[0].shape)\n for i1, f1 in enumerate(X[startc:], startc):\n found = 0\n if scipy.sparse.issparse(f1):\n f1 = f1.todense()\n fc = numpy.concatenate((f1, ones - f1), ismatrix)\n\n activationn = [0] * len(self.neurons)\n activationi = [0] * len(self.neurons)\n ytring = y[i1].nonzero()[0].tostring()\n if ytring in labdict:\n fcs = fc.sum()\n for i2 in labdict[ytring]:\n minnfs = umath.minimum(self.neurons[i2].vc, fc).sum()\n activationi[i2] = minnfs / fcs\n activationn[i2] = minnfs / self.neurons[i2].vc.sum()\n\n if numpy.max(activationn) == 0:\n newlabel += 1\n self.neurons.append(Neuron(fc, y[i1]))\n labdict.setdefault(ytring, []).append(len(self.neurons) - 1)\n continue\n inds = numpy.argsort(activationn)\n\n indc = numpy.where(numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]\n if indc.shape[0] == 0:\n self.neurons.append(Neuron(fc, y[i1]))\n labdict.setdefault(ytring, []). 
append(len(self.neurons) - 1)\n continue\n\n winner = inds[::-1][indc[0]]\n self.neurons[winner].vc = umath.minimum(self.neurons[winner].vc, fc)\n\n labadd = numpy.zeros(y[0].shape, dtype=y[0].dtype)\n labadd[y[i1].nonzero()] = 1\n self.neurons[winner].label += labadd\n\n #@profile\n def predict(self, X):\n result = []\n ranks = self.predict_proba(X)\n for rank in ranks:\n sortedRankarg = numpy.argsort(-rank)\n diffs = -numpy.diff([rank[k] for k in sortedRankarg])\n\n indcutt = numpy.where(diffs == (diffs).max())[0]\n if len(indcutt.shape) == 1:\n indcut = indcutt[0] + 1\n else:\n indcut = indcutt[0, -1] + 1\n label = numpy.zeros(rank.shape)\n\n label[sortedRankarg[0:indcut]] = 1\n\n result.append(label)\n\n return numpy.array(numpy.matrix(result))\n\n #@profile\n def predict_proba(self, X):\n result = []\n if len(X) == 0:\n return\n if len(X[0].shape) == 1:\n ismatrix = 0\n else:\n ismatrix = 1\n xma = X.max()\n xmi = X.min()\n if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:\n X = numpy.multiply(X - xmi, 1 / (xma - xmi))\n ones = numpy.ones(X[0].shape)\n n1s = [0] * len(self.neurons)\n allranks = []\n neuronsactivated = []\n\n allneu = numpy.vstack([n1.vc for n1 in self.neurons])\n allneusum = allneu.sum(1) + self.alpha\n\n for i1, f1 in enumerate(X):\n if scipy.sparse.issparse(f1):\n f1 = f1.todense()\n fc = numpy.concatenate((f1, ones - f1), ismatrix)\n activity = (umath.minimum(fc, allneu).sum(1) / allneusum).squeeze().tolist()\n if ismatrix == 1:\n activity = activity[0]\n\n # be very fast\n sortedact = numpy.argsort(activity)[::-1]\n\n winner = sortedact[0]\n diff_act = activity[winner] - activity[sortedact[-1]]\n\n largest_activ = 1\n\n par_t = self.threshold\n for i in range(1, len(self.neurons)):\n activ_change = (activity[winner] - activity[sortedact[i]]) / activity[winner]\n if activ_change > par_t * diff_act:\n break\n\n largest_activ += 1\n\n rbsum = sum([activity[k] for k in sortedact[0:largest_activ]])\n\n rank = activity[winner] * self.neurons[winner].label\n actives = []\n activity_actives = []\n actives.append(winner)\n activity_actives.append(activity[winner])\n for i in range(1, largest_activ):\n rank += activity[sortedact[i]] * self.neurons[sortedact[i]].label\n actives.append(sortedact[i])\n activity_actives.append(activity[sortedact[i]])\n rank /= rbsum\n allranks.append(rank)\n\n return numpy.array(numpy.matrix(allranks))\n","sub_path":"skmultilearn/neurofuzzy/MLARAMfast.py","file_name":"MLARAMfast.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"425500184","text":"\"\"\"\nThis module is used to initialize the model, and allocate fields and arrays to a 'ci_model' class.\n\"\"\"\nimport xarray as xr\nimport numpy as np\nimport copy\nimport pint\nfrom time import time\nfrom . import LES\nfrom . import AER\nfrom . import plotting\nfrom .run_model import run_model as Run\n\n\nclass ci_model():\n \"\"\"\n Cloud-ice nucleation 1D model class containing:\n 1. All initialization model parameters\n 2. LES output dataset used to initialize and inform the model (ci_model.les).\n 3. 
Model output output fields (ci_model.ds).\n \"\"\"\n def __init__(self, final_t=21600, delta_t=10, use_ABIFM=True, les_name=\"DHARMA\", t_averaged_les=True,\n custom_vert_grid=None, w_e_ent=1e-3, entrain_to_cth=True,\n implicit_ent=True, tau_mix=1800., heat_rate=None, tau_act=10., implicit_act=True,\n implicit_sublim=True, mixing_bounds=None, v_f_ice=0.3, in_cld_q_thresh=1e-6,\n nuc_RH_thresh=None, time_splitting=True, ent_then_act=True,\n prognostic_inp=True, prognostic_ice=False, dt_out=None, relative_sublim=True,\n aer_info=None, les_out_path=None, les_out_filename=None, les_bin_phys=True, t_harvest=10800,\n fields_to_retain=None, height_ind_2crop=\"ql_pbl\", cbh_det_method=\"ql_thresh\",\n input_conc_units=None, input_diam_units=None, input_heatrate_units=None,\n do_act=True, do_entrain=True, do_mix_aer=True, do_mix_ice=True, do_sedim=True,\n do_sublim=False, output_budgets=False, output_aer_decay=True, run_model=True):\n \"\"\"\n Model namelists and unit conversion coefficient required for the 1D model.\n The LES class includes methods to processes model output and prepare the out fields for the 1D model.\n This method also initializes the aerosol populations and runs the model.\n\n Parameters\n ----------\n final_t: float\n Total simulation time [s].\n delta_t: float\n time_step [s].\n use_ABIFM: bool\n True - use ABIFM, False - use singular.\n les_name: str\n Name of LES model to harvest data from.\n t_averaged_les: bool\n If True, use time-averaged LES profile of each variable to inform the 1D model.\n If False then the 1D model is informed by the LES output temporal evolution with extrapolation\n outside the LES output DataSet time range.\n Note: in the case of a single LES output time step requested ('t_harvest' is a scalar), this boolean\n has no effect.\n custom_vert_grid: list, np.ndarray, or None.\n custom vertical grid for the 1D model. If None, then using the processed (and cropped) LES output\n grid.\n w_e_ent: dict or float\n cloud-top entrainment rate [m/s].\n if a float then using its value throughout the simulation time.\n if a dict, must have the keys \"time\" [s] and \"value\". Each key contains a list or np.ndarray of\n length s (s > 1) determining time and entrainment rate time series.\n Time values are interpolated between the specified times, and the edge values are used for\n extrapolation.\n entrain_to_cth: bool or int\n If True, entrain to cloud top (mixing layer top) after calculating the corresponding delta.\n If False, entrain to the mixing layer base (surface layer in coupled cases).\n If int, then using this input as index such that 0 or -1 mean consistent entrainment to the surface\n layer or domain top, respectively.\n NOTE: the value of entrain_to_cth will be overwritten if provided as key in aer_info.\n implicit_ent: bool\n If True, using an implicit solver for entrainment. If False, using explicit solver.\n tau_mix: dict or float\n boundary-layer mixing time scale [s].\n if a float then using its value throughout the simulation time.\n if a dict, then treated as in the case of a dict for w_e_ent.\n heat_rate: xr DataArray, dict, or float\n heating rate over the domain added to the LES output sounding (negative values = cooling) [K s-1]\n if a float then using its value throughout the simulation time.\n if a dict, then treated as in the case of a dict for w_e_ent.\n if an xr DataArray, must contain the \"height\" [m] and \"time\" [s] coordinates. 
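(Illustrative, hypothetical example: heat_rate={\"time\": [0., 21600.], \"value\": [-2e-5, -2e-5]} would prescribe a constant cooling of 2e-5 K/s throughout the simulation.) 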
Values outside the\n coordinate range are extrapolated using the nearest edge values.\n tau_act: float, int, or None [--singular--]\n If float or int, then setting an activation time scale (10 s by default matching the CFDC).\n If None, then singular activation is instantaneous and depends on delta_t.\n Relevant for singular parameterizations.\n implicit_act: bool [--singular--]\n If True and tau_act is a scalar, using implicit solution to activation.\n implicit_sublim: bool\n If True, using implicit solution to sublimation (Ni reduction - relevant for relative_sublim == True).\n mixing_bounds: two-element tuple or list, or None\n Determining the mixing layer (especially relevant when using time-varying LES input).\n The first element provides a fixed lowest range of mixing (float), a time varying range (dict as\n in w_e_ent), or the method with which to determine mixing base (str). The second element is\n similar, but for the determination of the mixing layer top.\n If None, using the full domain.\n NOTE: currently, the only accepted pre-specified mixing determination method is \"ql_thresh\"\n (q_liq-based cloud base or top height detection method, allowing limit mixing to the cloud).\n v_f_ice: xr DataArray, dict, or float\n number-weighted ice crystal fall velocity [m/s].\n if a float then using its value throughout the simulation time.\n if a dict, then treated as in the case of a dict for w_e_ent.\n if an xr DataArray, must contain the \"height\" [m] and \"time\" [s] coordinates. Values outside the\n coordinate range are extrapolated using the nearest edge values.\n in_cld_q_thresh: float\n Mixing ratio threshold [kg/kg] for determination of in-cloud environment; also assigned to the\n 'q_liq_pbl_cut' attribute value.\n nuc_RH_thresh: float, str, list, or None [--ABIFM--]\n An RH threshold (fraction) for ABIFM (which can nucleate outside a cloud layer), such that a threshold\n of 1.00 means nucleation only within cloud layers.\n If str equals to \"use_ql\" then limiting nucleation to levels where ql > in_cld_q_thresh.\n If list and the first element equals to \"use_RH_and_ql\" then limiting nucleation to levels where\n ql > in_cld_q_thresh and/or RH >= RH threshold set in the second list element.\n Ignored if None.\n time_splitting: bool\n If True, running the model using time splitting (processes are calculated sequentially, each based on\n the state produced by the other).\n If False, using process splitting (process calculations are based on the same state and their\n tendencies are added to produce the updated state).\n ent_then_act: bool\n if True, entrain aerosol and then activate. If False, activate and then entrain (in either case,\n these two processes are followed by mixing).\n prognostic_inp: bool\n if True, using prognostic aerosol (default - essentially, the purpose of this model).\n if False, using diagnostic INP, i.e., total activated INP numbers are calcuated while considering\n tau_act (singular) or Jhet in current time step (ABIFM).\n prognostic_ice: bool\n If True, using prognostic ice, i.e., ice particles have INP memory, thereby enabling sublimation\n such that particle INPs are restored (requires setting prognostic_inp to True).\n If False, ice particles have no memory, and therefore, no sublimation, for example.\n Note that prognostic_ice requires more computation time. 
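(Illustrative configuration: setting prognostic_inp=True together with prognostic_ice=True enables sublimation with INP memory, at the cost of longer runtime.) 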
Memory is only allocated for ice snapshot\n as in INAS.\n Requires: prognostic_inp == True.\n dt_out: np.ndarray, float, int, or None\n array specifying times at which prognostic variables will be saved.\n Using a constant value if float or int\n Saving every time step if None\n Requires prognostic_ice == True.\n relative_sublim: bool\n If True, using the relative reduction of Ni with height (based on LES).\n If False, using abosulte reduction.\n Requires prognostic_ice == True.\n aer_info: list of dict\n Used to initialize the aerosol arrays. Each element of the list describes a single population\n type providing its composition, concentration, and PSD, e.g., can use a single log-normal population\n of Illite, or two Illite PSDs with different mode diameter and geometric SD combined with a Kaolinite\n population.\n Each dictionary (i.e., an 'aerosol_attrs' list element) must contain the keys:\n\n 1. n_init_max: [float] total concentration [m-3].\n\n 2. psd: [dict] choose a 'type' key between several options (parentheses denote required dict key\n names; units are SI by default; for concentration and/or diameter values, other units can be\n specified using 'input_conc_units' and/or 'input_conc_units' input parameters):\n - \"mono\": fixed-size population, i.e., a single particle diameter should be provided\n (diam [m]).\n - \"logn\": log--normal: provide geometric mean diameter (diam_mean [m]), geometric SD\n (geom_sd), number of PSD bins or an alternative diameter bin array (n_bins), minimum diameter\n (diam_min [m]; can be a 2-element tuple and then the 2nd is the maximum diameter cutoff),\n and bin-to-bin mass ratio (m_ratio). Note that the effective bin-to-bin diameter ratio\n equals m_ratio**(1/3).\n - \"multi_logn\": multi-modal log-normal: as in \"logn\" but diam_mean, geom_sd, and n_init_max\n need to be specified as lists or np.ndarrays with the same length (each characterizing\n a single mode (bin array is identical and represents the sum of modes).\n - \"custom\": custom size distribution with maunally specified bin values and PSD shape.\n Provide the PSD diameter array (diam) and the number concentration per bin\n (dn_dlogD). Optional input key includes normalization to n_init (norm_to_n_init_max)\n that normalizes dn_dlogD such that such sum(dn_dlogD) = n_init_max.\n - \"default\": (parameters not required) using a log-normal PSD with mean diameter\n of 1e-6 m, geometric SD of 2.5, 35 PSD bins with minimum diameter of 0.01e-6 m and mass\n ratio of 2, resulting in max diameter of ~26e-6 m.\n optional keys:\n 1. name: [str] population name (or tag). A default string using nucleus type is used if not\n provided.\n\n 2. nucleus_type: [str; --ABIFM--] name of substance (e.g., Al2O3) - to initialize Jhet (must be\n specified for ABIFM).\n\n 3. diam_cutoff: [float or tuple; --singular--] minimum particle diameter to consider.\n Using a value of 0.5e-6 as in D2010 if not specified. Use a 2-element tuple to specify a range of\n diameters to consider.\n\n 4. T_array: [list or np.ndarray; --singular--] discrete temperature array. If not specified, using\n temperatures between the smallest LES-informed temperature (or -40 C) and 0 with logarithmically-\n increasing delta_t.\n\n 5. singular_fun: [lambda func. or str; --singular--] INP parametrization (typically as a function\n of T).\n str: use \"D2010\" to use eq. 1 in DeMott et al., 2010, \"D2015\" to use eq. 2 in DeMott et al.,\n 2015, \"D2010fit\" to use the temperature dependence fit from fig. 
2 in DeMott et al., 2010,\n \"ND2012\" for surface area temperature-based fit (eq. 5) in Niemand et al., JAS, 2012,\n \"SC2020\" for surface area temperature-based fit (eq. 5) in Schill et al., PNAS, 202,\n and \"AT2013\" for surface area temperature_based fit (eq.6) in Atkinson et al., NATURE, 2013.\n The D2015 has default values of the five coeff. from eq. 2 (cf - calibration correction factor,\n alpha, beta, gamma, delta); these might be coded as optional input for the AER class in\n the future.\n Note that \"D2010fit\" does not consider aerosol PSDs.\n Use \"D2010\" (default) if None.\n\n 6. singular_scale: [float] Scale factor for 'singular_fun' or Jhet (1 by default).\n\n 7. n_init_weight_prof: [dict] a dict with keys \"height\" and \"weight\". Each key contains\n a list or np.ndarray of length s (s > 1) determining PSD heights [m] and weighting profiles.\n Weights are applied on n_init such that n_init(z) = n_init_max * weighting_factor(z), i.e., a\n weighted_aer_prof filled with ones means that n_init(z) = n_init_max.\n if weights > 1 are specified, the profile is normalized to max value == 1. heights are interpolated\n between the specified heights, and the edge values are used for extrapolation (can be used to set\n different aerosol source layers at model initialization, and combined with turbulence weighting,\n allows the emulation of cloud-driven mixing.\n\n 8. entrain_psd: [dict] PSD for entrained aerosol - similar to the aer_info dict for specifying the\n PSD parameters of the entrained aerosol (can be surface aerosol fluxes if entrain_from_cth=0, for\n example). The 'type' key value must be the same as the aer_info dict.\n optional keys:\n 1. src_weight_time: [dict] a dict with keys \"time\" and \"weight\" for entrainment source.\n 9. 
entrain_to_cth: [bool or int] as in the 'entrain_to_cth' in the ci_model class attributes, the\n case of which will result in determining this attribute value only for this specific aerosol\n population.\n If not specified, using the default option, i.e., the initial PSD ('dn_dlogD) with a weight of 1.,\n which in likely most scenarios represent the free-tropospheric (or PBL top) as was the case until\n the Sep 6, 2020 commits.\n input_conc_units: str or None\n An str specifies the input aerosol concentration units that will be converted to SI in pre-processing.\n Relevant input parameters are: n_init_max and dn_dlogD (custom).\n input_diam_units: str or None\n An str specifies the input aerosol diameter units that will be converted to SI in pre-processing.\n Relevant input parameters are: diam (mono, custom) diam_mean (logn, multi_logn), diam_min\n (logn, multi_logn), and diam_cutoff.\n input_heatrate_units: str or None\n An str specifies the input heating rate units that will be converted to SI in pre-processing.\n The relevant input parameters is: heat_rate.\n do_act: bool\n determines whether aerosol (INP) activation will be performed.\n do_entrain: bool\n determines whether aerosols entrainment will be performed.\n do_mix_aer: bool\n determines whether mixing of aerosols will be performed.\n do_mix_ice: bool\n determines whether mixing of ice will be performed.\n do_sedim: bool\n determines whether ice sedimentation will be performed.\n do_sublim: bool\n determines whether ice sublimation will be performed (based on dNi/dz from LES).\n output_budgets: bool\n If True, then activation, entrainment, and mixing budgest are provided in the model output.\n output_aer_decay: bool\n If True, then generating an output field of the relative fraction of PBL aerosol relative to\n initial value, as well as the decay rate between consecutive time steps.\n run_model: bool\n True - run model once initialization is done.\n\n Other Parameters\n ----------------------\n les_out_path: str or None\n LES output path (can be relative to running directory). Use default if None.\n les_out_filename: str or None\n LES output filename. Use default file if None.\n les_bin_phys: bool\n IF True, using bin microphysics output namelist for harvesting LES data.\n If False, using bulk microphysics output namelist for harvesting LES data.\n t_harvest: scalar, 2- or 3-element tuple, list (or ndarray), or None\n If scalar then using the nearest time (assuming units of seconds) to initialize the model\n (single profile).\n If a tuple, cropping the range defined by the first two elements (increasing values) using a\n slice object. 
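(Hypothetical values for illustration: t_harvest=(7200., 14400.) would crop LES output between 2 and 4 h, whereas t_harvest=10800 would select the single nearest profile.) 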
If len(t_harvest) == 3 then using the 3rd element as a time offset to subtract from\n the tiem array values.\n If a list, cropping the times specified in the list (can be used take LES output profiles every\n delta_t seconds.\n NOTE: default in the ci_model class (10800 s) is different than in the DHARMA init method (None).\n fields_to_retain: list or None\n Fieldnames to crop from the LES output (required to properly run the model).\n If None, then cropping the minimum number of required fields using DHARMA's namelist convention\n (Temperature [K], q_liq [kg/kg], RH [fraction], precipitation flux [mm/h], and ice number\n concentration [cm^-3]).\n height_ind_2crop: list, str, or None\n Indices of heights to crop from the model output (e.g., up to the PBL top).\n if str then different defitions for PBL:\n - if == \"ql_pbl\" then cropping all values within the PBL defined here based on the\n 'q_liq_pbl_cut' attribute. If more than a single time step exist in the dataset, then cropping\n the highest index corresponding to the cutoff.\n - OTHER OPTIONS TO BE ADDED.\n If None then not cropping.\n Method to determine cloud base with:\n - if == \"ql_thresh\" then cbh is determined by a q_liq threshold set with the 'q_liq_cbh' attribute.\n - OTHER OPTIONS TO BE ADDED.\n \"\"\"\n # count processing time\n Now = time()\n\n # Set some simulation attributes.\n self.vars_harvested_from_les = [\"RH\", \"ql\", \"T\", \"Ni\", \"prec\", \"rho\"] # processed vars used by the model\n self.final_t = final_t\n self.use_ABIFM = use_ABIFM\n self.in_cld_q_thresh = in_cld_q_thresh # kg/kg\n self.nuc_RH_thresh = nuc_RH_thresh # fraction value\n self.prognostic_inp = prognostic_inp\n if np.logical_and(not self.prognostic_inp, prognostic_ice):\n print(\"prognostic_inp is False while prognostic_ice, which requires True prognostic_inp, is False - \"\n \"setting prognostic_ice = False\")\n prognostic_ice = False\n self.prognostic_ice = prognostic_ice\n if isinstance(dt_out, (float, int)):\n print(f\"Setting output time increments to {dt_out} s\")\n dt_out = np.arange(0., self.final_t + 1e-10, dt_out)\n elif dt_out is None:\n print(f\"Setting output time increments to 1 time step of {delta_t} s (none were specified)\")\n dt_out = np.arange(0., self.final_t + 1e-10, delta_t) # By default output every time step\n self.dt_out = dt_out\n\n # assign a unit registry and define percent units.\n self.ureg = pint.UnitRegistry()\n self.ureg.define(pint.definitions.UnitDefinition('percent', 'pct', (),\n pint.converters.ScaleConverter(1 / 100.0)))\n\n # Load LES output\n if les_name == \"DHARMA\":\n les = LES.DHARMA(les_out_path=les_out_path, les_out_filename=les_out_filename, t_harvest=t_harvest,\n fields_to_retain=fields_to_retain, height_ind_2crop=height_ind_2crop,\n cbh_det_method=cbh_det_method, q_liq_pbl_cut=in_cld_q_thresh,\n les_bin_phys=les_bin_phys)\n les.ds[\"rho\"] = les.ds[\"rho\"].isel({\"time\": 0}) # density is constant with time (per Exner function)\n else:\n raise NameError(\"Can't process LES model output from '%s'\" % les_name)\n self.LES_attributes = {\"LES_name\": les_name,\n \"les_out_path\": les.les_out_path,\n \"les_out_filename\": les.les_out_filename,\n \"les_bin_phys\": les.les_bin_phys,\n \"t_averaged_les\": t_averaged_les,\n \"t_harvest\": t_harvest,\n \"fields_to_retain\": fields_to_retain,\n \"height_ind_2crop\": height_ind_2crop,\n \"cbh_det_method\": cbh_det_method}\n\n # time-averaged LES variable profile option\n if t_averaged_les:\n les_units = {}\n for key in self.vars_harvested_from_les:\n 
les_units.update({key: les.ds[key].attrs[\"units\"]})\n Mean_time = les.ds[\"time\"].mean()\n les.ds = les.ds.mean(dim=\"time\")\n les.ds = les.ds.assign_coords({\"time\": Mean_time})\n les.ds = les.ds.expand_dims(\"time\").transpose(*(\"height\", \"time\"))\n for key in self.vars_harvested_from_les: # restore attributes lost during averaging.\n les.ds[key].attrs[\"units\"] = les_units[key]\n\n # Redetermine cloud bounds with the time-averaged profile for model consistency (entrainment, etc.).\n tmp_ds = xr.Dataset() # first, use a temporary xr.Dataset to retain t-averaged precip rates.\n tmp_ds[\"P_Ni\"], tmp_ds[\"Pcb_per_Ni\"] = les.ds[\"P_Ni\"].copy(), les.ds[\"Pcb_per_Ni\"].copy()\n les._find_and_calc_cb_precip(self.LES_attributes[\"cbh_det_method\"])\n tmp_fields = [x for x in les.ds.keys()]\n les.ds[\"P_Ni\"].values, les.ds[\"Pcb_per_Ni\"].values = tmp_ds[\"P_Ni\"].values, tmp_ds[\"Pcb_per_Ni\"].values\n\n # crop updated dataset (temporarily change les object attributes to invoke internal method)\n tmp_attrs = {\"ql\": les.q_liq_field, \"height_dim\": les.height_dim}\n les.q_liq_field[\"name\"], les.q_liq_field[\"scaling\"], les.height_dim = \"ql\", 1, \"height\"\n les._crop_fields(tmp_fields, height_ind_2crop)\n les.q_liq_field[\"name\"], les.q_liq_field[\"scaling\"], les.height_dim = \\\n tmp_attrs[\"ql\"][\"name\"], tmp_attrs[\"ql\"][\"scaling\"], tmp_attrs[\"height_dim\"]\n\n # Make self.les point at the LES object's xr.Dataset for accessibility\n self.LES_obj = les\n self.les = self.LES_obj.ds\n\n # Make sure ice does not sediment more than 1 vertical cell per time step. In that case change delta_t\n if isinstance(v_f_ice, dict):\n max_sediment_vel = np.max(v_f_ice[\"value\"])\n else:\n max_sediment_vel = np.max(v_f_ice)\n max_sediment_dist = max_sediment_vel * delta_t # maximum ice sedimentation distance per time step\n if custom_vert_grid is not None:\n height = custom_vert_grid.astype(np.float32)\n height = height[np.logical_and(height <= self.les[\"height\"].max().values,\n height >= self.les[\"height\"].min().values)]\n if len(height) < len(custom_vert_grid):\n print(\"Some heights were omitted because they are outside the processed LES dataset grid\")\n else:\n height = self.les[\"height\"].values\n if max_sediment_dist > np.min(np.diff(height)):\n delta_t = np.floor(np.min(np.diff(height)) / max_sediment_vel)\n print(\"∆t was modified to the largest integer preventing ice sedimentation of more than 1 \" +\n \"grid cell (%d s)\" % delta_t)\n self.delta_t = delta_t\n self.mod_nt = int(final_t / delta_t) + 1 # number of time steps\n self.mod_nt_out = len(dt_out) # number of output time steps\n self.mod_nz = len(height) # number of vertical layers\n\n # allocate xarray DataSet for model atmospheric state and prognosed variable fields\n self.ds = xr.Dataset()\n self.ds = self.ds.assign_coords({\"height\": height})\n self.ds = self.ds.assign_coords({\"time\": np.arange(self.mod_nt) * self.delta_t})\n self.ds = self.ds.assign_coords({\"t_out\": dt_out})\n delta_z = np.diff(self.ds[\"height\"])\n self.ds[\"delta_z\"] = xr.DataArray(np.concatenate((delta_z, np.array([delta_z[-1]]))),\n dims=(\"height\"), attrs={\"units\": \"$m$\"})\n extrap_locs_tail = self.ds[\"time\"] >= self.les[\"time\"].max()\n extrap_locs_head = self.ds[\"time\"] <= self.les[\"time\"].min()\n x, y = np.meshgrid(self.les[\"height\"], self.les[\"time\"])\n for key in self.vars_harvested_from_les:\n\n # Linear interp (two 1D interpolations - fastest) if LES temporal evolution is to be considered.\n if 
self.les[\"time\"].size > 1:\n self._set_1D_or_2D_var_from_AERut(self.les[key], key)\n else:\n # Use LES bounds (min & max) outside the available range (redundant step - could be useful later).\n key_array_tmp = np.zeros((self.mod_nz, self.mod_nt))\n if extrap_locs_head.sum() > 0:\n key_array_tmp[:, extrap_locs_head.values] = np.tile(np.expand_dims(\n np.interp(self.ds[\"height\"], self.les[\"height\"],\n self.les[key].sel({\"time\": self.les[\"time\"].min()})),\n axis=1), (1, np.sum(extrap_locs_head.values)))\n if extrap_locs_tail.sum() > 0:\n key_array_tmp[:, extrap_locs_tail.values] = np.tile(np.expand_dims(\n np.interp(self.ds[\"height\"], self.les[\"height\"],\n self.les[key].sel({\"time\": self.les[\"time\"].max()})),\n axis=1), (1, np.sum(extrap_locs_tail.values)))\n self.ds[key] = xr.DataArray(key_array_tmp, dims=(\"height\", \"time\"))\n self.ds[key].attrs = self.les[key].attrs\n\n # init entrainment\n self.w_e_ent = w_e_ent\n self.entrain_to_cth = entrain_to_cth\n self.implicit_ent = implicit_ent\n self._set_1D_or_2D_var_from_AERut(w_e_ent, \"w_e_ent\", \"$m/s$\", \"Cloud-top entrainment rate\")\n if self.les[\"time\"].size > 1:\n self._set_1D_or_2D_var_from_AERut({\"time\": self.les[\"time\"].values,\n \"value\": self.les[\"lowest_cbh\"].values},\n \"lowest_cbh\", \"$m$\", \"Lowest cloud base height\")\n self._set_1D_or_2D_var_from_AERut({\"time\": self.les[\"time\"].values,\n \"value\": self.les[\"lowest_cth\"].values},\n \"lowest_cth\", \"$m$\", \"Lowest cloud top height\")\n else:\n self._set_1D_or_2D_var_from_AERut(self.les[\"lowest_cbh\"].item(),\n \"lowest_cbh\", \"$m$\", \"Lowest cloud base height\")\n self._set_1D_or_2D_var_from_AERut(self.les[\"lowest_cth\"].item(),\n \"lowest_cth\", \"$m$\", \"Lowest cloud top height\")\n\n # init vertical mixing and generate a mixing layer mask for the model\n self.tau_mix = tau_mix\n self.mixing_bounds = mixing_bounds\n self._set_1D_or_2D_var_from_AERut(tau_mix, \"tau_mix\", \"$s$\", \"Boundary-layer mixing time scale\")\n if mixing_bounds is None:\n self.ds[\"mixing_mask\"] = xr.DataArray(np.full((self.mod_nz, self.mod_nt),\n True, dtype=bool), dims=(\"height\", \"time\"))\n else:\n if isinstance(mixing_bounds[0], str):\n if mixing_bounds[0] == \"ql_thresh\":\n self.ds[\"mixing_base\"] = xr.DataArray(np.interp(\n self.ds[\"time\"], self.les[\"time\"], self.les[\"lowest_cbh\"]), dims=(\"time\"))\n self.ds[\"mixing_base\"].attrs[\"units\"] = \"$m$\"\n else:\n self._set_1D_or_2D_var_from_AERut(mixing_bounds[0], \"mixing_base\", \"$m$\", \"Mixing layer base\")\n if isinstance(mixing_bounds[1], str):\n if mixing_bounds[1] == \"ql_thresh\":\n self.ds[\"mixing_top\"] = xr.DataArray(np.interp(\n self.ds[\"time\"], self.les[\"time\"], self.les[\"lowest_cth\"]), dims=(\"time\"))\n self.ds[\"mixing_top\"].attrs[\"units\"] = \"$m$\"\n else:\n self._set_1D_or_2D_var_from_AERut(mixing_bounds[1], \"mixing_top\", \"$m$\", \"Mixing layer top\")\n mixing_mask = np.full((self.mod_nz, self.mod_nt), False, dtype=bool)\n for t in range(self.mod_nt):\n rel_ind = np.arange(\n np.argmin(np.abs(self.ds[\"height\"].values - self.ds[\"mixing_base\"].values[t])),\n np.argmin(np.abs(self.ds[\"height\"].values - self.ds[\"mixing_top\"].values[t])) + 1) # inc. 
top\n mixing_mask[rel_ind, t] = True\n self.ds[\"mixing_mask\"] = xr.DataArray(mixing_mask, dims=(\"height\", \"time\"))\n self.ds[\"mixing_mask\"].attrs[\"long_name\"] = \"Mixing-layer mask (True --> mixed)\"\n\n # init number weighted ice fall velocity\n self.v_f_ice = v_f_ice\n self._set_1D_or_2D_var_from_AERut(v_f_ice, \"v_f_ice\", \"$m/s$\", \"Number-weighted ice crystal fall velocity\")\n\n # init and apply heating rates (prior to calculating delta_aw and/or other activation-related variables)\n self.heat_rate, self.input_heatrate_units = heat_rate, input_heatrate_units\n if self.heat_rate is not None:\n self._set_1D_or_2D_var_from_AERut(heat_rate, \"heat_rate\", r\"$K\\ s^{-1}$\", \"Atmospheric heating rate\")\n if self.input_heatrate_units is not None:\n self.ds[\"heat_rate\"].values = \\\n (self.ds[\"heat_rate\"].values * self.ureg(self.input_heatrate_units)).to(\"K * s^{-1}\").magnitude\n for t in range(1, self.mod_nt):\n self.ds[\"T\"].values[:, t:] += self.ds[\"heat_rate\"].isel({\"time\": [t]}).values * delta_t\n\n # set singular activation parameters.\n if isinstance(tau_act, (float, int)):\n self.use_tau_act = True\n self.tau_act = tau_act\n else:\n self.use_tau_act = False\n self.tau_act = None\n self.implicit_act = implicit_act\n\n # init sublimation\n self.relative_sublim = relative_sublim\n self.implicit_sublim = implicit_sublim\n\n # calculate delta_aw\n self._calc_delta_aw()\n\n # allocate aerosol population Datasets\n self.aer = {}\n self.aer_info = copy.deepcopy(aer_info) # save the aerosol info dict for reference in a deep copy.\n self.input_conc_units, self.input_diam_units = input_conc_units, input_diam_units\n self._convert_input_to_SI() # Convert input concentration and/or diameter parameters to SI (if requested).\n optional_keys = [\"name\", \"nucleus_type\", \"diam_cutoff\", \"T_array\", # optional aerosol class input params.\n \"n_init_weight_prof\", \"singular_fun\", \"singular_scale\",\n \"entrain_psd\", \"entrain_to_cth\"]\n for ii in range(len(self.aer_info)):\n param_dict = {\"use_ABIFM\": use_ABIFM} # tmp dict for aerosol attributes to send to class call.\n if np.all([x in self.aer_info[ii].keys() for x in [\"n_init_max\", \"psd\"]]):\n param_dict[\"n_init_max\"] = self.aer_info[ii][\"n_init_max\"]\n param_dict[\"psd\"] = self.aer_info[ii][\"psd\"]\n else:\n raise KeyError('aerosol information requires the keys \"n_init_max\", \"psd\"')\n if not self.aer_info[ii][\"psd\"][\"type\"] in [\"mono\", \"logn\", \"multi_logn\", \"custom\", \"default\"]:\n raise ValueError('PSD type must be one of: \"mono\", \"logn\", \"multi_logn\", \"custom\", \"default\"')\n for key in optional_keys:\n param_dict[key] = self.aer_info[ii][key] if key in self.aer_info[ii].keys() else None\n\n # set aerosol population arrays\n tmp_aer_pop = self._set_aer_obj(param_dict)\n self.aer[tmp_aer_pop.name] = tmp_aer_pop\n\n # allocate nucleated ice DataArrays\n if not self.prognostic_ice:\n self.ds[\"ice_snap\"] = xr.DataArray(np.zeros(self.ds[\"height\"].size), dims=(\"height\"))\n self.ds[\"ice_snap\"].attrs[\"units\"] = \"$m^{-3}$\"\n self.ds[\"ice_snap\"].attrs[\"long_name\"] = \"Diagnostic ice number concentration (snapshot)\"\n self.ds[\"Ni_nuc\"] = xr.DataArray(np.zeros((self.mod_nz,\n self.mod_nt_out)), dims=(\"height\", \"t_out\"))\n self.ds[\"Ni_nuc\"].attrs[\"units\"] = \"$m^{-3}$\"\n self.ds[\"Ni_nuc\"].attrs[\"long_name\"] = \"Nucleated ice\"\n self.ds[\"nuc_rate\"] = xr.DataArray(np.zeros((self.mod_nz,\n self.mod_nt_out)), dims=(\"height\", \"t_out\"))\n 
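# nuc_rate holds the diagnosed ice nucleation rate on the output time grid (t_out),\n # complementing the accumulated Ni_nuc field allocated above; both are initialized to zero.\n 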
self.ds[\"nuc_rate\"].attrs[\"units\"] = r\"$m^{-3}\\:s^{-1}$\"\n self.ds[\"nuc_rate\"].attrs[\"long_name\"] = \"Ice nucleation rate\"\n\n print(\"Model initalization done! Total processing time = %f s\" % (time() - Now))\n\n # Set additional coordinates and attributes\n self.ds[\"height\"].attrs[\"units\"] = \"$m$\"\n self.ds[\"time\"].attrs[\"units\"] = \"$s$\"\n self.ds[\"height_km\"] = self.ds[\"height\"].copy() / 1e3 # add coordinates for height in km.\n self.ds = self.ds.assign_coords(height_km=(\"height\", self.ds[\"height_km\"].values))\n self.ds[\"height_km\"].attrs[\"units\"] = \"$km$\"\n self.ds[\"time_h\"] = self.ds[\"time\"].copy() / 3600 # add coordinates for time in h.\n self.ds = self.ds.assign_coords(time_h=(\"time\", self.ds[\"time_h\"].values))\n self.ds[\"time_h\"].attrs[\"units\"] = \"$h$\"\n self.ds = self.ds.assign_coords({\"t_out\": self.dt_out})\n self.ds[\"t_out\"].attrs[\"units\"] = \"$s$\"\n self.ds[\"t_out_h\"] = self.ds[\"t_out\"].copy() / 3600 # add coordinates for time in h.\n self.ds = self.ds.assign_coords(t_out_h=(\"t_out\", self.ds[\"t_out_h\"].values))\n self.ds[\"t_out_h\"].attrs[\"units\"] = \"$h$\"\n self.time_dim = \"time\"\n self.height_dim = \"height\"\n self.t_out_dim = \"t_out\"\n self.T_dim = \"T\" # setting the T dim even though it is only set when allocating an AER object.\n self.diam_dim = \"diam\" # setting the diam dim even though it is only set when allocating an AER object.\n\n # Run the model and reassign coordinate unit attributes (typically lost in xr.DataArray manipulations)\n if np.logical_and(not self.prognostic_ice, do_sublim):\n print(\"prognostic_ice is False while do_sublim is True, but do_sublim requires prognostic ice - \"\n \"setting do_sublim = False\")\n do_sublim = False\n self.do_act = do_act\n self.do_entrain = do_entrain\n self.do_mix_aer = do_mix_aer\n self.do_mix_ice = do_mix_ice\n self.do_sedim = do_sedim\n self.do_sublim = do_sublim\n self.time_splitting = time_splitting\n self.ent_then_act = ent_then_act\n self.output_budgets = output_budgets\n self.output_aer_decay = output_aer_decay\n if run_model:\n Run(self)\n self.ds[\"time_h\"].attrs[\"units\"] = \"$h$\"\n self.ds[\"time\"].attrs[\"units\"] = \"$s$\"\n self.ds[\"t_out\"].attrs[\"units\"] = \"$s$\"\n self.ds[\"t_out_h\"].attrs[\"units\"] = \"$h$\"\n self.ds[\"height_km\"].attrs[\"units\"] = \"$km$\"\n self.ds[\"height\"].attrs[\"units\"] = \"$m$\"\n for key in self.aer.keys():\n self.aer[key].ds[\"time_h\"].attrs[\"units\"] = \"$h$\"\n self.aer[key].ds[\"time\"].attrs[\"units\"] = \"$s$\"\n self.aer[key].ds[\"t_out_h\"].attrs[\"units\"] = \"$h$\"\n self.aer[key].ds[\"t_out\"].attrs[\"units\"] = \"$s$\"\n self.aer[key].ds[\"height_km\"].attrs[\"units\"] = \"$km$\"\n self.aer[key].ds[\"height\"].attrs[\"units\"] = \"$m$\"\n\n @staticmethod\n def calc_a_ice_w(T):\n \"\"\"\n calculate a_w(ice) using eq. 7 in Koop and Zobrist (2009, https://doi.org/10.1039/B914289D.\n\n Parameters\n ----------\n T: np.ndarray or xr.DataArray\n Temperature\n\n Returns\n -------\n a_ice_w: np.ndarray or xr.DataArray\n water activity for ice nucleation\n \"\"\"\n a_ice_w = \\\n (np.exp(9.550426 - 5723.265 / T + 3.53068 * np.log(T) -\n 0.00728332 * T) /\n (np.exp(54.842763 - 6763.22 / T -\n 4.210 * np.log(T) + 0.000367 * T +\n np.tanh(0.0415 * (T - 218.8)) * (53.878 - 1331.22 / T - 9.44523 * np.log(T) + 0.014025 * T)))\n )\n return a_ice_w\n\n def _calc_delta_aw(self):\n \"\"\"\n calculate the ∆aw field and S_ice for ABIFM using:\n 1. eq. 
1 in Knopf and Alpert (2013, https://doi.org/10.1039/C3FD00035D) combined with:\n 2. eq. 7 in Koop and Zobrist (2009, https://doi.org/10.1039/B914289D) for a_w(ice)\n Here we assume that our droplets are in equilibrium with the environment at its given RH, hence, RH = a_w.\n \"\"\"\n a_ice_w = self.calc_a_ice_w(self.ds['T'])\n self.ds[\"delta_aw\"] = self.ds['RH'] - a_ice_w\n self.ds[\"S_ice\"] = self.ds['RH'] / a_ice_w\n self.ds['delta_aw'].attrs['units'] = \"\"\n self.ds[\"S_ice\"].attrs['units'] = \"\"\n\n def _set_1D_or_2D_var_from_AERut(self, var_in, var_name, units_str=None, long_name_str=None):\n \"\"\"\n set a 1D xr.DataArray from a scalar or a dictionary containing \"time\" and \"value\" keys.\n If 'var_in' is a scalar then generating a uniform time series.\n Values are linearly interpolated onto the model temporal grid (values outside the provided\n range are extrapolated.\n The method can also operate on an xr.DataArray. In that case it interpolates the input\n variable (containing \"time\" and \"height\" coordinates) onto the ci_model object's grid\n and also extrapolates using edge values (two-1D linear interpolations are performed).\n\n Parameters\n ---------\n var_in: xr.DataArray, dict, or scalar.\n if xr.DataArray, must have \"time\" and \"height\" coordinates and dims.\n if dict then using the \"time\" and \"value\" keys of the variable.\n var_name: str\n Name of DataArray variable.\n units_str: str\n string for the units attribute.\n long_name_str: str\n string for the long_name attribute.\n \"\"\"\n if isinstance(var_in, (float, int)):\n self.ds[var_name] = xr.DataArray(np.ones(self.mod_nt) * var_in, dims=(\"time\"))\n elif isinstance(var_in, dict): # 1D linear interpolation\n if not np.all([x in var_in.keys() for x in [\"time\", \"value\"]]):\n raise KeyError('variable time series requires the keys \"time\" and \"value\"')\n if not np.logical_and(len(var_in[\"time\"]) > 1,\n len(var_in[\"time\"]) == len(var_in[\"value\"])):\n raise ValueError(\"times and values must have the same length > 1\")\n self.ds[var_name] = xr.DataArray(np.interp(self.ds[\"time\"],\n var_in[\"time\"], var_in[\"value\"]), dims=(\"time\"))\n elif isinstance(var_in, xr.DataArray): # 2D linear interpolation\n if not np.all([x in var_in.coords for x in [\"time\", \"height\"]]):\n raise KeyError('2D variable processing requires the \"time\" and \"height\" coordinates!')\n if not np.logical_and(len(var_in[\"time\"]) > 1, len(var_in[\"height\"]) > 1):\n raise ValueError(\"times and height coordinates must be longer than 1 for interpolation!\")\n key_array_tmp = np.zeros((self.mod_nz, self.mod_nt))\n key_1st_interp = np.zeros((var_in[\"height\"].size, self.mod_nt))\n for hh in range(var_in[\"height\"].size):\n key_1st_interp[hh, :] = np.interp(self.ds[\"time\"].values, var_in[\"time\"].values,\n var_in.isel({\"height\": hh}))\n for tt in range(self.mod_nt):\n key_array_tmp[:, tt] = np.interp(self.ds[\"height\"].values, var_in[\"height\"].values,\n key_1st_interp[:, tt])\n self.ds[var_name] = xr.DataArray(key_array_tmp, dims=(\"height\", \"time\"))\n else:\n raise TypeError(\"Input variable must be of type float, int, dict, or xr.DataArray!\")\n if units_str is not None:\n self.ds[var_name].attrs[\"units\"] = units_str\n if long_name_str is not None:\n self.ds[var_name].attrs[\"long_name\"] = long_name_str\n\n def _set_aer_obj(self, param_dict):\n \"\"\"\n Invoke an AER class call and use the input parameters provided. 
Using a full dictionary key call to\n maintain consistency even if some AER class input variable order will be changed in future updates.\n\n Parameters\n ----------\n param_dict: dict\n Keys include all possible input parameters for the AER sub-classes.\n\n Returns\n -------\n tmp_aer_pop: AER class object\n AER class object that includes the AER array with dims height x time x diameter (ABIFM) or\n height x time x temperature (singular).\n \"\"\"\n if param_dict[\"psd\"][\"type\"] == \"mono\":\n tmp_aer_pop = AER.mono_AER(use_ABIFM=param_dict[\"use_ABIFM\"], n_init_max=param_dict[\"n_init_max\"],\n psd=param_dict[\"psd\"], nucleus_type=param_dict[\"nucleus_type\"],\n name=param_dict[\"name\"], diam_cutoff=param_dict[\"diam_cutoff\"],\n T_array=param_dict[\"T_array\"], singular_fun=param_dict[\"singular_fun\"],\n entrain_psd=param_dict[\"entrain_psd\"],\n entrain_to_cth=param_dict[\"entrain_to_cth\"],\n singular_scale=param_dict[\"singular_scale\"],\n n_init_weight_prof=param_dict[\"n_init_weight_prof\"], ci_model=self)\n elif param_dict[\"psd\"][\"type\"] == \"logn\":\n tmp_aer_pop = AER.logn_AER(use_ABIFM=param_dict[\"use_ABIFM\"], n_init_max=param_dict[\"n_init_max\"],\n psd=param_dict[\"psd\"], nucleus_type=param_dict[\"nucleus_type\"],\n name=param_dict[\"name\"], diam_cutoff=param_dict[\"diam_cutoff\"],\n T_array=param_dict[\"T_array\"], singular_fun=param_dict[\"singular_fun\"],\n entrain_psd=param_dict[\"entrain_psd\"],\n entrain_to_cth=param_dict[\"entrain_to_cth\"],\n singular_scale=param_dict[\"singular_scale\"],\n n_init_weight_prof=param_dict[\"n_init_weight_prof\"], ci_model=self)\n elif param_dict[\"psd\"][\"type\"] == \"multi_logn\":\n tmp_aer_pop = AER.multi_logn_AER(use_ABIFM=param_dict[\"use_ABIFM\"],\n n_init_max=param_dict[\"n_init_max\"],\n psd=param_dict[\"psd\"], nucleus_type=param_dict[\"nucleus_type\"],\n name=param_dict[\"name\"], diam_cutoff=param_dict[\"diam_cutoff\"],\n T_array=param_dict[\"T_array\"],\n singular_fun=param_dict[\"singular_fun\"],\n entrain_psd=param_dict[\"entrain_psd\"],\n entrain_to_cth=param_dict[\"entrain_to_cth\"],\n singular_scale=param_dict[\"singular_scale\"],\n n_init_weight_prof=param_dict[\"n_init_weight_prof\"], ci_model=self)\n elif param_dict[\"psd\"][\"type\"] == \"custom\":\n tmp_aer_pop = AER.custom_AER(use_ABIFM=param_dict[\"use_ABIFM\"], n_init_max=param_dict[\"n_init_max\"],\n psd=param_dict[\"psd\"], nucleus_type=param_dict[\"nucleus_type\"],\n name=param_dict[\"name\"], diam_cutoff=param_dict[\"diam_cutoff\"],\n T_array=param_dict[\"T_array\"], singular_fun=param_dict[\"singular_fun\"],\n entrain_psd=param_dict[\"entrain_psd\"],\n entrain_to_cth=param_dict[\"entrain_to_cth\"],\n singular_scale=param_dict[\"singular_scale\"],\n n_init_weight_prof=param_dict[\"n_init_weight_prof\"], ci_model=self)\n elif param_dict[\"psd\"][\"type\"] == \"default\":\n param_dict[\"psd\"].update({\"diam_mean\": 1e-6, \"geom_sd\": 2.5, \"n_bins\": 35, \"diam_min\": 0.01e-6,\n \"m_ratio\": 2.}) # default parameters.\n tmp_aer_pop = AER.logn_AER(use_ABIFM=param_dict[\"use_ABIFM\"], n_init_max=param_dict[\"n_init_max\"],\n psd=param_dict[\"psd\"], nucleus_type=param_dict[\"nucleus_type\"],\n name=param_dict[\"name\"], diam_cutoff=param_dict[\"diam_cutoff\"],\n T_array=param_dict[\"T_array\"], singular_fun=param_dict[\"singular_fun\"],\n entrain_psd=param_dict[\"entrain_psd\"],\n entrain_to_cth=param_dict[\"entrain_to_cth\"],\n singular_scale=param_dict[\"singular_scale\"],\n n_init_weight_prof=param_dict[\"n_init_weight_prof\"], 
ci_model=self)\n\n return tmp_aer_pop\n\n def _convert_input_to_SI(self):\n \"\"\"\n Convert one or more input parameters to SI if other units were specified.\n \"\"\"\n if self.input_conc_units is not None: # assuming input_conc_units is an str with valid conc. units\n self._do_input_conversion([\"n_init_max\", \"dn_dlogD\"], self.input_conc_units, \"m^{-3}\")\n if self.input_diam_units is not None: # assuming input_diam_units is an str with valid length units\n self._do_input_conversion([\"diam\", \"diam_mean\", \"diam_min\", \"diam_cutoff\"], self.input_diam_units, \"m\")\n\n def _do_input_conversion(self, param_list, from_units, to_units):\n \"\"\"\n Search for input parameters in the aer_info input list of dicts and convert units to SI.\n Quantity type is parsed by pint (for all valid unit strings see:\n https://github.com/hgrecco/pint/blob/master/pint/default_en.txt).\n\n Parameters\n ----------\n param_list: list\n Elements include all possible (and relevant) input parameters for conversion, so define wisely.\n from_units: str\n Units to convert from (input units).\n to_units: str\n Units to convert to.\n \"\"\"\n for ii in range(len(self.aer_info)):\n for param in param_list:\n if param in self.aer_info[ii][\"psd\"].keys():\n param_val = (self.aer_info[ii][\"psd\"][param] * self.ureg(from_units)).to(to_units).magnitude\n if type(self.aer_info[ii][\"psd\"][param]) == tuple:\n self.aer_info[ii][\"psd\"][param] = tuple(param_val)\n elif type(self.aer_info[ii][\"psd\"][param]) == list:\n self.aer_info[ii][\"psd\"][param] = list(param_val)\n else: # scalar or np.ndarray\n self.aer_info[ii][\"psd\"][param] = param_val\n print(\"'%s' (in aer_info's 'psd' keys) was input in %s units; now converted to %s (SI)\" %\n (param, from_units, to_units))\n if param in self.aer_info[ii].keys():\n param_val = (self.aer_info[ii][param] * self.ureg(from_units)).to(to_units).magnitude\n if type(self.aer_info[ii][param]) == tuple:\n self.aer_info[ii][param] = list(param_val)\n elif type(self.aer_info[ii][param]) == list:\n self.aer_info[ii][param] = list(param_val)\n else: # scalar or np.ndarray\n self.aer_info[ii][param] = param_val\n print(\"'%s' (in aer_info) was input in %s units; now converted to %s (SI)\" %\n (param, from_units, to_units))\n\n def _convert_quantity_units(self, to_units):\n \"\"\"\n Convert a quantity units (e.g., volume, concentration) in all relevant arrays (e.g., from 1/m^3 to L-1).\n\n Parameters\n ---------\n to_units: str\n Units to convert to. 
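(Illustrative call on a hypothetical instance: model._convert_quantity_units(\"L^-1\") would convert concentration fields from m^-3 to per liter, assuming pint parses the unit string.) 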
Quantity type is parsed by pint (for all valid unit strings see:\n https://github.com/hgrecco/pint/blob/master/pint/default_en.txt).\n \"\"\"\n Converted = [] # converted fields\n for DA in self.ds.keys():\n if isinstance(self.ds[DA].data, pint.Quantity):\n if self.ds[DA].data.check(to_units):\n Converted.append(\"The units of '%s' converted from %s to %s\" %\n (DA, self.ds[DA].attrs[\"units\"], to_units))\n self.ds[DA].data = self.ds[DA].data.to(to_units)\n self.ds[DA].attrs[\"units\"] = r\"$%s$\" % to_units\n for key in self.aer.keys():\n for DA in self.aer[key].ds.keys():\n if isinstance(self.aer[key].ds[DA].data, pint.Quantity):\n if self.aer[key].ds[DA].data.check(to_units):\n Converted.append(\"The units of '%s' in the '%s' popolation converted from %s to %s\" %\n (DA, key, self.aer[key].ds[DA].attrs[\"units\"], to_units))\n self.aer[key].ds[DA].data = self.aer[key].ds[DA].data.to(to_units)\n self.aer[key].ds[DA].attrs[\"units\"] = r\"$%s$\" % to_units\n if Converted:\n for Conv_str in Converted:\n print(Conv_str)\n else:\n print(\"No fields with units able to convert to %s \" % to_units)\n\n def _swap_height_dim_to_from_km(self):\n \"\"\"\n If the height dim is in m changing to km and vice versa.\n \"\"\"\n if \"height\" in self.ds.dims:\n print(\"Converting height dimension units from meters to kilometers\")\n self.ds = self.ds.swap_dims({\"height\": \"height_km\"})\n self.height_dim = \"height_km\"\n for key in self.aer.keys():\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"height\": \"height_km\"})\n else:\n print(\"Converting height dimension units from kilometers to meters\")\n self.ds = self.ds.swap_dims({\"height_km\": \"height\"})\n self.height_dim = \"height\"\n for key in self.aer.keys():\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"height_km\": \"height\"})\n\n def _swap_time_dim_to_from_hr(self):\n \"\"\"\n If the time dim is in seconds changing to hours and vice versa.\n \"\"\"\n if \"time\" in self.ds.dims:\n print(\"Converting time dimension units from seconds to hours\")\n self.ds = self.ds.swap_dims({\"time\": \"time_h\"})\n self.time_dim = \"time_h\"\n for key in self.aer.keys():\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"time\": \"time_h\"})\n else:\n print(\"Converting time dimension units from hours to seconds\")\n self.ds = self.ds.swap_dims({\"time_h\": \"time\"})\n self.time_dim = \"time\"\n for key in self.aer.keys():\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"time_h\": \"time\"})\n if \"t_out\" in self.ds.dims:\n print(\"Converting output time dimension units from seconds to hours\")\n self.ds = self.ds.swap_dims({\"t_out\": \"t_out_h\"})\n self.t_out_dim = \"t_out_h\"\n for key in self.aer.keys():\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"t_out\": \"t_out_h\"})\n else:\n print(\"Converting output time dimension units from hours to seconds\")\n self.ds = self.ds.swap_dims({\"t_out_h\": \"t_out\"})\n self.t_out_dim = \"t_out\"\n for key in self.aer.keys():\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"t_out_h\": \"t_out\"})\n\n def _swap_diam_dim_to_from_um(self):\n \"\"\"\n If the diam dim is in m changing to um and vice versa.\n \"\"\"\n for key in self.aer.keys():\n if \"diam\" in self.aer[key].ds.dims:\n print(\"Converting diameter dimension units for %s from meters to micrometers\" % key)\n self.aer[key].ds = self.aer[key].ds.swap_dims({\"diam\": \"diam_um\"})\n self.diam_dim = \"diam_um\"\n else:\n print(\"Converting diameter dimension units for %s from micrometers to meters\" % key)\n self.aer[key].ds = 
\n    def _swap_height_dim_to_from_km(self):\n        \"\"\"\n        If the height dim is in m, change to km, and vice versa.\n        \"\"\"\n        if \"height\" in self.ds.dims:\n            print(\"Converting height dimension units from meters to kilometers\")\n            self.ds = self.ds.swap_dims({\"height\": \"height_km\"})\n            self.height_dim = \"height_km\"\n            for key in self.aer.keys():\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"height\": \"height_km\"})\n        else:\n            print(\"Converting height dimension units from kilometers to meters\")\n            self.ds = self.ds.swap_dims({\"height_km\": \"height\"})\n            self.height_dim = \"height\"\n            for key in self.aer.keys():\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"height_km\": \"height\"})\n\n    def _swap_time_dim_to_from_hr(self):\n        \"\"\"\n        If the time dim is in seconds, change to hours, and vice versa.\n        \"\"\"\n        if \"time\" in self.ds.dims:\n            print(\"Converting time dimension units from seconds to hours\")\n            self.ds = self.ds.swap_dims({\"time\": \"time_h\"})\n            self.time_dim = \"time_h\"\n            for key in self.aer.keys():\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"time\": \"time_h\"})\n        else:\n            print(\"Converting time dimension units from hours to seconds\")\n            self.ds = self.ds.swap_dims({\"time_h\": \"time\"})\n            self.time_dim = \"time\"\n            for key in self.aer.keys():\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"time_h\": \"time\"})\n        if \"t_out\" in self.ds.dims:\n            print(\"Converting output time dimension units from seconds to hours\")\n            self.ds = self.ds.swap_dims({\"t_out\": \"t_out_h\"})\n            self.t_out_dim = \"t_out_h\"\n            for key in self.aer.keys():\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"t_out\": \"t_out_h\"})\n        else:\n            print(\"Converting output time dimension units from hours to seconds\")\n            self.ds = self.ds.swap_dims({\"t_out_h\": \"t_out\"})\n            self.t_out_dim = \"t_out\"\n            for key in self.aer.keys():\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"t_out_h\": \"t_out\"})\n\n    def _swap_diam_dim_to_from_um(self):\n        \"\"\"\n        If the diam dim is in m, change to um, and vice versa.\n        \"\"\"\n        for key in self.aer.keys():\n            if \"diam\" in self.aer[key].ds.dims:\n                print(\"Converting diameter dimension units for %s from meters to micrometers\" % key)\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"diam\": \"diam_um\"})\n                self.diam_dim = \"diam_um\"\n            else:\n                print(\"Converting diameter dimension units for %s from micrometers to meters\" % key)\n                self.aer[key].ds = self.aer[key].ds.swap_dims({\"diam_um\": \"diam\"})\n                self.diam_dim = \"diam\"\n\n    def _swap_T_dim_to_from_C(self):\n        \"\"\"\n        If the T dim is in Kelvin, change to Celsius, and vice versa (singular mode only).\n        \"\"\"\n        if not self.use_ABIFM:\n            for key in self.aer.keys():\n                if \"T\" in self.aer[key].ds.dims:\n                    print(\"Converting temperature dimension units for %s from Kelvin to Celsius\" % key)\n                    self.aer[key].ds = self.aer[key].ds.swap_dims({\"T\": \"T_C\"})\n                    self.T_dim = \"T_C\"\n                else:\n                    print(\"Converting temperature dimension units for %s from Celsius to Kelvin\" % key)\n                    self.aer[key].ds = self.aer[key].ds.swap_dims({\"T_C\": \"T\"})\n                    self.T_dim = \"T\"\n\n    def ci_model_ds_to_netcdf(self, out_prefix='AC_1D_out'):\n        \"\"\"\n        Export datasets from a model simulation. Each dataset is stored in a different file.\n        Files are generated for the main ci_model object and each aerosol population.\n\n        Parameters\n        ----------\n        out_prefix: str\n            Filename prefix and path to which ci_model's datasets are exported.\n            A \"_main.nc\" suffix is added to the filename of the NetCDF file containing the main\n            ci_model dataset, while for each dataset of an aerosol population xxxx, an\n            'aer_pop_xxxx.nc' suffix is added.\n        \"\"\"\n        out_filenames = []\n        ds_4_out = self.ds.copy(deep=True)\n        ds_4_out = self.strip_units(ds_4_out)\n        out_filenames.append(out_prefix + \"_main.nc\")\n        ds_4_out.to_netcdf(out_filenames[-1])\n        for aer_key in self.aer.keys():\n            ds_4_out = self.aer[aer_key].ds.copy(deep=True)\n            ds_4_out = self.strip_units(ds_4_out)\n            out_filenames.append(out_prefix + f\"_aer_pop_{aer_key}.nc\")\n            ds_4_out.to_netcdf(out_filenames[-1])\n        print(\"Exporting ci_model xr.Dataset to the following files\\n\")\n        print(\"\\n\".join(out_filenames) + \"\\n\")\n
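\n    # round trip: ci_model_ds_to_netcdf('run1') writes 'run1_main.nc' plus one\n    # 'run1_aer_pop_xxxx.nc' per population; ci_model_ds_from_netcdf('run1')\n    # reads them back and reassigns the stripped pint units.\n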
\n    def ci_model_ds_from_netcdf(self, out_prefix='AC_1D_out'):\n        \"\"\"\n        Load datasets from a model simulation. Each dataset is stored in a different file.\n        Assumes files were generated for the main ci_model object and each aerosol population.\n\n        Parameters\n        ----------\n        out_prefix: str\n            Filename prefix and path from which to load ci_model's datasets.\n            A \"_main.nc\" suffix is added to the filename of the NetCDF file containing the main\n            ci_model dataset, while for each dataset of an aerosol population xxxx, an\n            'aer_pop_xxxx.nc' suffix is added.\n        \"\"\"\n        ds_4_out = xr.open_dataset(out_prefix + \"_main.nc\")\n        ds_4_out = self.reassign_units(ds_4_out)\n        self.ds = ds_4_out\n        for aer_key in self.aer.keys():\n            ds_4_out = xr.open_dataset(out_prefix + f\"_aer_pop_{aer_key}.nc\")\n            ds_4_out = self.reassign_units(ds_4_out)\n            self.aer[aer_key].ds = ds_4_out\n        print(f\"Finished loading ci_model xr.Datasets from the {out_prefix} files\\n\")\n\n    @staticmethod\n    def strip_units(ds_4_out):\n        \"\"\"\n        Strip units from fields in an xr.Dataset, enabling export to NetCDF files\n        (convert pint.Quantity data fields to np.ndarray while saving stripping info).\n\n        Parameters\n        ----------\n        ds_4_out: xr.Dataset\n            Dataset from which to strip units.\n\n        Returns\n        -------\n        ds_4_out: xr.Dataset\n            Dataset with stripped units.\n        \"\"\"\n        for key in ds_4_out.keys():\n            if isinstance(ds_4_out[key].data, pint.quantity.Quantity):\n                print(f\"Stripping units from '{key}'\")\n                ds_4_out[key].data = ds_4_out[key].data.magnitude\n                ds_4_out[key].attrs[\"stripped_units\"] = 1\n            else:\n                ds_4_out[key].attrs[\"stripped_units\"] = 0\n        return ds_4_out\n\n    def reassign_units(self, ds_4_out):\n        \"\"\"\n        Reassign units to fields in an xr.Dataset loaded from a NetCDF file, assuming that\n        a 'stripped_units' attribute exists\n        (convert np.ndarray data fields to pint.Quantity and delete stripping info).\n\n        Parameters\n        ----------\n        ds_4_out: xr.Dataset\n            Dataset with stripped units.\n\n        Returns\n        -------\n        ds_4_out: xr.Dataset\n            Dataset with units reassigned to its fields.\n        \"\"\"\n        for key in ds_4_out.keys():\n            if ds_4_out[key].attrs[\"stripped_units\"]:\n                print(f\"Restoring units to '{key}'\")\n                ds_4_out[key].data *= self.ureg(ds_4_out[key].attrs[\"units\"])\n                del ds_4_out[key].attrs[\"stripped_units\"]\n        return ds_4_out\n\n    def _recalc_cld_and_mixing(self):\n        \"\"\"\n        Recalculate Jhet (ABIFM) and LES-harvested parameters following changes to the LES output\n        (essentially, cloud depth) so that the model accounts for them in the simulation. 
Mixing bounds are updated only if they\n are cloud-dependent (e.g., using 'ql_thresh').\n NOTE: no other change is made to the grid or cropped fields, so these parameters should be specified in\n the first call to init_model.\n NOTE: in the case of ABIFM, 'inp_cum_init' and 'inp_pct' are not recalculated.\n ALSO, do not change units from SI before calling this method.\n \"\"\"\n print(\"recalculating cloud depth and mixing layer depth\")\n # find all cloud bases and the precip rate in the lowest cloud base in every time step (each profile).\n if self.LES_attributes[\"cbh_det_method\"] == \"ql_thresh\":\n cbh_all = np.diff(self.ds[\"ql\"].values >= self.in_cld_q_thresh, prepend=0, axis=0) == 1\n cth_all = np.diff(self.ds[\"ql\"].values >= self.in_cld_q_thresh, append=0, axis=0) == -1\n else:\n print(\"Unknown cbh method string - skipping cbh detection function\")\n return\n self.ds[\"lowest_cbh\"].values = np.full(self.ds.dims[\"time\"], np.nan)\n self.ds[\"lowest_cth\"].values = np.full(self.ds.dims[\"time\"], np.nan)\n for tt in range(self.ds.dims[\"time\"]):\n cbh_lowest = np.argwhere(cbh_all[:, tt]).flatten()\n if len(cbh_lowest):\n cth_lowest = np.argwhere(cth_all[:, tt]).flatten()\n self.ds[\"lowest_cbh\"].values[tt] = self.ds[\"height\"].values[cbh_lowest[0]]\n self.ds[\"lowest_cth\"].values[tt] = self.ds[\"height\"].values[cth_lowest[0]]\n\n # redetermine mixing bounds and mixing mask\n if self.mixing_bounds is not None:\n if isinstance(self.mixing_bounds[0], str):\n if self.mixing_bounds[0] == \"ql_thresh\":\n self.ds[\"mixing_base\"].values = np.copy(self.ds[\"lowest_cbh\"].values)\n if isinstance(self.mixing_bounds[1], str):\n if self.mixing_bounds[1] == \"ql_thresh\":\n self.ds[\"mixing_top\"].values = np.copy(self.ds[\"lowest_cth\"].values)\n mixing_mask = np.full((self.mod_nz, self.mod_nt), False, dtype=bool)\n for t in range(self.mod_nt):\n rel_ind = np.arange(\n np.argmin(np.abs(self.ds[\"height\"].values - self.ds[\"mixing_base\"].values[t])),\n np.argmin(np.abs(self.ds[\"height\"].values - self.ds[\"mixing_top\"].values[t])) + 1) # inc. top\n mixing_mask[rel_ind, t] = True\n self.ds[\"mixing_mask\"].values = mixing_mask\n\n # Recalculate delta_aw\n print(\"recalculating delta_aw\")\n self._calc_delta_aw() # recalculate delta_aw\n\n if self.use_ABIFM:\n # Recalculate Jhet for ABIFM (NOTE that 'inp_cum_init' and 'inp_pct' are not recalculated)\n print(\"recalculating Jhet (use_ABIFM == True)\")\n for key in self.aer.keys():\n self.aer[key].ds[\"Jhet\"] = 10.**(self.aer[key].Jhet.c + self.aer[key].Jhet.m *\n self.ds[\"delta_aw\"]) * 1e4 # calc Jhet\n if self.aer[key].singular_scale != 1.:\n self.aer[key].ds[\"Jhet\"].values *= self.aer[key].singular_scale\n self.aer[key].ds[\"Jhet\"].attrs[\"units\"] = \"$m^{-2} s^{-1}$\"\n self.aer[key].ds[\"Jhet\"].attrs[\"long_name\"] = \"Heterogeneous ice nucleation rate coefficient\"\n else:\n # allocate aerosol population Datasets (required since the T array might have changed)\n self.aer = {}\n optional_keys = [\"name\", \"nucleus_type\", \"diam_cutoff\", \"T_array\", # opt. 
aerosol class input params\n                         \"n_init_weight_prof\", \"singular_fun\", \"singular_scale\",\n                         \"entrain_psd\", \"entrain_to_cth\"]\n        for ii in range(len(self.aer_info)):\n            param_dict = {\"use_ABIFM\": self.use_ABIFM}  # tmp dict for aerosol attributes to send to class call\n            if np.all([x in self.aer_info[ii].keys() for x in [\"n_init_max\", \"psd\"]]):\n                param_dict[\"n_init_max\"] = self.aer_info[ii][\"n_init_max\"]\n                param_dict[\"psd\"] = self.aer_info[ii][\"psd\"]\n            else:\n                raise KeyError('aerosol information requires the keys \"n_init_max\", \"psd\"')\n            if not self.aer_info[ii][\"psd\"][\"type\"] in [\"mono\", \"logn\", \"multi_logn\", \"custom\", \"default\"]:\n                raise ValueError('PSD type must be one of: \"mono\", \"logn\", \"multi_logn\", \"custom\", \"default\"')\n            for key in optional_keys:\n                param_dict[key] = self.aer_info[ii][key] if key in self.aer_info[ii].keys() else None\n\n            # set aerosol population arrays\n            tmp_aer_pop = self._set_aer_obj(param_dict)\n            self.aer[tmp_aer_pop.name] = tmp_aer_pop\n\n    @staticmethod\n    def generate_figure(**kwargs):\n        \"\"\"\n        A method for generating a figure object.\n        \"\"\"\n        return plotting.generate_figure(**kwargs)\n\n    def plot_curtain(self, **kwargs):\n        \"\"\"\n        A method for curtain plots based on the object's xr.Dataset.\n        \"\"\"\n        return plotting.plot_curtain(self, **kwargs)\n\n    def plot_tseries(self, **kwargs):\n        \"\"\"\n        A method for time series plots based on the object's xr.Dataset.\n        \"\"\"\n        return plotting.plot_tseries(self, **kwargs)\n\n    def plot_profile(self, **kwargs):\n        \"\"\"\n        A method for profile plots based on the object's xr.Dataset.\n        \"\"\"\n        return plotting.plot_profile(self, **kwargs)\n\n    def plot_psd(self, **kwargs):\n        \"\"\"\n        A method for PSD plots based on the object's xr.Dataset.\n        \"\"\"\n        return plotting.plot_psd(self, **kwargs)\n","sub_path":"AC_1D/init_model.py","file_name":"init_model.py","file_ext":"py","file_size_in_byte":65155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"45608192","text":"# -*- coding: utf-8 -*-\nfrom classytags.arguments import Argument\nfrom classytags.core import Tag, Options\nfrom django import template\nfrom django.template.defaultfilters import safe\n\nregister = template.Library()\n\n\nclass RenderPlaceholder(Tag):\n    name = 'render_placeholder'\n    options = Options(\n        Argument('placeholder'),\n        Argument('width', default=None, required=False),\n        'language',\n        Argument('language', default=None, required=False),\n    )\n\n    def render_tag(self, context, placeholder, width, language=None):\n        raise DeprecationWarning('render_placeholder is now located in cms_tags. 
Please do not load placeholder_tags anymore')\nregister.tag(RenderPlaceholder)\n","sub_path":"cms/templatetags/placeholder_tags.py","file_name":"placeholder_tags.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"618911340","text":"# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\nfrom keras.layers import Dense, Flatten, Dropout, BatchNormalization\nfrom keras import Model, regularizers\nimport os\nimport datetime\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\nclass Fashion_MnistModel_one_layer(Model):\n def __init__(self):\n super(Fashion_MnistModel_one_layer, self).__init__()\n self.flatten = Flatten()\n self.d1 = Dense(512, activation='softmax')\n\n def call(self, x):\n x = self.flatten(x)\n y = self.d1(x)\n return y\n\n\nclass Fashion_MnistModel_mullayer(Model):\n def __init__(self):\n super(Fashion_MnistModel_mullayer, self).__init__()\n self.flatten = Flatten()\n self.d1 = Dense(512, activation='relu')\n self.d2 = Dense(256, activation='relu')\n self.d3 = Dense(128, activation='relu')\n self.d4 = Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.flatten(x)\n x = self.d1(x)\n x = self.d2(x)\n x = self.d3(x)\n y = self.d4(x)\n return y\n\n\nclass Fashion_MnistModel_Dropout(Model):\n def __init__(self):\n super(Fashion_MnistModel_Dropout, self).__init__()\n self.flatten = Flatten()\n self.d1 = Dense(512, activation='relu')\n self.d2 = Dense(256, activation='relu')\n self.d3 = Dense(128, activation='relu')\n self.drop = Dropout(0.5)\n self.d4 = Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.flatten(x)\n x = self.d1(x)\n x = self.d2(x)\n x = self.d3(x)\n x = self.drop(x)\n y = self.d4(x)\n return y\n\n\nclass Fashion_MnistModel_BN(Model):\n def __init__(self):\n super(Fashion_MnistModel_BN, self).__init__()\n self.flatten = Flatten()\n self.d1 = Dense(512, activation='relu')\n self.BN1 = BatchNormalization()\n self.d2 = Dense(256, activation='relu')\n self.BN2 = BatchNormalization()\n self.d3 = Dense(128, activation='relu')\n self.BN3 = BatchNormalization()\n self.d4 = Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.flatten(x)\n x = self.d1(x)\n x = self.BN1(x)\n x = self.d2(x)\n x = self.BN2(x)\n x = self.d3(x)\n x = self.BN3(x)\n y = self.d4(x)\n return y\n\n\nclass Fashion_MnistModel_R(Model):\n def __init__(self):\n super(Fashion_MnistModel_R, self).__init__()\n self.flatten = Flatten()\n self.d1 = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.001))\n self.d2 = Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.001))\n self.d3 = Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.001))\n self.d4 = Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.flatten(x)\n x = self.d1(x)\n x = self.d2(x)\n x = self.d3(x)\n y = self.d4(x)\n return y\n\n\nclass Fashion_MnistModel_Optimize(Model):\n def __init__(self):\n super(Fashion_MnistModel_Optimize, self).__init__()\n self.flatten = Flatten()\n self.d1 = Dense(1024, activation='relu') # , kernel_regularizer=regularizers.l2(0.005))\n self.BN1 = BatchNormalization()\n self.d2 = Dense(512, activation='relu') # , kernel_regularizer=regularizers.l2(0.005))\n self.dropout1 = Dropout(0.2)\n self.BN2 = BatchNormalization()\n self.d3 = Dense(256, activation='relu') # , kernel_regularizer=regularizers.l2(0.005))\n self.dropout2 = Dropout(0.2)\n self.BN3 = 
BatchNormalization()\n        self.d4 = Dense(128, activation='relu')  # , kernel_regularizer=regularizers.l2(0.005))\n        self.dropout3 = Dropout(0.5)\n        self.d5 = Dense(10, activation='softmax')\n\n    def call(self, x):\n        x = self.flatten(x)\n        x = self.d1(x)\n        x = self.BN1(x)\n        x = self.d2(x)\n        # x = self.dropout1(x)\n        x = self.BN2(x)\n        x = self.d3(x)\n        # x = self.dropout2(x)\n        x = self.BN3(x)\n        x = self.d4(x)\n        x = self.dropout3(x)\n        y = self.d5(x)\n        return y\n\n\ndef generateds(path, txt):\n    f = open(txt, 'r')\n    contents = f.readlines()  # read in lines\n    f.close()\n    x, y_ = [], []\n    for content in contents:\n        value = content.split()  # split by whitespace into an array\n        img_path = path + value[0]\n        img = Image.open(img_path)\n        img = np.array(img.convert('L'))\n        img = img / 255.0\n        x.append(img)\n        y_.append(value[1])\n    x = np.array(x)\n    y_ = np.array(y_)\n    y_ = y_.astype(np.int64)\n    print(\"Generated dataset successfully!\")\n    return x, y_\n\n\nif __name__ == \"__main__\":\n    np.set_printoptions(threshold=float('inf'))\n    model_save_path = './checkpoint/fashion_mnist.ckpt'\n    load_pretrain_model = False\n\n    train_path = './fashion_mnist_image_label/fashion_mnist_train_jpg_60000/'\n    train_txt = './fashion_mnist_image_label/fashion_mnist_train_jpg_60000.txt'\n    test_path = './fashion_mnist_image_label/fashion_mnist_test_jpg_10000/'\n    test_txt = './fashion_mnist_image_label/fashion_mnist_test_jpg_10000.txt'\n\n    x_train, y_train = generateds(train_path, train_txt)\n    x_test, y_test = generateds(test_path, test_txt)\n    # print an image label and display the image\n    # print('y_train[0]', y_train[0])\n    # plt.imshow(x_train[0], cmap=plt.cm.binary)\n    # plt.show()\n\n    # model = Fashion_MnistModel_one_layer()\n    # model = Fashion_MnistModel_mullayer()\n    # model = Fashion_MnistModel_BN()\n    # model = Fashion_MnistModel_Dropout()\n    # model = Fashion_MnistModel_R()\n    # model = Fashion_MnistModel_Optimize()\n    _models = [Fashion_MnistModel_one_layer(),\n               Fashion_MnistModel_mullayer(),\n               Fashion_MnistModel_BN(),\n               Fashion_MnistModel_Dropout(),\n               Fashion_MnistModel_R()]\n\n    for model in _models:\n        print(\"----------------Training on\", model.name, \"---------------------\")\n        model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01, decay=0.01),\n                      loss='sparse_categorical_crossentropy',\n                      metrics=['sparse_categorical_accuracy'])\n\n        log_dir = './logs_1/' + model.name + \"_\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n        # create a callback that writes TensorBoard logs\n        tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,\n                                                     write_graph=True,\n                                                     histogram_freq=1,\n                                                     update_freq='epoch')\n\n        model.fit(x_train, y_train, epochs=2, validation_data=(x_test, y_test),\n                  callbacks=[tb_callback], validation_freq=1, verbose=1)\n        # model.summary()\n","sub_path":"人工智能实践/Fashion_MNIST_models.py","file_name":"Fashion_MNIST_models.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"273955987","text":"#!/usr/bin/python3\n\"\"\"\n    Starts a Flask web application to listen on 0.0.0.0:5000\n\"\"\"\n\nfrom flask import Flask, render_template\nfrom models import storage\n\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/cities_by_states')\ndef cities_by_states():\n    \"\"\"\n    List cities and states in a jinja template\n    \"\"\"\n    all_states = storage.all(\"State\")\n    return render_template('8-cities_by_states.html', states=all_states)\n\n\n@app.teardown_appcontext\ndef teardown_app(exception):\n    \"\"\"\n    Closing the storage\n    \"\"\"\n    
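# release the storage engine session at the end of each request\n    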
storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"205794959","text":"\"\"\"\nFleem\n------------\nFleem provides infrastructure for theming support in Flask\napplications. It takes care of:\n\n- Loading themes\n- Rendering templates from themes\n- Serving static files like CSS and images from themes\n\n\nLinks\n`````\n* `documentation `_\n* `development version\n `_\n\n\n\"\"\"\nfrom flask_fleem import __version__\nfrom setuptools import setup\nimport sys\nrequires = ['Flask>=0.9',\n 'Flask-Assets>=0.8']\nif sys.version_info < (2, 6):\n requires.append('simplejson')\n\nsetup(\n name='Flask-Fleem',\n version=__version__,\n url='http://',\n license='MIT',\n author='thrisp/hurrata',\n author_email='blueblank@gmail.com',\n description='Provides infrastructure for theming Flask applications',\n long_description=__doc__,\n packages=['flask_fleem'],\n zip_safe=False,\n platforms='any',\n install_requires=requires,\n test_suite='nose.collector',\n tests_require=[\n 'nose',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"420240929","text":"#!/usr/bin/python3\nfrom bs4 import BeautifulSoup\nfrom os.path import expanduser\nimport multiprocessing as mp\nimport os,time,argparse,urllib.request,shutil,re,json\n\nclass Twoch():\n def __init__(self):\n self.CONST_ATTEMPT = 1\n self.CONST_SKIP = 2\n self.CONST_DL = 3\n self.CONST_ERROR = 4\n self.CONST_HOME = expanduser(\"~\")\n self.CONST_DOMAIN = '2ch.hk'\n self.CONST_LOGFILE = '/var/log/web-crawlers.log'\n self.CONST_OPDIR = '{0}/Downloads'.format(self.CONST_HOME)\n self.CONST_BOARD = 'b'\n self.CONST_STATEMD5 = self.CONST_SAVEPAGE = self.CONST_SAVECAT = self.CONST_CASES = self.CONST_LOGGING = False\n self.CONST_CONTMD5 = []\n\n def createDirectory(self, dir):\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except:\n return\n\n def timestamp(self):\n return time.strftime(\"%H:%M:%S\", time.localtime(int(time.time())))\n\n def log(self, entry):\n if self.CONST_LOGGING is True:\n try:\n with open(self.CONST_LOGFILE,'a') as logFile:\n logFile.write('[{0}] $2ch: {1}\\n'.format(self.timestamp(), entry))\n except IOError as e:\n self.report('Encountered an error when trying to write to the logfile: Code {0} - {1}'.format(e.errno, e.strerror))\n\n def report(self, entry, log = False):\n print('[{0}] {1}'.format(self.timestamp(), str(entry)))\n if log is True:\n self.log(entry)\n\n def loadFile(self, readFile):\n if os.access(readFile, os.R_OK):\n try:\n with open(readFile, 'r') as f:\n return(f.read().splitlines())\n except:\n self.report('{0} file was not found or could not be read (Exiting)'.format(readFile), True)\n raise ValueError('{0} file was not found or could not be read'.format(readFile))\n else:\n self.log('{0} file could not be read - Permission denied 
(Exiting)'.format(readFile))\n            raise ValueError('{0} file could not be read (Permission denied)'.format(readFile))\n\n    def request(self, url):\n        try:\n            with urllib.request.urlopen(url) as response:\n                return [response.read().decode(response.headers.get_content_charset()), True]\n        except urllib.error.HTTPError as e:\n            if e.code == 503:\n                return ['{0} (Probably Cloudflare, this should be temporary and resolve itself soon.)'.format(e.code), False]\n            else:\n                return [e.code, False]\n        except urllib.error.URLError as e:\n            return [str(e.reason), False]\n\n    def save(self, url, filename, tid):\n        self.createDirectory('{0}/{1}'.format(self.CONST_OPDIR, self.CONST_BOARD+tid))\n        savePath = '{0}/{1}/{2}'.format(self.CONST_OPDIR, self.CONST_BOARD+tid, filename)\n        if not os.path.exists(savePath):\n            self.report(filename)\n            with urllib.request.urlopen(url) as response, open(savePath, 'wb') as of:\n                try:\n                    shutil.copyfileobj(response, of)\n                except IOError as e:\n                    self.report('Encountered an error when saving {0} ({1} - {2})'.format(filename, e.errno, e.strerror), True)\n                    return self.CONST_ERROR\n                else:\n                    return self.CONST_DL\n        else:\n            return self.CONST_SKIP\n        return self.CONST_ATTEMPT\n\n    def multiprocDownload(self, container):\n        pool = mp.Pool()\n        results = pool.starmap(self.save, zip(container[0], container[1], container[2]))\n        if results.count(self.CONST_SKIP) >= 1:\n            self.report('Downloaded {0} file(s), {1} file(s) already exist'.format(results.count(self.CONST_DL), results.count(self.CONST_SKIP)), True)\n        else:\n            self.report('Downloaded {0} file(s)'.format(results.count(self.CONST_DL)), True)\n        if results.count(self.CONST_ERROR) >= 1:\n            self.report('Encountered {0} error(s) when downloading'.format(results.count(self.CONST_ERROR)), True)\n        return\n\n    def savePageText(self, id, source):\n        try:\n            self.createDirectory('{0}/{1}'.format(self.CONST_OPDIR, self.CONST_BOARD+id))\n            filePath = '{0}/{1}/{2}.html'.format(self.CONST_OPDIR, self.CONST_BOARD+id, id)\n            with open(filePath,'w') as sPage:\n                sPage.write(str(source))\n            return os.path.exists(filePath)\n        except IOError as e:\n            self.report('Encountered an error when saving page {0}: Code {1} - {2}'.format(id, e.errno, e.strerror), True)\n            return False\n    \n    def saveCatalogData(self, catalogData):\n        try:\n            self.createDirectory('{0}/catalogs/{1}'.format(self.CONST_OPDIR, self.CONST_BOARD))\n            filePath = '{0}/catalogs/{1}/{2}.json'.format(self.CONST_OPDIR, self.CONST_BOARD, int(time.time()))\n            with open(filePath,'w') as sPage:\n                sPage.write(str(catalogData))\n            return os.path.exists(filePath)\n        except IOError as e:\n            self.report('Encountered an error when saving the catalog: Code {0} - {1}'.format(e.errno, e.strerror), True)\n            return False\n\n    def fetchImageURLs(self, matchedThreads):\n        container=[[],[],[],[]]\n        try:\n            for match in matchedThreads:\n                try:\n                    with urllib.request.urlopen('https://{2}/{1}/res/{0}.html'.format(match, self.CONST_BOARD, self.CONST_DOMAIN)) as response:\n                        pageSource = response.read().decode(response.headers.get_content_charset(), 'ignore')\n                        if self.CONST_SAVEPAGE is True:\n                            self.savePageText(match, pageSource)\n                        postWrapper = BeautifulSoup(pageSource, \"html.parser\").find(\"div\", { \"class\" : \"thread\" })\n                        imgCount = 0\n                        allPosts = postWrapper.findAll(\"div\", { \"class\" : [\"post-wrapper\", \"oppost-wrapper\"] })\n                        for post in allPosts:\n                            images = post.findAll(\"div\", { \"class\" : \"images\" })\n                            for image in images:\n                                imgCount += 1\n                                filename = (image.find(\"a\", { \"class\" : \"desktop\" }).contents[0]).__str__()\n                                
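# direct file URL: https://domain/board/src/thread_id/filename\n                                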
container[0].append('https://{3}/{0}/src/{1}/{2}'.format(self.CONST_BOARD, match, filename, self.CONST_DOMAIN))\n container[1].append(filename)\n container[2].append(match)\n self.report('Fetched {0} image(s) from Thread {1} ..'.format(imgCount, match))\n except:\n self.report('\\033[91mEncountered an error when trying to scrape thread {0}\\033[0m'.format(match), True)\n return container\n except:\n self.report('Encountered an error when trying to scrape for link(s) (Exiting)', True)\n raise ValueError('Could not scrape link(s)')\n \n def timeSince(self, sec):\n hours = (sec) / (60 * 60)\n minutes = ((sec) - (int(hours) * (60 * 60))) / 60\n seconds = ((sec) - (int(hours) * (60 * 60))) - (int(minutes) * 60)\n if int(hours) > 0:\n return '{0}h {1}m {2}s'.format(int(hours), int(minutes), int(seconds))\n else:\n if int(minutes) > 0:\n return '{0}m {1}s'.format(int(minutes), int(seconds))\n else:\n return '{0}s'.format(int(seconds))\n \n def getJson(self, Js):\n try:\n Jso = json.loads(Js)\n return Jso\n except:\n return False\n \n def scrapeEverything(self):\n activeThreads = self.getAllThreads()\n if len(activeThreads) > 0:\n self.report('Fetching image URLs from {0} threads, please wait this may take a while ..'.format(len(activeThreads)))\n allImages = self.fetchImageURLs(activeThreads)\n self.report('Fetched {0} image URLs, starting download this may take a while ..'.format(len(allImages[0])))\n if len(allImages) > 0:\n self.report('Downloading to: {0} ..'.format(os.path.abspath(self.CONST_OPDIR)))\n self.multiprocDownload(allImages)\n else:\n self.report('Could not fetch any images.')\n else:\n self.report('Could not find any active threads.')\n return\n \n def getAllThreads(self):\n container = []\n catalogReq = self.request('https://{0}/{1}/catalog.json'.format(self.CONST_DOMAIN, self.CONST_BOARD))\n if catalogReq[1] is True:\n Jso = self.getJson(catalogReq[0])\n if Jso is not False:\n for node in Jso['threads']:\n container.append(node['num'])\n return list(set(container))\n\n def searchThreads(self, Jsd, Kws):\n if self.CONST_SAVECAT is True:\n self.saveCatalogData(Jsd)\n Jso = self.getJson(Jsd)\n if Jso is not False:\n matches = []\n hashCount = 0\n self.report('Found {0} thread(s)'.format(len(Jso['threads'])))\n startTime = time.time()\n for node in Jso['threads']:\n if self.CONST_STATEMD5 is True:\n for i in node['files']:\n hashCount += 1\n for x, md5 in enumerate(self.CONST_CONTMD5):\n if i['md5'] == md5:\n matches.append(node['num'])\n self.report('\\033[92mThread {0} is matching hash ({1}) \\'{2}\\'\\033[0m'.format(node['num'], x+1, md5), True)\n for Key in Kws:\n if self.CONST_CASES is True:\n match = re.search('{0}'.format(Key), (node['comment']))\n else:\n match = re.search('(?i)({0})'.format(Key), (node['comment']))\n if match:\n matches.append(node['num'])\n self.report('\\033[92mThread: {0} Match: {2} Last Post: {3} ago\\033[0m'.format(node['num'], Key, match.groups(), (self.timeSince(int(time.time()) - int(node['lasthit'])))), True)\n endTime = time.time()\n if self.CONST_STATEMD5 is True:\n self.report('Searched {0}(against {1}) hashes and {2}(against {3}) threads in {4}s'.format(hashCount, len(self.CONST_CONTMD5), len(Jso['threads']), len(Kws), round((endTime - startTime), 3)))\n else:\n self.report('Searched {0} threads in {1}s'.format(len(Jso['threads']), round((endTime - startTime), 3)))\n return list(set(matches))\n \n def scrapeLink(self, fullUrl):\n match = re.search('2ch.hk/(.*)/res/(.*).html', fullUrl)\n if match:\n self.CONST_BOARD = match.group(1)\n container 
= self.fetchImageURLs([match.group(2)])\n if len(container[0]) >=1:\n self.createDirectory(self.CONST_OPDIR)\n self.report('Downloading to: {0} ..'.format(os.path.abspath(self.CONST_OPDIR)))\n self.multiprocDownload(container)\n else:\n self.report('Couldn\\'t find anything to scrape, has it 404\\'d?')\n else:\n self.report('Couldn\\'t parse URL')\n return\n \n def initiate(self, Kwf): \n if self.CONST_CASES is True:\n self.report('Matching is case-sensitive') \n \n if os.path.exists(Kwf):\n keywords = self.loadFile(Kwf)\n if cmd5file != '':\n if os.path.exists(cmd5file):\n self.CONST_CONTMD5 = self.loadFile(cmd5file)\n if len(self.CONST_CONTMD5) >= 1:\n self.CONST_STATEMD5 = True\n self.report('MD5 is enabled ({0} loaded)'.format(len(self.CONST_CONTMD5)))\n else:\n self.report('MD5 was enabled, but nothing could be read (Is the file empty?).')\n\n if self.CONST_LOGGING is True:\n try:\n open(self.CONST_LOGFILE, 'a').close()\n if os.access(self.CONST_LOGFILE, os.W_OK) is False:\n self.CONST_LOGGING = False\n self.report('Can\\'t write to logfile - Permission denied (Disabled Logging)')\n except PermissionError as e:\n self.CONST_LOGGING = False\n self.report('Logfile could not be created: Code {0} - {1} (Disabled Logging)'.format(e.errno, e.strerror))\n\n self.report('Keyword(s): ' + str(len(keywords)))\n self.catalog = 'https://{0}/{1}/catalog.json'.format(self.CONST_DOMAIN, self.CONST_BOARD)\n self.report('Requesting: {0}'.format(self.catalog))\n self.catalogReq = self.request(self.catalog)\n if self.catalogReq[1] is True:\n results = self.searchThreads(self.catalogReq[0], keywords)\n if len(results)>=1:\n self.report('Scraping links ..')\n container = self.fetchImageURLs(results)\n if len(container) >=1:\n self.createDirectory(self.CONST_OPDIR)\n self.report('Downloading to: {0} ..'.format(os.path.abspath(self.CONST_OPDIR)))\n self.multiprocDownload(container)\n else:\n self.report('A matching thread was found but nothing was scraped')\n else:\n self.report('No matches were found')\n else:\n self.report('\\033[91mCatalog could not be retrieved, error code: {0}\\033[0m'.format(self.catalogReq[0]), True)\n else:\n self.report('Keyword path does not exist, please check the path and try again.', True)\n\n def main(self):\n parser=argparse.ArgumentParser(description = '== 2ch.hk Catalog Scanner / Scraper == ')\n parser.add_argument('keywordFile',metavar='keywords',default='keywords',type=str,nargs='?',help='Keyword file (1 per line)')\n parser.add_argument('-b',dest='cboard',default='b',type=str,help='Set a custom board',required=False)\n parser.add_argument('-d',dest='cdestination',default=self.CONST_OPDIR,type=str,help='Specify a output directory (where everything gets downloaded)',required=False)\n parser.add_argument('-md5',dest='cmd5file',default='',type=str,help='Search for MD5s (Catalog provides MD5 hash of OPs image(s))',required=False)\n parser.add_argument('-l',action='store_true',help=\"Enables logging ({0})\".format(self.CONST_LOGFILE))\n parser.add_argument('-cs',action='store_true',help=\"Enables case sensitive matching\")\n parser.add_argument('-sp',action='store_true',help=\"Save the threads source code to a file (Text ONLY)\")\n parser.add_argument('-sc',action='store_true',help=\"Save the catalog data (.Json) to a folder\")\n parser.add_argument('-dl',dest='cdl',default=[],nargs='*',help='Search for nothing, instead scrape one or more specific threads',required=False)\n parser.add_argument('-se',action='store_true',help='Search for nothing, instead scrape everything from 
every active thread. (slow!)',required=False)\n        globals().update(vars(parser.parse_args()))\n        \n        self.CONST_BOARD = cboard\n        self.CONST_OPDIR = cdestination\n        self.CONST_LOGGING = l\n        self.CONST_SAVEPAGE = sp\n        self.CONST_SAVECAT = sc\n        self.CONST_CASES = cs\n        \n        if se:\n            self.scrapeEverything()\n        else:\n            if cdl:\n                for i in cdl:\n                    self.scrapeLink(i)\n            else:\n                self.initiate(keywordFile)\n\nif __name__ == \"__main__\":\n    Twoch().main()\n","sub_path":"2ch-scraper.py","file_name":"2ch-scraper.py","file_ext":"py","file_size_in_byte":15937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"431894037","text":"import sys\nimport igraph\n\n\ndef get_stdin():\n    buf = \"\"\n    for line in sys.stdin:\n        buf = buf + line\n    return int(buf)\n\n\nif __name__ == \"__main__\":\n    size = get_stdin()\n\n    graph = igraph.Graph.Barabasi(size, 10)\n\n    result = graph.pagerank()\n\n    print(result)\n","sub_path":"PageRank/Openfaas/funtion.py","file_name":"funtion.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"359262015","text":"#!/usr/bin/env python\n#coding: utf-8\n\nfrom scipy import signal\nimport numpy as np\nimport wfdb\nimport matplotlib.pyplot as plt\nimport sys\n\ndef extract_short_peaks(x, fs, bias_window_ms=250, peak_length_ms=20, peak_interval_ms=500):\n    \"\"\"\n    extract_short_peaks localizes signal spikes whose duration is shorter than a given limit\n    :param x: numpy array - signal samples\n    :param fs: sampling frequency, Hz\n    :param bias_window_ms: window width for background suppression (ms); 0 - the background is not suppressed\n    :param peak_length_ms: maximum spike duration\n    :param peak_interval_ms: minimum time interval between spikes (to suppress interference)\n    :return: np_array with the signal sample numbers at which spikes were found\n    \"\"\"\n\n    samples_per_ms = fs/1000\n    #print(samples_per_ms * peak_interval_ms)\n\n    if bias_window_ms:\n        # cosine smoothing aperture of width bias_window_ms\n        h = signal.hann(samples_per_ms * bias_window_ms)\n        h = h / sum(h)\n\n        # the envelope (background) is computed by convolving with the smoothing aperture and is then subtracted from the input signal\n        lfsignal = x - signal.convolve(x, h, mode=\"same\")\n    else:\n        lfsignal = np.array(x, 'float')\n\n    # Prepare a high-frequency residual that emphasizes short spikes\n\n    h = signal.hann(samples_per_ms * peak_length_ms)\n    h = h / sum(h)\n\n    hfsignal = lfsignal - signal.convolve(lfsignal, h, mode=\"same\")\n    #print(hfsignal)\n    # in this HF residual, find local maxima separated by at least peak_interval_ms\n    extrema = signal.argrelmax(hfsignal, 0, int(samples_per_ms * peak_interval_ms))\n\n    pks = []\n    sigsize = len(x)\n\n    # Now the crude part: the maxima were found in the \"distorted\" signal (hfsignal),\n    # so they may be shifted relative to the peaks of the original signal x.\n    # Therefore we inspect the neighborhood of each peak.\n    # Moreover, hfsignal is by definition noisier, and its local maxima may be spurious,\n    # so we introduce a threshold on the signal value at the peak.\n\n    peak_threshold = 0  # its scale is undefined and this is the most treacherous parameter; it should eventually be eliminated.\n    # for good signals a threshold of 0 can be tried - it is universal\n    \n    search_window = samples_per_ms * 10  # 10 milliseconds\n\n    for pos in extrema[0]:\n        if hfsignal[pos] > peak_threshold:\n            # refine the maximum using the original 
signal, scanning the neighborhood of the current sample\n            n1 = int(max(0, pos - search_window))\n            n2 = int(min(sigsize, pos + search_window))\n            delta = np.argmax(lfsignal[n1:n2])\n            pks.append(n1 + delta)\n\n    # the result can be converted to milliseconds as 1000 * pks / fs\n    return np.array(pks)\n\ndef main():\n    # load the data\n    recordname = sys.argv[1] # The name of the WFDB record to be read (without any file extensions)\n    sampto = int(sys.argv[2]) # The final sample number to read for each channel\n    peak_interval_ms = int(sys.argv[3])\n    sig, fields = wfdb.rdsamp(recordname, sampto=sampto)\n    x = sig[:,0]\n    fs = fields[\"fs\"] # sampling frequency (in samples per second per signal)\n\n    pks = extract_short_peaks(x, fs, peak_interval_ms=peak_interval_ms)\n    A = [x[i] for i in pks]\n    I = [(pks[i+1] - pks[i])/1000 for i in range(len(pks)-1)]\n\n    plt.style.use(\"ggplot\")\n    t = np.array(range(0,sig.shape[0]))/fs\n    fig_size = plt.rcParams[\"figure.figsize\"]\n    fig_size[0] = 12\n    fig_size[1] = 6\n    plt.rcParams[\"figure.figsize\"] = fig_size\n    plt.plot(t, x)\n    plt.plot(pks/fs, A, \"b+\", markersize=6)\n    #plt.plot(pks/fs, np.zeros(len(pks)), \"gd\", markersize=6)\n    plt.title(\"MIT-BIH Arrhythmia record\")\n    plt.xlabel(\"time/s\")\n    plt.ylabel(fields[\"units\"][0])\n    plt.show()\nif __name__ == \"__main__\":\n    main()","sub_path":"sigsegment.py","file_name":"sigsegment.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"337394000","text":"# ============LICENSE_START==========================================\n# org.onap.vvp/test-engine\n# ===================================================================\n# Copyright © 2017 AT&T Intellectual Property. All rights reserved.\n# ===================================================================\n#\n# Unless otherwise specified, all software contained herein is licensed\n# under the Apache License, Version 2.0 (the “License”);\n# you may not use this software except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n#\n# Unless otherwise specified, all documentation contained herein is licensed\n# under the Creative Commons License, Attribution 4.0 Intl. 
(the “License”);\n# you may not use this documentation except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://creativecommons.org/licenses/by/4.0/\n#\n# Unless required by applicable law or agreed to in writing, documentation\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ============LICENSE_END============================================\n#\n# ECOMP is a trademark and service mark of AT&T Intellectual Property.\nfrom pprint import pprint\n\nfrom wheel.signatures import assertTrue\n\nfrom iceci.decorator.exception_decor import exception\nfrom services.constants import Constants\nfrom services.database.db_user import DBUser\nfrom services.frontend.base_actions.wait import Wait\nfrom services.helper import Helper\nfrom services.logging_service import LoggingServiceFactory\nfrom services.types import API\nfrom tests.uiTests.test_ui_base import TestUiBase\nfrom utils.cryptography import CryptographyText\n\n\nlogger = LoggingServiceFactory.get_logger()\n\n\nclass TestBucketE2E(TestUiBase):\n\n    def create_bucket_and_validate_users(self):\n        user_content = API.VirtualFunction.create_engagement(\n            wait_for_gitlab=True)\n        API.VirtualFunction.set_eng_stage(\n            user_content, Constants.EngagementStages.ACTIVE)\n        bucket_id = user_content[\n            'engagement_manual_id'] + \"_\" + user_content['vfName'].lower()\n        Wait.bucket_to_create(bucket_id)\n        bucket = API.Rados.get_bucket(bucket_id)\n        assertTrue(API.Rados.is_bucket_ready(bucket_id))\n        assertTrue(bucket != \"None\")\n        assertTrue(API.Rados.users_of_bucket_ready_after_created(\n            bucket_id, user_content['uuid']))\n        # validate users added to bucket\n        grants = API.Rados.get_bucket_grants(bucket_id)\n        count = 0\n        for g in grants:\n            if g.id == user_content['uuid']:\n                count += 1\n\n        assertTrue(count > 0)\n        return bucket, user_content\n\n    @exception()\n    def test_validate_bucket_created(self):\n        bucket, user_content = self.create_bucket_and_validate_users()\n        # create upload file\n        str_content = Helper.rand_string(\n            \"randomString\") + Helper.rand_string(\"randomNumber\")\n        fileName = Helper.rand_string(\"randomString\")\n        bucket_id = user_content[\n            'engagement_manual_id'] + \"_\" + user_content['vfName'].lower()\n        Wait.bucket_to_create(bucket_id)\n        bucket = API.Rados.get_bucket(bucket_id)\n        assertTrue(API.Rados.is_bucket_ready(bucket_id))\n        key = bucket.new_key(fileName + '.dat')\n        key.set_contents_from_string(str_content)\n        pprint(key.generate_url(expires_in=400))\n# DOWNLOAD AN OBJECT (TO A FILE)\n        key = bucket.get_key(fileName + '.dat')\n        key.get_contents_to_filename('/home/' + fileName + '.dat')\n        key.delete()\n\n    @exception()\n    def test_validate_bucket_removed(self):\n        bucket, user_content = self.create_bucket_and_validate_users()\n        # set Completed Stage\n        API.VirtualFunction.set_eng_stage(\n            user_content, Constants.EngagementStages.COMPLETED)\n        # validate users removed from bucket\n        bucket_id = user_content[\n            'engagement_manual_id'] + \"_\" + user_content['vfName'].lower()\n        assertTrue(API.Rados.users_of_bucket_ready_after_complete(\n            bucket_id, user_content['full_name']))\n        assertTrue(API.Rados.is_bucket_ready(bucket_id))\n        assertTrue(bucket != \"None\")\n        # try to create/upload a file - this must fail\n        str_content = Helper.rand_string(\n            \"randomString\") + Helper.rand_string(\"randomNumber\")\n        fileName = 
Helper.rand_string(\"randomString\")\n bucket = API.Rados.get_bucket(bucket_id)\n assertTrue(API.Rados.is_bucket_ready(bucket_id))\n key = bucket.new_key(fileName + '.dat')\n key.set_contents_from_string(str_content)\n pprint(key.generate_url(expires_in=400))\n# DOWNLOAD AN OBJECT (TO A FILE)\n key = bucket.get_key(fileName + '.dat')\n key.get_contents_to_filename('/home/' + fileName + '.dat')\n key.delete()\n\n @exception()\n def test_validate_upload_download_image_with_bucket_user(self):\n bucket, user_content = self.create_bucket_and_validate_users()\n # connect to bucket with specific user\n bucket_id = user_content[\n 'engagement_manual_id'] + \"_\" + user_content['vfName'].lower()\n access_key = DBUser.get_access_key(user_content['uuid'])\n secret_key = DBUser.get_access_secret(user_content['uuid'])\n secret = CryptographyText.decrypt(secret_key)\n bucket_for_specific_user = API.Rados.get_bucketfor_specific_user(\n bucket_id, access_key, secret)\n assertTrue(bucket_for_specific_user is not None)\n # create upload file with user\n str_content = Helper.rand_string(\n \"randomString\") + Helper.rand_string(\"randomNumber\")\n fileName = Helper.rand_string(\"randomString\")\n key = bucket_for_specific_user.new_key(fileName + '.dat')\n key.set_contents_from_string(str_content)\n pprint(key.generate_url(expires_in=3600))\n# DOWNLOAD AN OBJECT (TO A FILE)\n key = bucket_for_specific_user.get_key(fileName + '.dat')\n key.get_contents_to_filename('/home/' + fileName + '.dat')\n key.delete()\n","sub_path":"tests/uiTests/test_bucket_e2e.py","file_name":"test_bucket_e2e.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"354907003","text":"from typing import (\n Tuple,\n Dict,\n List,\n Iterator,\n Iterable,\n Optional,\n Any\n)\nfrom pathlib import Path\nimport click\nimport toml\n\nfrom robotidy.version import __version__\nfrom robotidy.app import Robotidy\nfrom robotidy.transformers import load_transformers\nfrom robotidy.utils import GlobalFormattingConfig, split_args_from_name_or_path\n\n\nINCLUDE_EXT = ('.robot', '.resource')\nHELP_MSG = f\"\"\"\nVersion: {__version__}\n\nRobotidy is a tool for formatting Robot Framework source code.\nSee examples at the end of this help message too see how you can use Robotidy.\nFor more documentation check README section at https://github.com/MarketSquare/robotframework-tidy\n\"\"\"\nEPILOG = \"\"\"\nExamples:\n # Format `path/to/src.robot` file\n $ robotidy path/to/src.robot\n\n # Format every Robot Framework file inside `dir_name` directory\n $ robotidy dir_name\n\n # List available transformers:\n $ robotidy --list-transformers\n \n # Display transformer documentation\n $ robotidy --describe-transformer \n\n # Format `src.robot` file using `SplitTooLongLine` transformer only\n $ robotidy --transform SplitTooLongLine src.robot\n\n # Format `src.robot` file using `SplitTooLongLine` transformer only and configured line length 140\n $ robotidy --transform SplitTooLongLine:line_length=140 src.robot\n\n\"\"\"\n\n\nclass RawHelp(click.Command):\n def format_help_text(self, ctx, formatter):\n if self.help:\n formatter.write_paragraph()\n for line in self.help.split('\\n'):\n formatter.write_text(line)\n\n def format_epilog(self, ctx, formatter):\n if self.epilog:\n formatter.write_paragraph()\n for line in self.epilog.split('\\n'):\n formatter.write_text(line)\n\n\nclass TransformType(click.ParamType):\n name = \"transform\"\n\n def convert(self, value, param, 
ctx):\n name = ''\n try:\n name, args = split_args_from_name_or_path(value)\n except ValueError:\n exc = f'Invalid {name} transformer configuration. ' \\\n f'Parameters should be provided in format name=value, delimited by :'\n raise ValueError(exc)\n return name, args\n\n\ndef find_project_root(srcs: Iterable[str]) -> Path:\n \"\"\"Return a directory containing .git, or robotidy.toml.\n That directory will be a common parent of all files and directories\n passed in `srcs`.\n If no directory in the tree contains a marker that would specify it's the\n project root, the root of the file system is returned.\n \"\"\"\n if not srcs:\n return Path(\"/\").resolve()\n\n path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]\n\n # A list of lists of parents for each 'src'. 'src' is included as a\n # \"parent\" of itself if it is a directory\n src_parents = [\n list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs\n ]\n\n common_base = max(\n set.intersection(*(set(parents) for parents in src_parents)),\n key=lambda path: path.parts,\n )\n\n for directory in (common_base, *common_base.parents):\n if (directory / \".git\").exists():\n return directory\n\n if (directory / \"robotidy.toml\").is_file():\n return directory\n\n return directory\n\n\ndef find_config(src_paths: Iterable[str]) -> Optional[str]:\n project_root = find_project_root(src_paths)\n config_path = project_root / 'robotidy.toml'\n return str(config_path) if config_path.is_file() else None\n\n\ndef read_config(ctx: click.Context, param: click.Parameter, value: Optional[str]) -> Optional[str]:\n # if --config was not used, try to find robotidy.toml file\n if not value:\n value = find_config(ctx.params.get(\"src\", ()))\n if value is None:\n return None\n try:\n config = parse_config(value)\n except (toml.TomlDecodeError, OSError) as e:\n raise click.FileError(\n filename=value, hint=f\"Error reading configuration file: {e}\"\n )\n click.echo(f'Reading config from {value}')\n if not config:\n return None\n else:\n # Sanitize the values to be Click friendly. 
For more information please see:\n # https://github.com/psf/black/issues/1458\n # https://github.com/pallets/click/issues/1567\n config = {\n k: str(v) if not isinstance(v, (list, dict)) else v\n for k, v in config.items()\n }\n\n default_map: Dict[str, Any] = {}\n if ctx.default_map:\n default_map.update(ctx.default_map)\n default_map.update(config.get('main', {}))\n\n transformers = []\n for transformer, configurables in config.get('transformers', {}).items():\n if configurables:\n transformer += ':' + ':'.join(f'{key}={value}' for key, value in configurables.items())\n transformers.append(transformer)\n default_map['transform'] = transformers\n\n ctx.default_map = default_map\n return value\n\n\ndef parse_config(path: str) -> Dict[str, Any]:\n config = toml.load(path)\n return {k.replace('--', '').replace('-', '_'): v for k, v in config.items()}\n\n\ndef iterate_dir(paths: Iterable[Path]) -> Iterator[Path]:\n for path in paths:\n if path.is_file():\n if path.suffix not in INCLUDE_EXT:\n continue\n yield path\n elif path.is_dir():\n yield from iterate_dir(path.iterdir())\n\n\ndef get_paths(src: Tuple[str, ...]):\n sources = set()\n for s in src:\n path = Path(s).resolve()\n if path.is_file():\n sources.add(path)\n elif path.is_dir():\n sources.update(iterate_dir(path.iterdir()))\n elif s == '-':\n sources.add(path)\n\n return sources\n\n\n@click.command(cls=RawHelp, help=HELP_MSG, epilog=EPILOG)\n@click.option(\n '--transform',\n type=TransformType(),\n multiple=True,\n metavar='TRANSFORMER_NAME',\n help=\"Transform files from [PATH(S)] with given transformer\"\n)\n@click.argument(\n \"src\",\n nargs=-1,\n type=click.Path(\n exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True\n ),\n is_eager=True,\n metavar='[PATH(S)]'\n)\n@click.option(\n '--overwrite/--no-overwrite',\n default=True,\n help='Overwrite source files.',\n show_default=True\n)\n@click.option(\n '--diff',\n is_flag=True,\n help='Output diff of each processed file.',\n show_default=True\n)\n@click.option(\n '--check',\n is_flag=True,\n help=\"Don't overwrite files and just return status. Return code 0 means nothing would change. \"\n \"Return code 1 means that at least 1 file would change. Any internal error will overwrite this status.\",\n show_default=True\n)\n@click.option(\n '-s',\n '--spacecount',\n type=click.types.INT,\n default=4,\n help='The number of spaces between cells in the plain text format.\\n',\n show_default=True\n)\n@click.option(\n '-l',\n '--lineseparator',\n type=click.types.Choice(['native', 'windows', 'unix']),\n default='native',\n help=\"Line separator to use in outputs. The default is 'native'.\\n\"\n \"native: use operating system's native line separators\\n\"\n \"windows: use Windows line separators (CRLF)\\n\"\n \"unix: use Unix line separators (LF)\",\n show_default=True\n)\n@click.option(\n '-p',\n '--usepipes',\n is_flag=True,\n help=\"Use pipe ('|') as a column separator in the plain text format.\",\n show_default=True\n)\n@click.option(\n '-sl',\n '--startline',\n default=None,\n type=int,\n help=\"Limit robotidy only to selected area. If --endline is not provided, format text only at --startline. \"\n \"Line numbers start from 1.\",\n show_default=True\n)\n@click.option(\n '-el',\n '--endline',\n default=None,\n type=int,\n help=\"Limit robotidy only to selected area. 
\"\n \"Line numbers start from 1.\",\n show_default=True\n)\n@click.option(\n '-v',\n '--verbose',\n is_flag=True,\n show_default=True\n)\n@click.option(\n \"--config\",\n type=click.Path(\n exists=True,\n file_okay=True,\n dir_okay=False,\n readable=True,\n allow_dash=False,\n path_type=str,\n ),\n is_eager=True,\n callback=read_config,\n help=\"Read configuration from FILE path.\",\n)\n@click.option(\n '--list-transformers',\n is_eager=True,\n is_flag=True,\n help='List available transformers and exit.'\n)\n@click.option(\n '--describe-transformer',\n default=None,\n metavar='TRANSFORMER_NAME',\n help='Show documentation for selected transformer.'\n)\n@click.version_option(version=__version__, prog_name='robotidy')\n@click.pass_context\ndef cli(\n ctx: click.Context,\n transform: List[Tuple[str, Dict]],\n src: Tuple[str, ...],\n overwrite: bool,\n diff: bool,\n check: bool,\n spacecount: int,\n lineseparator: str,\n usepipes: bool,\n verbose: bool,\n config: Optional[str],\n startline: Optional[int],\n endline: Optional[int],\n list_transformers: bool,\n describe_transformer: Optional[str]\n):\n if list_transformers:\n transformers = load_transformers(None)\n click.echo('Run --describe-transformer to get more details. Transformers:')\n for transformer in transformers:\n click.echo(transformer)\n ctx.exit(0)\n if describe_transformer is not None:\n transformers = load_transformers(None)\n if describe_transformer in transformers:\n click.echo(f\"Transformer {describe_transformer}:\")\n click.echo(transformers[describe_transformer].__doc__)\n else:\n click.echo(f\"Transformer with the name '{describe_transformer}' does not exist\")\n ctx.exit(0)\n\n if config and verbose:\n click.echo(f'Loaded {config} configuration file')\n\n formatting_config = GlobalFormattingConfig(\n use_pipes=usepipes,\n space_count=spacecount,\n line_sep=lineseparator,\n start_line=startline,\n end_line=endline\n )\n sources = get_paths(src)\n tidy = Robotidy(\n transformers=transform,\n src=sources,\n overwrite=overwrite,\n show_diff=diff,\n formatting_config=formatting_config,\n verbose=verbose,\n check=check\n )\n status = tidy.transform_files()\n ctx.exit(status)\n","sub_path":"robotidy/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":10316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"119322478","text":"description=\"Search candidates\"\n#########################################################################\nimport os,sys,glob,shutil\nimport subprocess\nimport time\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nfrom astropy.table import Table, Column, vstack\nimport gw\n\nif __name__ == \"__main__\":\n start_time = time.time()\n parser = argparse.ArgumentParser(description=description,\\\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"trigger\",help=\"trigger name\")\n parser.add_argument(\"filter\",help=\"filter\",choices=[\"g\",\"r\",\"i\"])\n parser.add_argument(\"date_new\",help='date new')\n parser.add_argument(\"date_ref\",help='reference epoch')\n parser.add_argument(\"pointing\",help='pointing number')\n parser.add_argument(\"-s\",\"--search\",\n help=\"Search direction (p-ositive, n-egative, b-oth)\",\n choices=[\"p\",\"n\",\"b\"],default='b')\n parser.add_argument(\"-c\", \"--clobber\",action=\"store_true\",\\\n dest='clobber',default=False,help='Clobber existing files') \n parser.add_argument(\"-v\", 
\"--verbose\",dest=\"verbose\",action=\"store_true\",\\\n default=False,help='Enable task progress report')\n\n args = parser.parse_args()\n\ndef gw_search(datain,dataou,trigger,filtro,date_new,date_ref,pointing,optlist,\n search,clobber,verbose):\n \n fdir_n = trigger+'/'+date_new+'/'\n fdir_r = trigger+'/'+date_ref+'/'\n fnew = '_'.join([trigger,'VST',filtro,date_new.replace('-',''),pointing])\n fref = '_'.join([trigger,'VST',filtro,date_ref.replace('-',''),pointing])\n cnew = '_'.join(['cat',trigger,'VST',filtro,date_new.replace('-',''),\n pointing])\n cref = '_'.join(['cat',trigger,'VST',filtro,date_ref.replace('-',''),\n pointing])\n fdiff= '_'.join(['diff',trigger,filtro,date_new.replace('-',''),\n date_ref.replace('-',''),pointing])\n cdiff= '_'.join(['dcat',trigger,filtro,date_new.replace('-',''),\n date_ref.replace('-',''),pointing])\n\n if not os.path.exists(dataou+fdir_n+fdiff+\".fits\"): \n print(\"!!! Error: file\",fdiff+'.fits','does not exists !!!')\n return False\n\n if os.path.exists(dataou+fdir_n+cdiff+\".fits\"): \n if clobber: \n os.remove(dataou+fdir_n+cdiff+\".fits\")\n else:\n print(\"!!! WARNING: file\",cdiff+'.fits',\"already exists !!!\")\n return False\n\n if search in 'bn': \n if os.path.exists(dataou+fdir_n+'n'+fdiff+\".fits\"):\n os.remove(dataou+fdir_n+'n'+fdiff+\".fits\")\n pid = subprocess.Popen([\"imarith\",dataou+fdir_n+fdiff+\".fits \",\n \"-1.0\",\"mul\",dataou+fdir_n+\"n\"+fdiff+'.fits'],\n stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n output,error = pid.communicate()\n if verbose: print(output)\n if error: print(error)\n\n hdr = fits.getheader(dataou+fdir_n+fdiff+\".fits\")\n seeing = hdr['FWHM']\n threshold = float(optlist['search']['threshold'])\n\n mask = fits.getdata(dataou+fdir_n+fdiff+\".bpm.fits\")\n nn = []\n if search in 'bp': nn.append('')\n if search in 'bn': nn.append('n')\n\n alltab = {}\n for n in nn: \n _ta =\"tmp_\"+n+\"search_\"+pointing+'_'+date_new+\".cat\" \n if os.path.exists(_ta): os.remove(_ta)\n cparam = gw.runsex(dataou+fdir_n+n+fdiff,seeing,optlist,threshold,\\\n 'ascii',verbose,n+'search',pointing,date_new)\n\n tab = Table.read(_ta,format='ascii.sextractor')\n\n cc = Column(np.zeros(len(tab['X_IMAGE'])),name=\"mask\")\n tab.add_column(cc) \n\n for i in range(len(tab['X_IMAGE'])):\n _mask = mask[int(tab['Y_IMAGE'][i])-1,int(tab['X_IMAGE'][i])-1]\n tab['mask'][i] = _mask\n \n idx = np.where(tab['mask']<= 2)\n ## negative/positive ratio\n fwhm = seeing/float(optlist['global']['pixel_scale'].split()[0])\n size = int(fwhm*8)+2\n imgdiff = fits.open(dataou+fdir_n+fdiff+'.fits')\n rgood = np.zeros(len(tab))\n ydim,xdim = imgdiff[0].data.shape\n for h in idx[0]:\n xc = int(tab['X_IMAGE'][h])-1\n yc = int(tab['Y_IMAGE'][h])-1\n x1,x2 = int(xc-size),int(xc+size)\n y1,y2 = int(yc-size),int(yc+size)\n if x1 < 1: x1,x2 = 1,1+2*size\n if y1 < 1: y1,y2 = 1,1+2*size\n if x2 >= xdim: x1,x2 = xdim-1-2*size,xdim-1\n if y2 >= ydim: y1,y2 = ydim-1-2*size,ydim-1\n\n xx,yy = np.meshgrid(np.linspace(x1,x2,x2-x1+1),\\\n np.linspace(y1,y2,y2-y1+1))\n\n ii = np.where(np.sqrt((xx-xc)**2+(yy-yc)**2)>fwhm*5.)\n jj = np.where(np.sqrt((xx-xc)**2+(yy-yc)**2)1.5*imgstd)\n mm = np.where((imgsec[jj]-imgmean)<-1.5*imgstd)\n if len(pp[0])+len(mm[0])>0:\n if n!='n': rgood[h] = len(pp[0])/float(len(pp[0])+len(mm[0])) \n else: rgood[h] = len(mm[0])/float(len(pp[0])+len(mm[0])) \n\n if n=='': _ext = 'P'\n elif n=='n': _ext = 'N'\n\n cc = Column(np.array(['-']*len(tab)),name=\"search\")\n tab.add_column(cc,index=0) \n tab['search'][idx] = _ext\n\n cc = 
Column(rgood,name=\"rgood\")\n tab.add_column(cc) \n\n _tb = \"tmp_\"+n+\"search_\"+pointing+'_'+date_new+\"_dcat.fits\"\n if os.path.exists(_tb): os.remove(_tb)\n tab[idx].write(_tb,format='fits')\n \n###########\n Msg = \">>> Match diff and new/ref source catalogue\"\n if verbose: print(Msg)\n if n==\"\": \n newtab = dataou+fdir_n+cnew+\".fits\"\n reftab = dataou+fdir_r+cref+\".fits\"\n elif n=='n': \n reftab = dataou+fdir_n+cnew+\".fits\"\n newtab = dataou+fdir_r+cref+\".fits\"\n\n _tc = \"tmp_\"+n+\"search_\"+pointing+'_'+date_new+\"_merged1.fits\"\n gw.stilts_run(\"stilts tmatch2 in1=\"+_tb+\\\n \" matcher=2d params=10 values1='X_IMAGE Y_IMAGE'\"+\\\n \" in2=\"+newtab+\" values2='X_IMAGE Y_IMAGE'\"+\\\n \" out=\"+_tc+\" join=all1 find=best1\",Msg,verbose)\n \n Msg = \">>> Match with reference catalog\\n\"\n if verbose: print(Msg)\n _td = \"tmp_\"+n+\"search_\"+pointing+'_'+date_new+\"_merged2.csv\"\n gw.stilts_run(\"stilts tmatch2 matcher=skyellipse in1=\"+_tc+\\\n \" params=10 values1='X_WORLD_1 Y_WORLD_1 A_IMAGE_1 B_IMAGE_1 \"+\\\n \" THETA_IMAGE_1' in2=\"+reftab+\" ocmd='delcols Group*'\"+\\\n \" values2='X_WORLD Y_WORLD A_IMAGE B_IMAGE THETA_IMAGE'\"+\\\n \" out=\"+_td+\" ofmt=csv join=all1 find=best1\",Msg,verbose)\n\n alltab['T'+n] = Table.read(_td,format='csv')\n ii = np.isnan(alltab['T'+n]['X_IMAGE']) \n alltab['T'+n]['Separation'][ii] = 0.21*(np.sqrt(\\\n (alltab['T'+n]['X_IMAGE_1'][ii]-alltab['T'+n]['X_IMAGE'][ii])**2+\\\n (alltab['T'+n]['Y_IMAGE_1'][ii]-alltab['T'+n]['Y_IMAGE'][ii])**2))\n \n _tlist = [alltab[x] for x in alltab]\n if len(_tlist)>1:\n tab = vstack(_tlist,join_type='exact',metadata_conflicts='warn')\n\n _tout = dataou+fdir_n+cdiff+\".fits\"\n tab.write(_tout,format='fits')\n\n pid = subprocess.Popen([\"modhead\",_tout,\"FWHM\",str(seeing)],\n stderr=subprocess.PIPE,stdout=subprocess.PIPE)\n output,error = pid.communicate()\n if verbose:\n print(output)\n print(error)\n\n message = \">>> Search \"+cdiff\n trash = glob.glob(\"tmp_*search_\"+pointing+'_'+date_new+\"*\")\n for t in trash: os.remove(t)\n\n return message\n\n###############################################################################\nif __name__ == \"__main__\":\n\n datain = os.path.expandvars(\"$gw_datain\") \n dataou = os.path.expandvars(\"$gw_dataou\") \n optlist = gw.read_default()\n\n print(\">>> Search candidates in difference image\")\n\n seeing = gw_search(datain,dataou,args.trigger,args.filter,args.date_new,\n args.date_ref,'p'+args.pointing,optlist,args.search,args.clobber,\n args.verbose) \n\n print(\"********** Completed in \",int(time.time()-start_time),\"sec\")\n","sub_path":"bin/gw_search.py","file_name":"gw_search.py","file_ext":"py","file_size_in_byte":8297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"169683180","text":"########################################################################\n#\tMCU Gear(R) system Sample Code\n#\tAuther:y.kou.\n#\tweb site: http://www.milletool.com/\n#\tDate\t:\t8/OCT/2016\n#\n########################################################################\n#Revision Information\n#\n########################################################################\n#!/usr/bin/python\n\nfrom milpy import mil\nfrom milpy import milMod\nfrom milpy import wiringdata\nfrom milpy import Pwm\nimport time\n\nwiringdata.initIO()\nmodA = milMod.milMod(Pwm.getInfo(0))\t#Baseboard connector No.0\n\nif __name__=='__main__':\n\ttry:\n\t\n\t\tPwm.initPwm()\n\n\t\t#modA.connect()#I2C devices not need connect 
command\n\n\t\twhile(1):\n\t\t\tPwm.setServoPulse(0,308)\n\t\t\tPwm.setServoPulse(1,495)\n\t\t\tPwm.time.sleep(1)\n\t\t\tPwm.setServoPulse(1,308)\n\t\t\tPwm.setServoPulse(0,495)\n\t\t\ttime.sleep(1)\n\t\n\n\texcept KeyboardInterrupt:\n\t\tprint(\"detect key interrupt [ctrl]+ [C] \\n\")\n\n\tmil.cleanup()\n","sub_path":"milpython/PwmTest.py","file_name":"PwmTest.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"419949680","text":"import numpy as np\nimport re\nimport math\n\n\"\"\"\nATTENTION: Use the following dictionaries to get the correct index for each\n amino acid when accessing any type of matrix or array provided as\n parameters. Further, use those indices when generating or returning\n any matrices or arrays. Failure to do so will most likely result in\n not passing the tests.\nEXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'\n in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].\n\"\"\"\nALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'\nAA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}\nINT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}\nGAP_INDEX = AA_TO_INT['-']\n\n\nclass MSA:\n\n def __init__(self, sequences):\n \"\"\"\n Initialize the MSA class with the provided list of sequences. Check the\n sequences for correctness. Pre-calculate any statistics you seem fit.\n\n :param sequences: List containing the MSA sequences.\n \"\"\"\n self.msa_seq_list = []\n self.size = () # msa: nr rows, nr cols\n self.check_validity(sequences)\n self.abs_freq_gaps = np.zeros((self.size[1], 21), dtype=np.float64)\n self.abs_freq = np.zeros((self.size[1], 20), dtype=np.float64)\n self.calc_abs_freq()\n self.observed_aa_nr = np.count_nonzero(self.abs_freq_gaps, 1)\n self.primary_seq = re.sub('[-]', '', self.msa_seq_list[0])\n self.weight = np.zeros((self.size[0]), dtype=np.float64)\n self.all_weights = np.zeros((self.size[0], self.size[1]), dtype=np.float64)\n self.calc_weight()\n\n def calc_aa_score(self, rel_freq, bg_freq):\n aa_score = np.zeros((self.size[1], 20), dtype=np.float64)\n for foo in range(self.size[1]):\n for bar in range(20):\n if rel_freq[foo][bar] == 0:\n aa_score[foo][bar] = np.NINF\n else:\n bg = bg_freq[bar]\n aa_score[foo][bar] = 2* math.log(rel_freq[foo][bar]/bg, 2)\n return aa_score\n\n\n def calc_weight(self):\n #get number of different AAs in MSA column (r)\n for foo in range(self.size[1]):\n for bar in range(self.size[0]):\n r = self.observed_aa_nr[foo]\n index_large = AA_TO_INT[self.msa_seq_list[bar][foo]]\n s = self.abs_freq_gaps[foo][index_large]\n if r > 1 and s > 0:\n self.all_weights[bar][foo] = 1/(r*s)\n self.weight = np.sum(self.all_weights, 1, dtype=np.float64)\n\n def calc_rel_freq(self, matrix):\n sums_rows = np.zeros((self.size[1]), dtype=np.float64)\n np.sum(matrix, axis=1, dtype=np.float64, out=sums_rows)\n rel_freq = np.zeros((self.size[1], 20), dtype=np.float64)\n for foo in range(self.size[1]):\n for bar in range(20):\n rel_freq[foo][bar] = matrix[foo][bar]/sums_rows[foo]\n #print(self.rel_freq)\n return rel_freq\n\n def calc_abs_freq(self):\n for foo in range(self.size[1]):\n count = {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'Q': 0, 'E': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0,\n 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0, '-': 0}\n for bar in range(self.size[0]):\n aa = self.msa_seq_list[bar][foo]\n count[aa] = count[aa] + 1\n for amino_a in count:\n if not amino_a == '-':\n 
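# only non-gap residues are tallied into the gap-free frequency table\n                    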
self.abs_freq[foo][AA_TO_INT[amino_a]] = count[amino_a]\n self.abs_freq_gaps[foo][AA_TO_INT[amino_a]] = count[amino_a]\n\n\n\n def check_validity(self, sequences):\n aa_list = 'ARNDCQEGHILKMFPSTWYV-'\n # check not empty\n if len(sequences) >= 1:\n length = len(sequences[0])\n self.size = (len(sequences), length)\n # check all lenths same\n for foo in range(len(sequences)):\n if len(sequences[foo]) == length:\n # check only valid AAs\n for bar in range(length):\n if sequences[foo][bar] in aa_list:\n # store MSA sequence list\n self.msa_seq_list = sequences\n else:\n raise TypeError(\"amino acid not valid\")\n else:\n raise TypeError(\"not all sequences have same length\")\n else:\n raise TypeError(\"not enough sequences\")\n\n def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,\n redistribute_gaps=False, add_pseudocounts=False):\n \"\"\"\n Return a PSSM for the underlying MSA. Use the appropriate refinements \n according to the parameters. If no bg_matrix is specified, use uniform \n background frequencies.\n Every row in the resulting PSSM corresponds to a non-gap position in \n the primary sequence of the MSA (i.e. the first one).\n Every column in the PSSM corresponds to one of the 20 amino acids.\n Values that would be -inf must be replaced by -20 in the final PSSM.\n Before casting to dtype=numpy.int64, round all values to the nearest\n integer (do not just FLOOR all values).\n\n :param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).\n Access the matrix using the indices from AA_TO_INT.\n :param beta: Beta value (float) used to weight the pseudocounts \n against the observed amino acids in the MSA.\n :param use_sequence_weights: Calculate and apply sequence weights.\n :param redistribute_gaps: Redistribute the gaps according to the \n background frequencies.\n :param add_pseudocounts: Calculate and add pseudocounts according \n to the background frequencies.\n\n :return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).\n L = ungapped length of the primary sequence.\n \"\"\"\n if use_sequence_weights:\n matrix1 = self.get_weight_count()\n else:\n matrix1 = self.abs_freq_gaps\n if not bg_matrix:\n bg_matrix = np.full((20, 20), 0.0025)\n bg_freq_matrix = np.sum(bg_matrix, 1, dtype=np.float64)\n if redistribute_gaps:\n pssm = self.redistribute_bg(bg_freq_matrix, matrix1)\n else:\n pssm = np.delete(matrix1, -1, axis=1)\n pssm = pssm.astype(np.float64)\n if add_pseudocounts:\n pssm = self.add_pseudocounts(pssm, bg_freq_matrix, bg_matrix, beta)\n pssm = self.calc_rel_freq(pssm)\n pssm = self.calc_aa_score(pssm, bg_freq_matrix)\n pssm = self.remove_gap_rows(pssm)\n pssm = np.where(pssm == np.NINF, -20, pssm)\n return np.rint(pssm).astype(np.int64)\n\n def remove_gap_rows(self, pssm):\n res = pssm\n for foo in range(self.size[1]-1, -1, -1):\n if self.msa_seq_list[0][foo] == '-':\n #remove row foo from pssm\n res = np.delete(res, foo, axis = 0)\n return res\n\n def add_pseudocounts(self, pssm, bg_freq_matrix, sub_freq, beta):\n all_sums = np.zeros((self.size[1], 20), dtype=np.float64)\n alpha = self.get_number_of_observations()-1\n adjusted_freq = np.zeros((self.size[1], 20), dtype=np.float64)\n for i in range(self.size[1]):\n for a in range(20):\n sum_j = 0.0\n for j in range(20):\n q = sub_freq[j][a]\n p = bg_freq_matrix[j]\n f = pssm[i][j]\n sum_j += (f/p)*q\n all_sums[i][a] = sum_j\n for foo in range(self.size[1]):\n for bar in range(20):\n adjusted_freq[foo][bar] = ((alpha*pssm[foo][bar]) + (beta * all_sums[foo][bar]))/(alpha+beta)\n return 
adjusted_freq\n\n\n def redistribute_bg(self, bg_matrix, matrix1):\n #for each in size[1], add (bg_freq or 0.05)* nr of gaps\n res = np.zeros((self.size[1],20), dtype=np.float64)\n for foo in range(self.size[1]):\n gap_count = matrix1[foo][20]\n if gap_count > 0:\n for bar in range(20):\n count = gap_count * bg_matrix[bar]\n res[foo][bar] = matrix1[foo][bar] + count\n else:\n for bar in range(20):\n res[foo][bar] = matrix1[foo][bar]\n return res\n\n def get_weight_count(self):\n weight_counts = np.zeros((self.size[1], 21), dtype=np.float64)\n for foo in range(self.size[1]):\n for bar in range(self.size[0]):\n weight_counts[foo][AA_TO_INT[self.msa_seq_list[bar][foo]]] += self.weight[bar]\n #print(weight_counts)\n return weight_counts\n\n def get_size(self):\n \"\"\"\n Return the number of sequences in the MSA and the MSA length, i.e.\n the number of columns in the MSA. This includes gaps.\n\n :return: Tuple of two integers. First element is the number of\n sequences in the MSA, second element is the MSA length.\n \"\"\"\n return self.size\n\n def get_primary_sequence(self):\n \"\"\"\n Return the primary sequence of the MSA. In this exercise, the primary\n sequence is always the first sequence of the MSA. The returned \n sequence must NOT include gap characters.\n\n :return: String containing the ungapped primary sequence.\n \"\"\"\n return self.primary_seq\n\n def get_sequence_weights(self):\n \"\"\"\n Return the calculated sequence weights for all sequences in the MSA.\n The order of weights in the array must be equal to the order of the\n sequences in the MSA.\n\n :return: Numpy array (dtype=numpy.float64) containing the weights for\n all sequences in the MSA.\n \"\"\"\n return self.weight\n\n def get_number_of_observations(self):\n \"\"\"\n Return the estimated number of independent observations in the MSA.\n\n :return: Estimate of independent observation (dtype=numpy.float64).\n \"\"\"\n # N = 1/L *\n r_sum = np.sum(self.observed_aa_nr, 0)\n num_obs = (1/self.size[1])*r_sum\n #num_obs = -1\n #pass\n return num_obs.astype(np.float64)\n","sub_path":"codechecker/repos/4/collected_files/pssm/ga78yed.py","file_name":"ga78yed.py","file_ext":"py","file_size_in_byte":10405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"500701422","text":"def LocatePivot(list):\n length = int(len(list))\n pivot = list[0]\n loclist = []\n location = 0\n for i in range(1,length):\n if pivot > list[i]:\n location += 1\n loclist.insert(location-1,list[i])\n else:\n loclist.insert(location , list[i])\n\n loclist.insert(location,pivot)\n return (loclist,location,length-1)\n\ndef Quicksort(list):\n loclist,plocat,comparision=LocatePivot(list)\n leftlist = loclist[:plocat]\n rightlist = loclist[plocat+1:]\n if len(leftlist) > 1:\n compleft = Quicksort(leftlist)\n else:\n compleft = 0\n\n if len(rightlist) > 1:\n compright = Quicksort(rightlist)\n else:\n compright = 0\n return comparision+compleft+compright\n\n# list=[3,8,2,5,1,4,7,6]\n# print(QuickSort(list))","sub_path":"QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"465667534","text":"#!/usr/bin/env python\r\n# _*_ coding: utf-8 _*_\r\n# @Time : 2018/10/23 15:33\r\n# @Author : viekie\r\n# @Site : www.ml2ai.com\r\n# @File : mobile_data.py\r\n# @Software: PyCharm\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\nfrom __future__ import 
division\r\n\r\nimport os\r\nimport collections\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nDATASET_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'\r\n\r\nFEATURE_COLUMN_TYPES = collections.OrderedDict([\r\n    (\"symboling\", int),\r\n    (\"normalized-losses\", float),\r\n    (\"make\", str),\r\n    (\"fuel-type\", str),\r\n    (\"aspiration\", str),\r\n    (\"num-of-doors\", str),\r\n    (\"body-style\", str),\r\n    (\"drive-wheels\", str),\r\n    (\"engine-location\", str),\r\n    (\"wheel-base\", float),\r\n    (\"length\", float),\r\n    (\"width\", float),\r\n    (\"height\", float),\r\n    (\"curb-weight\", float),\r\n    (\"engine-type\", str),\r\n    (\"num-of-cylinders\", str),\r\n    (\"engine-size\", float),\r\n    (\"fuel-system\", str),\r\n    (\"bore\", float),\r\n    (\"stroke\", float),\r\n    (\"compression-ratio\", float),\r\n    (\"horsepower\", float),\r\n    (\"peak-rpm\", float),\r\n    (\"city-mpg\", float),\r\n    (\"highway-mpg\", float),\r\n    (\"price\", float)])\r\n\r\n\r\ndef parse_csv_by_pandas(file_name, yname='price', frac=0.7, seed=None):\r\n    if not os.path.exists(file_name):\r\n        file_name = tf.keras.utils.get_file(file_name, DATASET_URL)\r\n\r\n    datasets = pd.read_csv(file_name, names=FEATURE_COLUMN_TYPES.keys(),\r\n                           dtype=FEATURE_COLUMN_TYPES, na_values='?')\r\n\r\n    datasets = datasets.dropna()\r\n\r\n    if seed is None:\r\n        # np.random.seed() returns None, which left `seed` unset; draw an\r\n        # explicit seed so the train/test split is reproducible\r\n        seed = np.random.randint(2**31 - 1)\r\n    # split according to the frac argument (0.5 was hard-coded before)\r\n    train_datasets = datasets.sample(frac=frac, random_state=seed)\r\n    test_datasets = datasets.drop(train_datasets.index)\r\n\r\n    train_x, train_y = train_datasets, train_datasets.pop(yname)\r\n    test_x, test_y = test_datasets, test_datasets.pop(yname)\r\n\r\n    return (train_x, train_y), (test_x, test_y)\r\n\r\n\r\ndef input_train_func(features, label, batch_size=100):\r\n    train_samples = (dict(features), label)\r\n    train_tensors = tf.data.Dataset.from_tensor_slices(train_samples)\r\n    train_tensors = train_tensors.shuffle(buffer_size=100)\r\n    train_tensors = train_tensors.repeat()\r\n    train_tensors = train_tensors.batch(batch_size)\r\n    return train_tensors\r\n\r\n\r\ndef input_eval_func(features, labels, batch_size=32):\r\n    if labels is None:\r\n        feature = dict(features)\r\n    else:\r\n        feature = (dict(features), labels)\r\n    datasets = tf.data.Dataset.from_tensor_slices(feature)\r\n    datasets = datasets.batch(batch_size)\r\n    return datasets\r\n","sub_path":"tensorflow/02_regression/utils/mobile_data.py","file_name":"mobile_data.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}{"seq_id":"57867362","text":"\"\"\"\nSerializers for financial aid\n\"\"\"\nimport datetime\n\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import (\n    CharField,\n    ChoiceField,\n    FloatField,\n    IntegerField\n)\n\nfrom courses.models import Program\nfrom dashboard.models import ProgramEnrollment\nfrom financialaid.api import (\n    determine_auto_approval,\n    determine_tier_program,\n    determine_income_usd,\n)\nfrom financialaid.constants import (\n    FinancialAidJustification,\n    FinancialAidStatus\n)\nfrom financialaid.exceptions import NotSupportedException\nfrom financialaid.models import (\n    FinancialAid,\n    TierProgram\n)\nfrom mail.api import MailgunClient\nfrom mail.utils import generate_financial_aid_email\n\n\nclass FinancialAidRequestSerializer(serializers.Serializer):\n    \"\"\"\n    Serializer for financial aid requests\n    \"\"\"\n    
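# Example payload accepted by this serializer (values are hypothetical):\n    #   {\"original_income\": 50000.0, \"original_currency\": \"USD\", \"program_id\": 1}\n    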
original_income = FloatField(min_value=0)\n original_currency = CharField()\n program_id = IntegerField()\n\n def validate(self, data):\n \"\"\"\n Validators for this serializer\n \"\"\"\n data[\"program\"] = get_object_or_404(Program, pk=data[\"program_id\"])\n if not data[\"program\"].financial_aid_availability:\n raise ValidationError(\"Financial aid not available for this program.\")\n if not ProgramEnrollment.objects.filter(program=data[\"program\"], user=self.context[\"request\"].user).exists():\n raise ValidationError(\"User not in program.\")\n return data\n\n def save(self):\n \"\"\"\n Override save method\n \"\"\"\n try:\n income_usd = determine_income_usd(\n self.validated_data[\"original_income\"],\n self.validated_data[\"original_currency\"]\n )\n except NotSupportedException:\n raise ValidationError(\"Currency not supported\")\n user = self.context[\"request\"].user\n tier_program = determine_tier_program(self.validated_data[\"program\"], income_usd)\n\n financial_aid = FinancialAid.objects.create(\n original_income=self.validated_data[\"original_income\"],\n original_currency=self.validated_data[\"original_currency\"],\n tier_program=tier_program,\n user=user,\n income_usd=income_usd,\n country_of_income=user.profile.country,\n date_exchange_rate=datetime.datetime.now(),\n country_of_residence=user.profile.country,\n )\n\n if determine_auto_approval(financial_aid, tier_program) is True:\n financial_aid.status = FinancialAidStatus.AUTO_APPROVED\n else:\n financial_aid.status = FinancialAidStatus.PENDING_DOCS\n financial_aid.save_and_log(user)\n\n return financial_aid\n\n\nclass FinancialAidActionSerializer(serializers.Serializer):\n \"\"\"\n Serializer for financial aid actions\n \"\"\"\n action = ChoiceField(\n choices=[\n FinancialAidStatus.APPROVED,\n FinancialAidStatus.PENDING_MANUAL_APPROVAL\n ],\n write_only=True\n )\n tier_program_id = IntegerField(write_only=True)\n justification = ChoiceField(\n choices=FinancialAidJustification.ALL_JUSTIFICATIONS,\n default=None,\n write_only=True\n )\n\n def validate(self, data):\n \"\"\"\n Validators for this serializer\n \"\"\"\n # Required field\n if data.get(\"action\") is None:\n raise ValidationError({\"action\": \"This field is required.\"})\n # For approving\n if data[\"action\"] == FinancialAidStatus.APPROVED:\n # Required fields\n if data.get(\"tier_program_id\") is None:\n raise ValidationError({\"tier_program_id\": \"This field is required.\"})\n if data.get(\"justification\") is None:\n raise ValidationError({\"justification\": \"This field is required.\"})\n # Required instance status\n if self.instance.status != FinancialAidStatus.PENDING_MANUAL_APPROVAL:\n raise ValidationError(\"Cannot approve an application that is not pending manual approval.\")\n # Check tier program exists\n try:\n data[\"tier_program\"] = TierProgram.objects.get(\n id=data[\"tier_program_id\"],\n program_id=self.instance.tier_program.program_id,\n current=True\n )\n except TierProgram.DoesNotExist:\n raise ValidationError({\"tier_program_id\": \"Financial Aid Tier does not exist for this program.\"})\n # For marking documents received\n if data[\"action\"] == FinancialAidStatus.PENDING_MANUAL_APPROVAL:\n if self.instance.status not in [FinancialAidStatus.PENDING_DOCS, FinancialAidStatus.DOCS_SENT]:\n raise ValidationError(\"Cannot mark documents as received for an application not awaiting docs.\")\n return data\n\n def save(self):\n \"\"\"\n Save method for this serializer\n \"\"\"\n self.instance.status = self.validated_data[\"action\"]\n if 
self.instance.status == FinancialAidStatus.APPROVED:\n self.instance.tier_program = self.validated_data[\"tier_program\"]\n self.instance.justification = self.validated_data[\"justification\"]\n elif self.instance.status == FinancialAidStatus.PENDING_MANUAL_APPROVAL:\n # This is intentionally left blank for clarity that this is a valid status for .save()\n pass\n self.instance.save()\n\n # Send email notification\n MailgunClient.send_financial_aid_email(\n acting_user=self.context[\"request\"].user,\n financial_aid=self.instance,\n **generate_financial_aid_email(self.instance)\n )\n\n return self.instance\n\n\nclass FinancialAidSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for indicating financial documents have been sent\n \"\"\"\n def validate(self, data):\n \"\"\"\n Validate method for this serializer\n \"\"\"\n if self.instance.status != FinancialAidStatus.PENDING_DOCS:\n raise ValidationError(\n \"Cannot indicate documents sent for an application that is not pending documents\"\n )\n return data\n\n def save(self):\n \"\"\"\n Save method for this serializer\n \"\"\"\n self.instance.status = FinancialAidStatus.DOCS_SENT\n self.instance.date_documents_sent = self.validated_data[\"date_documents_sent\"]\n self.instance.save()\n return self.instance\n\n class Meta:\n model = FinancialAid\n fields = (\"date_documents_sent\", )\n","sub_path":"financialaid/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"215372198","text":"import numpy\nimport unittest\nfrom pylearn2.datasets.hepatitis import Hepatitis, neg_missing\n\nclass TestHepatitis(unittest.TestCase):\n def setUp(self):\n self.dataset = Hepatitis()\n\n def test_neg_missing(self):\n self.assertEqual(neg_missing('?'), '-1')\n self.assertEqual(neg_missing('3'), '3')\n\n def test_data_integrity(self):\n numpy.testing.assert_equal(self.dataset.get_design_matrix()[0], [30.,1.,85.,18.,4.,-1.,1.,1.,0.,1.,1.,1.,1.,0.,1.,1.,1.,1.,1.])\n rng = numpy.random.RandomState()\n \n for _ in xrange(1000):\n targets = self.dataset.get_targets()[rng.randint(len(self.dataset.get_targets()))]\n non_zeros = numpy.transpose(numpy.nonzero(targets))\n\n self.assertEqual(len(non_zeros), 1)\n self.assertEqual(targets[non_zeros[0]], 1)\n\n \n","sub_path":"pylearn2/datasets/tests/test_hepatitis.py","file_name":"test_hepatitis.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"204302254","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 6 01:09:10 2019\r\n\r\n@author: Ashay Fernandes\r\n\"\"\"\r\n\r\n#DICTONARY\r\nmydictionary= {\r\n\"name\": \"Archie\",\r\n\"identity\": \"Student\",\r\n\"age\": 17\r\n}\r\nprint(mydictionary)\r\n\r\nkey = mydictionary[\"name\"]\r\nvalue = mydictionary.get(\"name\")\r\nprint(\"Key is\",key)\r\nprint(\"Value is\",value)\r\n\r\n\r\n#DICTIONARY AND LIST\r\nstudents = {'1MS16IS100':'Asha', '1MS16IS101':'ashok','1MS16IS102':'Rekha','1MS16IS103':'Suma'}\r\nlist1 = [\"value1\",\"value2\",\"value3\",\"value4\"]\r\nlist2=[\"a\",\"b\",\"c\",\"d\"]\r\n\r\n\r\n#printing student names\r\nj=0;\r\nfor i in students:\r\n print(\"Key is \",i, \"Value is \",students[i])\r\n list1[j]=students[i]\r\n list2[j]=i\r\n j=j+1\r\n\r\nprint(list1)\r\nprint(list2)\r\nprint(students.keys())\r\nprint(students.values())\r\nprint(students.items())\r\n\r\n\r\n#IF-ELSE\r\nx=-3\r\nif x>0.0:\r\n 
print(\"Positive\")\r\nelif x<0.0:\r\n print(\"Negative\")\r\n x=-1.0*x\r\nelse :\r\n print(\"Zero\")\r\n\r\n","sub_path":"python lab/Labwork1.py","file_name":"Labwork1.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"524818773","text":"import tweepy\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport time\n\n#consumer key, consumer secret, access token, access secret.\nckey=\"WNxuoNZkT2YfX65mio3eAvf8f\"\ncsecret=\"QxhuS1xu6NxyfhiSErxTmhYgRgK8HupSPJzVhBWkSXIJEZ4hme\"\natoken=\"1071084889154310145-qYXNlTJtyne6iqaaaRDipyYWMR7wyh\"\nasecret=\"zjrmqthh0W7cYuVGjt7Uztgn6tgkzWZCUEhUob8m6CxKd\"\n\nimport newsread\n\n####\nclass listener(StreamListener):\n\n def on_data(self, data):\n try:\n \n \n filed=open('livestream1.txt','a')\n tweet1= data.split(',\"text\":\"')[1].split('\",\"source\":\"')\n tweet=tweet1\n print(tweet[0])\n print(len(tweet[0]))\n print('----------------')\n if len(tweet[0])>1000:\n pass\n else:\n filed.write(tweet[0])\n filed.write('\\n')\n filed.close()\n \n return(True)\n except BaseException as e:\n print('Error: ')\n print(str(e))\n time.sleep(5)\n\n def on_error(self, status):\n print(status)\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\n\n\n\n\t\nx=newsread.returncleaned()\nprint(x)\ntwitterStream = Stream(auth, listener())\ntwitterStream.filter(track=x,languages=[\"en\"])\n \n \n \n ","sub_path":"readtweet.py","file_name":"readtweet.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"644313458","text":"from __future__ import unicode_literals\n\nimport os\nfrom urlparse import urlparse\n\nfrom jinja2 import Environment, FileSystemLoader\nfrom pkg_resources import resource_filename\nfrom werkzeug import wsgi\nfrom werkzeug.wrappers import BaseResponse as Response\n\nDEFAULT_BLOCKLIST_PATH = resource_filename(\"via\", \"default-blocklist.txt\")\nTEMPLATES_DIR = os.path.dirname(os.path.abspath(__file__)) + \"/../templates/\"\n\n\nclass Blocker(object):\n\n \"\"\"\n Blocker is a WSGI middleware that returns a static response when a\n request path matches a list of predefined domains.\n\n The list of domains and the associated reasons for blocking them are defined\n in a text file with lines in the form:\n\n \n\n Where \"\" is one of \"publisher-blocked\" or \"blocked\". Any lines\n beginning with '#' are ignored. 
Any lines not in the above form are ignored.\n \"\"\"\n\n def __init__(self, application, blocklist_path=DEFAULT_BLOCKLIST_PATH):\n self._application = application\n self._jinja_env = Environment(\n loader=FileSystemLoader(TEMPLATES_DIR), trim_blocks=True\n )\n\n self._blocklist_path = blocklist_path\n\n # dict of domain to block reason.\n self._blocked_domains = {}\n\n # mtime of the blocklist file when it was last parsed.\n self._blocklist_timestamp = 0\n\n self._update_blocklist()\n\n def __call__(self, environ, start_response):\n self._update_blocklist()\n\n url_to_annotate = wsgi.get_path_info(environ)[1:]\n parsed_url = urlparse(url_to_annotate)\n\n if not parsed_url.scheme:\n url_to_annotate = \"http://\" + url_to_annotate\n parsed_url = urlparse(url_to_annotate)\n\n hostname_to_annotate = parsed_url.hostname\n if hostname_to_annotate in self._blocked_domains:\n reason = self._blocked_domains[hostname_to_annotate]\n if reason == \"publisher-blocked\":\n template_name = \"disallow_access.html.jinja2\"\n status = 451\n else:\n template_name = \"could_not_process.html.jinja2\"\n status = 200\n\n template = self._jinja_env.get_template(template_name).render(\n url_to_annotate=url_to_annotate\n )\n resp = Response(template, status=status, mimetype=\"text/html\")\n return resp(environ, start_response)\n\n return self._application(environ, start_response)\n\n def _update_blocklist(self):\n blocklist_stinfo = os.stat(self._blocklist_path)\n if blocklist_stinfo.st_mtime == self._blocklist_timestamp:\n return\n\n self._blocked_domains = _parse_blocklist(self._blocklist_path)\n self._blocklist_timestamp = blocklist_stinfo.st_mtime\n\n\ndef _parse_blocklist(path):\n blocked_domains = {}\n\n with open(path) as blocklist:\n for line in blocklist:\n line = line.strip()\n\n if not line or line.startswith(\"#\"):\n # Empty or comment line.\n continue\n\n try:\n domain, reason = line.split(\" \")\n blocked_domains[domain] = reason\n except Exception:\n pass\n\n return blocked_domains\n","sub_path":"via/blocker.py","file_name":"blocker.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"386995142","text":"import sys\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets, QtOpenGL\nimport PyQt5.QtMultimedia as QM\nfrom PyQt5.QtMultimediaWidgets import QGraphicsVideoItem\nimport numpy as np\nimport pandas as pd\nimport pyqtgraph\n\nimport visproj.qt5_structuralize.config as config\nfrom visproj.qt5_structuralize.load_eye_fixation import load_eye_tracking\n\nDEBUG = True\n\nclass videoWidget(QtWidgets.QWidget):\n def __init__(self, *args, **kwargs):\n QtWidgets.QWidget.__init__(self, *args, **kwargs)\n self.scale = 0.7\n self.view_width = 852*self.scale\n self.view_height = 480*self.scale\n # for rolling gesture view\n self.play_state = False\n self.gesture_dict = {0: 'metaphoric', 1: 'beats', 2: 'deictics', 3: 'iconic'}\n self.ges_dict = {}\n self.one = ''\n self.two = ''\n self.time = -1\n self.noseData = None\n self.audio_data = None\n self.body_pts = None\n\n self.flag_video = False\n self.flag_audio = False\n self.flag_nose = False\n self.flag_skeleton = False\n self.flag_eye = False\n\n self.dx, self.dy, self.x_scale, self.y_scale = 0, 0, self.scale, self.scale\n self.eye_dx, self.eye_dy, self.eye_x_scale, self.eye_y_scale = 10, -110, self.scale*(6/7.0), self.scale*(6/7.0)\n\n self.skeleton_lines = []\n for i in range(len(config.SKELETON_LINES)):\n line = 
QtWidgets.QGraphicsLineItem()\n pen = QtGui.QPen()\n pen.setWidth(3)\n pen.setBrush(QtGui.QColor(0, 255, 0, 200))\n line.setPen(pen)\n self.skeleton_lines.append(line)\n\n self.eye_circle_objs = []\n\n self.line = QtWidgets.QGraphicsLineItem()\n self.pen = QtGui.QPen()\n self.pen.setWidth(5)\n self.pen.setBrush(QtGui.QColor(0, 0, 255, 200))\n self.line.setPen(self.pen)\n\n self.view = QtWidgets.QGraphicsView()\n self.view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.view.setViewport(QtOpenGL.QGLWidget())\n self.view.setFixedSize(self.view_width, self.view_height)\n self.view.setGeometry(0, 0, self.view_width, self.view_height)\n self.videoItem = QGraphicsVideoItem()\n\n # MUST setsize that is half the size of the GraphicsView\n # Most likey a bug in Qt's implementation on Mac OS X\n self.videoItem.setSize(QtCore.QSizeF(self.view_width / 2, self.view_height / 2))\n\n self.player = QM.QMediaPlayer()\n self.player.setVideoOutput(self.videoItem)\n\n self.scene = QtWidgets.QGraphicsScene()\n self.scene.addItem(self.videoItem)\n for i in range(len(self.skeleton_lines)):\n self.scene.addItem(self.skeleton_lines[i])\n self.scene.addItem(self.line)\n\n\n self.view.setScene(self.scene)\n self.scene.setSceneRect(0, 0, self.view_width, self.view_height)\n self.videoItem.setPos(0, 0)\n\n self.label = QtWidgets.QLabel()\n self.label.setFixedWidth(100)\n self.label.setFixedHeight(self.view_height)\n\n self.input_box = QtWidgets.QLineEdit()\n self.input_box.setFixedWidth(100)\n self.input_box.setFixedHeight(5)\n\n flo = self._set_scale_input()\n\n self.layout1 = QtWidgets.QGridLayout()\n self.layout1.addWidget(self.label, 0, 0)\n self.layout1.addWidget(self.view, 0, 1)\n self.layout1.addItem(flo, 0, 2)\n #self.layout1.addChildLayout(flo)\n #self.layout.addLayout(flo)\n self.layout = QtWidgets.QVBoxLayout(self)\n self.layout.addLayout(self.layout1)\n\n\n # plot widget\n self.graph_widget = QtWidgets.QWidget()\n # Layout of Container Widget\n self.graph_layout = QtWidgets.QVBoxLayout()\n self.graph_widget.setLayout(self.graph_layout)\n self.layout.addWidget(self.graph_widget)\n\n self.createUI()\n\n self.view.show()\n\n def _set_scale_input(self):\n flo = QtWidgets.QFormLayout()\n self.qe_dx, self.qe_dy = QtWidgets.QLineEdit(), QtWidgets.QLineEdit()\n self.qe_x_scale, self.qe_y_scale = QtWidgets.QLineEdit(), QtWidgets.QLineEdit()\n self.qe_eye_dx, self.qe_eye_dy = QtWidgets.QLineEdit(), QtWidgets.QLineEdit()\n self.qe_eye_x_scale, self.qe_eye_y_scale = QtWidgets.QLineEdit(), QtWidgets.QLineEdit()\n\n self.qe_dx.setValidator(QtGui.QDoubleValidator())\n self.qe_dy.setValidator(QtGui.QDoubleValidator())\n self.qe_x_scale.setValidator(QtGui.QDoubleValidator())\n self.qe_y_scale.setValidator(QtGui.QDoubleValidator())\n\n self.qe_eye_dx.setValidator(QtGui.QDoubleValidator())\n self.qe_eye_dy.setValidator(QtGui.QDoubleValidator())\n self.qe_eye_x_scale.setValidator(QtGui.QDoubleValidator())\n self.qe_eye_y_scale.setValidator(QtGui.QDoubleValidator())\n\n self.qe_dx.editingFinished.connect(self._recal_position)\n self.qe_dy.editingFinished.connect(self._recal_position)\n self.qe_x_scale.editingFinished.connect(self._recal_position)\n self.qe_y_scale.editingFinished.connect(self._recal_position)\n self.qe_eye_dx.editingFinished.connect(self._recal_position)\n self.qe_eye_dy.editingFinished.connect(self._recal_position)\n self.qe_eye_x_scale.editingFinished.connect(self._recal_position)\n 
self.qe_eye_y_scale.editingFinished.connect(self._recal_position)\n\n self.qe_dx.setText(str(self.dx))\n self.qe_dy.setText(str(self.dy))\n self.qe_x_scale.setText(str(self.x_scale))\n self.qe_y_scale.setText(str(self.y_scale))\n\n self.qe_eye_dx.setText(str(self.eye_dx))\n self.qe_eye_dy.setText(str(self.eye_dy))\n self.qe_eye_x_scale.setText(str(self.eye_x_scale))\n self.qe_eye_y_scale.setText(str(self.eye_y_scale))\n\n flo.addRow(\"skeleton dx\", self.qe_dx)\n flo.addRow(\"skeleton dy\", self.qe_dy)\n flo.addRow(\"skeleton x scale\", self.qe_x_scale)\n flo.addRow(\"skeleton y scale\", self.qe_y_scale)\n\n flo.addRow(\"eye dx\", self.qe_eye_dx)\n flo.addRow(\"eye dy\", self.qe_eye_dy)\n flo.addRow(\"eye x scale\", self.qe_eye_x_scale)\n flo.addRow(\"eye y scale\", self.qe_eye_y_scale)\n\n self.qe_frame = QtWidgets.QLineEdit()\n self.qe_frame.setValidator(QtGui.QIntValidator())\n self.qe_frame.editingFinished.connect(self._jump_frame)\n flo.addRow(\"jump to frame\", self.qe_frame)\n\n return flo\n\n def _jump_frame(self):\n frame = int(self.qe_frame.text())\n slide_value = frame*1000/30.0\n self.positionSlider.setValue(slide_value)\n\n def _update_frame_text(self, n):\n self.qe_frame.setText(str(n))\n\n def _get_new_eye_draw_object(self):\n ret = QtWidgets.QGraphicsEllipseItem()\n pen = QtGui.QPen()\n pen.setWidth(2)\n pen.setBrush(QtGui.QColor(255, 0, 0, 200))\n ret.setPen(pen)\n return ret\n\n def createUI(self):\n self.graph_view = pyqtgraph.PlotWidget(self)\n self.graph_view.setGeometry(QtCore.QRect(0, 0, 910, 300))\n self.graph_view.setXRange(-100, 100)\n self.graph_view.setYRange(0, 68)\n self.graph_layout.addWidget(self.graph_view)\n\n self.hbuttonbox = QtWidgets.QHBoxLayout()\n\n # video position slider\n self.positionSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self)\n self.positionSlider.setStyleSheet(\"QSlider::groove:horizontal {background-color:grey;}\"\n \"QSlider::handle:horizontal {background-color:black; height:8px; width: 8px;}\")\n self.positionSlider.sliderMoved.connect(self.setPosition)\n self.positionSlider.valueChanged.connect(self.display_gesture)\n\n # play button\n self.playbutton = QtWidgets.QPushButton(\"Play\")\n self.hbuttonbox.addWidget(self.playbutton)\n self.playbutton.clicked.connect(self.play_pause)\n\n self.button_video = QtWidgets.QPushButton(\"Hide Video\")\n self.hbuttonbox.addWidget(self.button_video)\n self.button_video.clicked.connect(self.toggle_video)\n\n self.button_audio = QtWidgets.QPushButton(\"Hide audio\")\n self.hbuttonbox.addWidget(self.button_audio)\n self.button_audio.clicked.connect(self.toggle_audio)\n\n self.button_nose = QtWidgets.QPushButton(\"Hide Nose\")\n self.hbuttonbox.addWidget(self.button_nose)\n self.button_nose.clicked.connect(self.toggle_nose)\n\n self.button_skeleton = QtWidgets.QPushButton(\"Hide Skeleton\")\n self.hbuttonbox.addWidget(self.button_skeleton)\n self.button_skeleton.clicked.connect(self.toggle_skeleton)\n\n self.button_eye = QtWidgets.QPushButton(\"Hide Eye Tracking\")\n self.hbuttonbox.addWidget(self.button_eye)\n self.button_eye.clicked.connect(self.toggle_eye)\n\n self.button_load_all = QtWidgets.QPushButton(\"Load All Data\")\n self.hbuttonbox.addWidget(self.button_load_all)\n self.button_load_all.clicked.connect(self._load_all_data)\n\n self.hbuttonbox.addStretch(1)\n self.layout.addWidget(self.positionSlider) # ,3,0,1,-1)\n self.layout.addLayout(self.hbuttonbox) # ,4,0,1,-1)\n\n self.player.setNotifyInterval(200)\n self.player.positionChanged.connect(self.updateUI)\n # 
self.player.positionChanged.connect(self.printTime)\n self.player.durationChanged.connect(self.setRange)\n self.player.stateChanged.connect(self.setButtonCaption)\n self.setLayout(self.layout)\n\n def _load_all_data(self):\n home = str(config.DEFAULT_DATA_DIR)\n # video\n self.flag_video = False\n if DEBUG:\n self.filename_video = config.DEFAULT_VIDEO\n else:\n self.filename_video, _ = QtWidgets.QFileDialog.getOpenFileName(None, caption=\"Open Video File\", directory=home)\n\n if self.filename_video:\n url = QtCore.QUrl.fromLocalFile(self.filename_video)\n content = QM.QMediaContent(url)\n # self.videoItem.setAspectRatioMode(1)\n self.player.setMedia(content)\n self.toggle_video() #\n\n # open gesture\n if DEBUG:\n self.filename_gesture = config.DEFAULT_GES\n else:\n self.filename_gesture, _ = QtWidgets.QFileDialog.getOpenFileName(None, \"Open Gesture File\", home)\n if self.filename_gesture:\n f = open(self.filename_gesture).readlines()\n for i in f:\n t = i.strip().split()\n self.ges_dict[int(t[1])] = self.gesture_dict[int(t[0])]\n\n # audio\n self.flag_audio = False\n if DEBUG:\n self.filename_audio = config.DEFAULT_AUDIO\n else:\n self.filename_audio, _ = QtWidgets.QFileDialog.getOpenFileName(None, \"Open Audio File\", home)\n if self.filename_audio:\n self.audio_data = pd.read_csv(self.filename_audio).as_matrix()\n self.audio_data = self.audio_data[:, 0]\n self.toggle_audio()\n\n # nose vector\n self.flag_nose = False\n if DEBUG:\n self.filename_nose = config.DEFAULT_NOSE\n else:\n self.filename_nose, _ = QtWidgets.QFileDialog.getOpenFileName(None, \"Open Nose Vector File\", home, selectedFilter='*.csv')\n if self.filename_nose:\n self.noseData = pd.read_csv(self.filename_nose).as_matrix()\n self.toggle_nose()\n\n # skeleton\n self.flag_skeleton = False\n if DEBUG:\n self.filename_skeleton = config.DEFAULT_SKELETON\n else:\n self.filename_skeleton, _ = QtWidgets.QFileDialog.getOpenFileName(None, \"Open Skeleton File\", home, selectedFilter='*.csv')\n if self.filename_skeleton:\n self.body_pts = pd.read_csv(self.filename_skeleton, header=None)\n self.toggle_skeleton()\n\n # eye tracking\n self.flag_eye = False\n if DEBUG:\n self.filename_eye = config.DEFAULT_EYE_TRACKING\n else:\n self.filename_eye, _ = QtWidgets.QFileDialog.getOpenFileName(None, \"Open Eye Tracking File\", home)\n if self.filename_eye:\n self.eye_data = load_eye_tracking(self.filename_eye, config.EYE_SELECT_COLS)\n # self.eye_data = self._shift_pts_df(self.eye_data, config.EYE_X, config.EYE_Y)\n\n self.eye_t_start = self.eye_data[config.EYE_T_START].tolist()\n self.eye_t_end = self.eye_data[config.EYE_T_END].tolist()\n self.eye_frame2index = {} # get frame to df index mapping\n index = 0\n for frame in range(max(self.eye_t_end)):\n cur_frame_start = self.eye_t_start[index]\n cur_frame_end = self.eye_t_end[index]\n if cur_frame_start <= frame < cur_frame_end:\n self.eye_frame2index[frame] = index\n elif frame >= cur_frame_end:\n index += 1\n if self.eye_data is not None:\n # generate eye tracking plot\n for index in range(self.eye_data.shape[0]):\n row = self.eye_data.loc[index, :]\n x, y, pipil_size = row[config.EYE_X], row[config.EYE_Y], row[config.EYE_PUPIL_SIZE]\n x, y = self._shift_pts_eye_fixation(x, y)\n frame_start, frame_end = row[config.EYE_T_START], row[config.EYE_T_END]\n frame_duration = frame_end - frame_start\n w = frame_duration / 2\n\n new_eye_cycle = self._get_new_eye_draw_object()\n new_eye_cycle.setRect(x - w / 2, y - w / 2, w, w)\n new_eye_cycle.setVisible(False)\n 
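# circles are created hidden; _draw_eye_fixation() reveals them frame by frame\n                    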
self.scene.addItem(new_eye_cycle)\n self.eye_circle_objs.append(new_eye_cycle)\n self.toggle_eye()\n self._recal_position()\n\n def _recal_position(self):\n self.dx = float(self.qe_dx.text())\n self.dy = float(self.qe_dy.text())\n self.x_scale = float(self.qe_x_scale.text())\n self.y_scale = float(self.qe_y_scale.text())\n self.eye_dx = float(self.qe_eye_dx.text())\n self.eye_dy = float(self.qe_eye_dy.text())\n self.eye_x_scale = float(self.qe_eye_x_scale.text())\n self.eye_y_scale = float(self.qe_eye_y_scale.text())\n print(\"recal\")\n if self.eye_data is not None:\n # generate eye tracking plot\n for index in range(len(self.eye_circle_objs)):\n row = self.eye_data.loc[index, :]\n x, y, pipil_size = row[config.EYE_X], row[config.EYE_Y], row[config.EYE_PUPIL_SIZE]\n x, y = self._shift_pts_eye_fixation(x, y)\n frame_start, frame_end = row[config.EYE_T_START], row[config.EYE_T_END]\n frame_duration = frame_end - frame_start\n w = frame_duration / 2\n\n new_eye_cycle = self.eye_circle_objs[index]\n new_eye_cycle.setRect(x - w / 2, y - w / 2, w, w)\n\n def _shift_pts(self, x, y):\n x += self.dx\n y += self.dy\n x *= self.x_scale\n y *= self.y_scale\n return x, y\n\n def _shift_pts_eye_fixation(self, x, y):\n x += self.eye_dx\n y += self.eye_dy\n x *= self.eye_x_scale\n y *= self.eye_y_scale\n return x, y\n\n def _get_body_pts_df(self, pts, idx):\n if pts is None: return None\n df = pts.iloc[idx * 3:idx * 3 + 2, :].transpose()\n df.columns = ['x', 'y']\n #df = df.interpolate(method='linear', axis=0).ffill().bfill()\n return df\n\n def _shift_pts_matrix(self, m):\n for i in range(len(m)):\n m[i][0], m[i][1] = self._shift_pts(m[i][0], m[i][1])\n return m\n\n def _shift_pts_df(self, df, col_x, col_y):\n #print(df.columns)\n df['x'] = df.apply(lambda x: self._shift_pts(x[col_x], x[col_y])[0], axis=1)\n df['y'] = df.apply(lambda x: self._shift_pts(x[col_x], x[col_y])[1], axis=1)\n return df\n\n def setButtonCaption(self, state):\n if self.player.state() == QM.QMediaPlayer.PlayingState:\n self.playbutton.setText(\"Pause\")\n else:\n self.playbutton.setText(\"Play\")\n\n def toggle_video(self):\n self.flag_video = self._toggle_button(self.flag_video, self.button_video, \"Video\")\n\n def toggle_audio(self):\n self.flag_audio = self._toggle_button(self.flag_audio, self.button_audio, \"Audio\")\n\n def toggle_nose(self):\n self.flag_nose = self._toggle_button(self.flag_nose, self.button_nose, \"Nose\")\n\n def toggle_skeleton(self):\n self.flag_skeleton = self._toggle_button(self.flag_skeleton, self.button_skeleton, \"Skeleton\")\n\n def toggle_eye(self):\n self.flag_eye = self._toggle_button(self.flag_eye, self.button_eye, \"Eye Tracking\")\n\n def _toggle_button(self, flag, button, text):\n flag = not flag\n if flag: button.setText(\"Hide \" + text)\n else: button.setText(\"Show \" + text)\n return flag\n\n def play_pause(self):\n # self.videoItem.setSize(QtCore.QSizeF(self.view_width, self.view_height))\n if self.player.state() == QM.QMediaPlayer.PlayingState:\n self.play_state = False\n self.player.pause()\n else:\n self.play_state = True\n self.player.play()\n\n def get_state(self):\n return QM.QMediaPlayer.PlayingState\n\n def setPosition(self, position):\n self.positionSlider.setValue(position)\n self.player.setPosition(position)\n\n def setRange(self, duration):\n self.positionSlider.setRange(0, self.player.duration())\n\n def updateUI(self, position):\n self.positionSlider.setValue(position)\n\n prev_index = -1\n KEEP_DRAW_EYE_TRACKING = True\n def _draw_eye_fixation(self, frame, 
is_draw=True):\n if frame not in self.eye_frame2index:\n return\n index = self.eye_frame2index[frame]\n if index == self.prev_index:\n return\n\n for i in range(index+1):\n self.eye_circle_objs[i].setVisible(is_draw)\n for i in range(index+1, len(self.eye_circle_objs)):\n self.eye_circle_objs[i].setVisible(False)\n\n self.prev_index = index\n # print(\"draw x,y: %f,%f, duration: %f\" % (x, y, frame_duration))\n return\n\n def _draw_skeleton(self, idx, is_draw=True):\n for i in range(len(config.SKELETON_LINES)):\n self.skeleton_lines[i].setVisible(is_draw)\n if not is_draw: continue\n n, m = config.SKELETON_LINES[i]\n pts1 = self._get_body_pts_df(self.body_pts, n)\n pts2 = self._get_body_pts_df(self.body_pts, m)\n x1, y1 = self._shift_pts(pts1.loc[idx, 'x'], pts1.loc[idx, 'y'])\n x2, y2 = self._shift_pts(pts2.loc[idx, 'x'], pts2.loc[idx, 'y'])\n self.skeleton_lines[i].setLine(x1, y1,\n x2, y2)\n\n def _draw_nose(self, idx, is_draw=True):\n x1, y1 = self._shift_pts(self.noseData[idx][0], self.noseData[idx][1])\n x2, y2 = self._shift_pts(self.noseData[idx][2], self.noseData[idx][3])\n if is_draw:\n self.line.setVisible(True)\n self.line.setLine(x1, y1, x2, y2)\n else:\n self.line.setVisible(False)\n\n\n def display_gesture(self):\n frame = int(self.positionSlider.value() * 30 / 1000)\n self._update_frame_text(frame)\n\n self._draw_skeleton(frame, self.flag_skeleton)\n self._draw_nose(frame, self.flag_nose)\n self._draw_eye_fixation(frame, self.flag_eye)\n\n indx1 = int(self.positionSlider.value() / 10) + 100\n indx2 = np.arange(indx1 - 100, indx1 + 101)\n self.graph_view.clearPlots()\n if self.flag_audio and self.audio_data is not None:\n self.graph_view.plot(np.arange(-100, 101), self.audio_data[indx2], pen=pyqtgraph.mkPen(width=4, color='r'))\n self.graph_view.show()\n\n if not self.flag_video:\n self.player.stop()\n\n if self.time != int(self.positionSlider.value() / 1000):\n self.time = int(self.positionSlider.value() / 1000)\n if self.time in self.ges_dict.keys():\n self.three = self.two\n self.two = self.one\n self.one = self.ges_dict[self.time] + ' ' + '
' + str(self.time)\n                self.label.setText(\n                    \"
\" + self.one + \"\" + self.two + \"\" + self.three + \"\")\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n w = videoWidget()\n w.show()\n sys.exit(app.exec_())\n","sub_path":"python/visproj/qt5_structuralize/video_widget.py","file_name":"video_widget.py","file_ext":"py","file_size_in_byte":20656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"500818825","text":"#!/usr/bin/env python3\n\nfrom networkit import *\nimport pprint\nimport timeit\nimport argparse\nimport csv\nimport sys\nimport tempfile\nimport numpy as np\nimport subprocess\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--start', type=int, default=4)\nparser.add_argument('--end', type=int, default=6)\nparser.add_argument('--steps', type=int, default=3)\nparser.add_argument('--runlength', type=int, default=10)\nparser.add_argument('--runs', type=int, default=25)\nparser.add_argument('--maxcount', type=int, default=10**8)\nparser.add_argument('--trades', type=int, default=5)\nparser.add_argument('--output', type=str, default='data.csv')\nparser.add_argument('--const', action='store_true')\nparser.add_argument('--linear', action='store_true')\nparser.add_argument('--with_vles', type=str, default='')\n\nparser.add_argument('--with_boost', action='store_true')\nparser.add_argument('--without_boost', action='store_true')\n\nargs = parser.parse_args()\n\nboosts = []\nif not (args.without_boost or args.with_boost or args.with_vles):\n print(\"Select at least one algorithm --with_boost / --without_boost\")\n sys.exit(-1)\nelse:\n if (args.without_boost):\n boosts.append(False)\n if (args.with_boost):\n boosts.append(True)\n\nif not (args.linear or args.const):\n print(\"Select at least one scaling --linear / --const\")\n sys.exit(-1)\n\n\ndef exp10series(start, end, steps):\n return [int(10**(k/steps + start)) for k in range((end - start) * steps + 1)]\n\nconfig = []\n\nif args.const:\n const_min_deg = 50\n const_max_deg = 10000\n node_series = exp10series(args.start, args.end, args.steps)\n config.extend(list(zip(node_series,\n [const_min_deg] * len(node_series),\n [const_max_deg] * len(node_series),\n [\"const\"] * len(node_series))))\n\nif args.linear:\n const_min_deg = 10\n node_series = exp10series(args.start, args.end, args.steps)\n config.extend(list(zip(node_series,\n [const_min_deg] * len(node_series),\n [x // 20 for x in node_series],\n [\"linear\"] * len(node_series))))\n\n\nprint(\"Printing Configurations:\")\npprint.pprint(config)\n\nwith open(args.output, 'a') as out_file:\n writer = csv.writer(out_file, delimiter='\\t')\n for run in range(args.runs):\n for (num_nodes, min_deg, max_deg, scale) in config:\n print(\"[====] At configuration: (%d, %d, %d, %s)\" % (num_nodes, min_deg, max_deg, scale))\n pldgen = generators.PowerlawDegreeSequence(min_deg, max_deg, -2)\n pldgen.run()\n avgdeg = pldgen.getExpectedAverageDegree()\n # skip this setting since too much RAM will be used\n if (num_nodes * avgdeg > args.maxcount):\n continue\n\n degseq = pldgen.getDegreeSequence(num_nodes)\n hhgen = generators.HavelHakimiGenerator(degseq)\n G = hhgen.generate()\n\n print(\"[ ] Graph has %d edges\" % G.numberOfEdges())\n\n list_trades = [curveball.GlobalTradeGenerator(args.runlength, num_nodes).generate() for _ in range(args.trades)]\n\n for boost in boosts:\n print(\"[ ===] Boost: \", boost)\n algo = curveball.Curveball(G, boost)\n for r in range(args.trades):\n start_time = timeit.default_timer()\n algo.run(list_trades[r])\n end_time = 
timeit.default_timer()\n print(\"[ ==] Finished round %d in time %f\" % (r, end_time - start_time))\n\n writer.writerow([scale, \"emcb\" if boost else \"imcb\", r, num_nodes, min_deg, max_deg, G.numberOfEdges(), end_time - start_time])\n out_file.flush()\n\n del(algo)\n\n del(list_trades)\n\n if args.with_vles:\n with tempfile.TemporaryDirectory() as dir:\n outf = dir + \"/dist\"\n dd_raw = np.array(centrality.DegreeCentrality(G).run().scores(), dtype=int)\n dd = np.vstack(np.unique(dd_raw, return_counts=True)).T\n np.savetxt(outf, dd, fmt=\"%d %d\")\n with open(\"/dev/null\", \"w\") as fnull:\n p = subprocess.Popen([args.with_vles, \"-v\", \"-t\", \"-d\", outf], stdout=fnull, stderr=subprocess.PIPE)\n _, err = p.communicate()\n last_lines = str(err).split(\"\\\\n\")[-3:-1]\n\n assert(\"Performed : \" in last_lines[0])\n assert(\"Time used: \" in last_lines[1])\n\n performed = int(last_lines[0].split(\" \")[-2])\n time = float(last_lines[1].split(\" \")[-1])\n\n expef = args.trades * G.numberOfEdges() * 2\n print(\"Performed %d swaps (%d expected) in %f s\" % (performed, expef, time))\n assert(performed == expef)\n\n writer.writerow([scale, \"vles\", -1 * args.trades, num_nodes, min_deg, max_deg, G.numberOfEdges(), time])\n","sub_path":"experiments/benchmark_runtime.py","file_name":"benchmark_runtime.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"297882020","text":"'''\r\nCreated on 23 Jan 2014\r\n\r\n@author: MBradley\r\n'''\r\nfrom screenui.raceview import StartLineFrame,AddFleetDialog\r\nfrom model.race import RaceManager\r\nfrom screenui.audio import AudioManager\r\nfrom persistence.recovery import RaceRecoveryManager\r\n\r\nimport threading \r\nimport logging\r\nimport sys\r\n\r\nimport datetime\r\nimport tkMessageBox\r\nimport Tkinter\r\nimport Queue\r\nimport ConfigParser\r\nimport os\r\nimport pickle\r\n\r\nRACES_LIST = ['Large handicap','Small handicap','Toppers','Large and small handicap','Teras','Oppies']\r\n\r\n\r\n#\r\n# LightsController uses the EasyDaqUSBRelay to control the hardware lights. It refreshes the lights every\r\n# 500 milliseconds until all fleets have started. \r\n#\r\nclass LightsController():\r\n \r\n def __init__(self, tkRoot,easyDaqRelay,raceManager):\r\n self.tkRoot = tkRoot\r\n self.easyDaqRelay = easyDaqRelay\r\n self.raceManager = raceManager\r\n # we start assuming that our lights are off\r\n self.currentLights = [LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF]\r\n self.wireController()\r\n \r\n self.updateTimer = None\r\n \r\n def wireController(self):\r\n \r\n \r\n self.raceManager.changed.connect(\"generalRecall\",self.handleGeneralRecall)\r\n self.raceManager.changed.connect(\"sequenceStartedWithWarning\",self.handleSequenceStarted)\r\n self.raceManager.changed.connect(\"sequenceStartedWithoutWarning\",self.handleSequenceStarted)\r\n self.raceManager.changed.connect(\"startSequenceAbandoned\",self.handleStartSequenceAbandoned)\r\n \r\n \r\n \r\n \r\n def handleGeneralRecall(self,fleet):\r\n self.cancelUpdateTimer()\r\n self.updateLights()\r\n \r\n def handleSequenceStarted(self):\r\n self.cancelUpdateTimer()\r\n self.updateLights()\r\n \r\n def handleStartSequenceAbandoned(self):\r\n self.cancelUpdateTimer()\r\n self.updateLights()\r\n \r\n def cancelUpdateTimer(self):\r\n # if we have an update timer, cancel it. 
Note that if the update timer\r\n        # has already executed, the cancel has no effect and does not fail.\r\n        if self.updateTimer:\r\n            self.tkRoot.after_cancel(self.updateTimer)\r\n\r\n\r\n    def calculateLightsDisplay(self):\r\n        #\r\n        # our default is no lights\r\n        lights = [LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF]\r\n\r\n        # ask for the next fleet to start\r\n        nextFleetToStart = self.raceManager.nextFleetToStart()\r\n\r\n        # if we have a fleet to start\r\n        if nextFleetToStart:\r\n            secondsToStart = -1 * nextFleetToStart.adjustedDeltaSecondsToStartTime()\r\n\r\n            if secondsToStart <= 300 and secondsToStart > 240:\r\n                lights = [LIGHT_ON, LIGHT_ON, LIGHT_ON, LIGHT_ON, LIGHT_ON]\r\n            elif secondsToStart <= 240 and secondsToStart > 180:\r\n                lights = [LIGHT_ON, LIGHT_ON, LIGHT_ON, LIGHT_ON, LIGHT_OFF]\r\n            elif secondsToStart <= 180 and secondsToStart > 120:\r\n                lights = [LIGHT_ON, LIGHT_ON, LIGHT_ON, LIGHT_OFF, LIGHT_OFF]\r\n            elif secondsToStart <= 120 and secondsToStart > 60:\r\n                lights = [LIGHT_ON, LIGHT_ON, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF]\r\n            elif secondsToStart <= 60 and secondsToStart > 30:\r\n                lights = [LIGHT_ON, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF]\r\n            elif secondsToStart <= 30 and (int(secondsToStart) % 2 == 0):\r\n                lights = [LIGHT_ON, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF]\r\n            else:\r\n                lights = [LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF]\r\n\r\n        return lights\r\n\r\n\r\n    def updateLights(self):\r\n\r\n        newLights = self.calculateLightsDisplay()\r\n\r\n        if newLights != self.currentLights:\r\n            self.easyDaqRelay.sendRelayCommand(newLights)\r\n            self.currentLights = newLights\r\n\r\n        # check that we still have a fleet to start, if so,\r\n        # calculate the time until our next change\r\n\r\n        if self.raceManager.nextFleetToStart():\r\n            # make sure we update idle tasks so that the screen updates. This is particularly important in speedy mode\r\n            self.tkRoot.update_idletasks()\r\n\r\n            self.updateTimer = self.tkRoot.after(500, self.updateLights)\r\n\r\n        # if we don't have a race to start any more, set our lights to 0 and don't update ourselves again\r\n        else:\r\n            self.easyDaqRelay.sendRelayCommand([LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF])\r\n\r\n\r\n    def start(self):\r\n        self.easyDaqRelay.start()\r\n\r\n\r\n\r\n#\r\n# GunController uses the AudioManager to play a Wav file as the race \"gun\".\r\n# It does this in response to events from the race manager when races change\r\n# during the start sequence or when boats finish. It uses the Tk root\r\n# to provide an event scheduler. 
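A minimal sketch of the Tkinter calls this relies on (illustrative names,\r\n# not code from this file):\r\n#\r\n#     job_id = tkRoot.after(1000, fire_gun)   # run fire_gun once, 1000 ms from now\r\n#     tkRoot.after_cancel(job_id)             # cancel it while still pending\r\n\r\n# LIGHT_ON/LIGHT_OFF are referenced by LightsController above but are not defined\r\n# anywhere in the code shown here; 1/0 are assumed relay levels.\r\nLIGHT_ON = 1\r\nLIGHT_OFF = 0\r\n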
\r\n#\r\nclass GunController():\r\n \r\n def __init__(self, tkRoot, audioManager, raceManager):\r\n self.tkRoot = tkRoot\r\n self.audioManager = audioManager\r\n self.raceManager = raceManager\r\n self.scheduledGuns = []\r\n self.wireController()\r\n \r\n #\r\n # We wire the controller by registering with the race manager\r\n # for the events we are interested in\r\n # \r\n def wireController(self):\r\n self.raceManager.changed.connect(\"sequenceStartedWithWarning\",self.handleSequenceStartedWithWarning)\r\n self.raceManager.changed.connect(\"sequenceStartedWithoutWarning\",self.handleSequenceStartedWithoutWarning)\r\n self.raceManager.changed.connect(\"generalRecall\",self.handleGeneralRecall)\r\n self.raceManager.changed.connect(\"startSequenceAbandoned\",self.handleStartSequenceAbandoned)\r\n self.raceManager.changed.connect(\"finishAdded\", self.handleFinishAdded)\r\n \r\n \r\n def fireGun(self):\r\n self.audioManager.queueClip(\"gun\")\r\n \r\n \r\n def soundWarning(self):\r\n self.audioManager.queueClip(\"warning\")\r\n \r\n #\r\n # millis is the time of the gun. The warning beeps are for the ten secoonds prior to the gun\r\n #\r\n def scheduleWarningBeeps(self,gunMillis,finalWarning=False):\r\n for warningMillis in range(gunMillis-10000, gunMillis, 1000):\r\n self.addSchedule(self.tkRoot.after(warningMillis, self.soundWarning))\r\n # if we give a final warning instead of a gun, schedule this\r\n if finalWarning:\r\n self.addSchedule(self.tkRoot.after(gunMillis, self.soundWarning))\r\n \r\n \r\n def scheduleGun(self,millis):\r\n logging.log(logging.DEBUG,\"Scheduling gun for %d \" % millis)\r\n scheduleId = self.tkRoot.after(millis, self.fireGun)\r\n \r\n self.addSchedule(scheduleId)\r\n \r\n def addSchedule(self,scheduleId):\r\n self.scheduledGuns.append(scheduleId)\r\n \r\n def cancelSchedules(self):\r\n for aSchedule in self.scheduledGuns:\r\n self.tkRoot.after_cancel(aSchedule)\r\n self.scheduledGuns = []\r\n \r\n \r\n \r\n def scheduleGunForFleetStart(self,aFleet, secondsBefore):\r\n # calculate seconds to start of fleet\r\n # convert negative seconds to start to positive \r\n secondsToStart = aFleet.deltaSecondsToStartTime() * -1\r\n \r\n \r\n # check that the fleet is still in the future (for example if we are debugging)\r\n if secondsToStart > 0:\r\n \r\n #\r\n # to calculate the seconds to gun, we take the seconds to start \r\n # and subtract the requested seconds before divided by the test speed ratio.\r\n #\r\n # For example, with a test speed ratio of 5, the seconds to start for the\r\n # first race with an F flag start will be 600 / 5 = 120 seconds.\r\n #\r\n # For the five minute (300 seconds) gun, the calculation is:\r\n # 120 - (300/5) = 60 seconds.\r\n #\r\n # For the four minute gun (240 seconds) gun, the calculation is:\r\n # 120 - (240/5) = 72 seconds\r\n #\r\n secondsToGun = secondsToStart - secondsBefore / RaceManager.testSpeedRatio\r\n logging.info(\"Seconds to start: %d, scheduling gun for %d seconds\" % (secondsToStart,secondsToGun))\r\n gunMillis = int(1000*secondsToGun )\r\n self.scheduleWarningBeeps(gunMillis)\r\n \r\n self.scheduleGun(gunMillis)\r\n \r\n \r\n \r\n \r\n \r\n #\r\n # For a sequence start, we fire the gun then schedule our other guns. 
We ask the race manager to\r\n    # adjust our start seconds to reflect whether we have sped up the start for testing purposes.\r\n    #\r\n    def handleSequenceStartedWithWarning(self):\r\n        # schedule ten second countdown\r\n        self.scheduleWarningBeeps(10000)\r\n        # schedule gun for ten seconds\r\n        self.scheduleGun(10000)\r\n        \r\n        #\r\n        # schedule beeps for F flag down in 4 minutes time\r\n        #\r\n        fFlagDownMillis = 10000 + (4 * 60000) / RaceManager.testSpeedRatio\r\n        self.scheduleWarningBeeps(fFlagDownMillis, finalWarning=True)\r\n        \r\n        \r\n        # schedule guns for the first fleet\r\n        \r\n        self.scheduleGunForFleetStart(self.raceManager.fleets[0],300)\r\n        \r\n        # schedule guns for future fleets\r\n        \r\n        self.scheduleGunsForFutureFleetStarts()\r\n    \r\n    def handleFinishAdded(self,aFinish):\r\n        self.fireGun()\r\n    \r\n    def handleSequenceStartedWithoutWarning(self):\r\n        # fire a gun straight away\r\n        self.fireGun()\r\n        \r\n        self.scheduleGunsForFutureFleetStarts()\r\n    \r\n    def handleGeneralRecall(self,aFleet):\r\n        self.fireGun()\r\n        self.fireGun()\r\n        self.cancelSchedules()\r\n        self.scheduleGunsForFutureFleetStarts()\r\n    \r\n    def handleStartSequenceAbandoned(self):\r\n        self.cancelSchedules()\r\n    \r\n    \r\n    def scheduleGunsForFutureFleetStarts(self):\r\n        #\r\n        # iterate over all of the fleets. If a fleet is not started, schedule its guns\r\n        #\r\n        for aFleet in self.raceManager.fleets:\r\n            if not aFleet.isStarted():\r\n                self.scheduleGunForFleetStart(aFleet,240)\r\n                self.scheduleGunForFleetStart(aFleet,60)\r\n                self.scheduleGunForFleetStart(aFleet,0)\r\n    \r\n    \r\n    \r\n    \r\n    \r\nclass ScreenController():\r\n\r\n    def __init__(self,startLineFrame,raceManager,audioManager,easyDaqRelay,recoveryManager):\r\n        self.startLineFrame = startLineFrame\r\n        self.raceManager = raceManager\r\n        self.audioManager = audioManager\r\n        self.easyDaqRelay = easyDaqRelay\r\n        self.recoveryManager = recoveryManager\r\n        \r\n        self.selectedFleet = None \r\n        self.selectedFinish = None\r\n        \r\n        self.fleetButtons=[]\r\n        self.buildFleetManagerView()\r\n        \r\n        \r\n        self.wireController()\r\n        self.disableButtons()\r\n    \r\n    \r\n    def disableButtons(self):\r\n        self.startLineFrame.disableRemoveFleetButton()\r\n        self.startLineFrame.disableAbandonStartRaceSequenceButton()\r\n\r\n    def wireController(self):\r\n        self.raceManager.changed.connect("fleetAdded",self.handleFleetAdded)\r\n        self.raceManager.changed.connect("fleetRemoved",self.handleFleetRemoved)\r\n        self.raceManager.changed.connect("fleetChanged",self.handleFleetChanged)\r\n        self.raceManager.changed.connect("finishAdded",self.handleFinishAdded)\r\n        self.raceManager.changed.connect("finishChanged",self.handleFinishChanged)\r\n        self.raceManager.changed.connect("sequenceStartedWithWarning",self.handleSequenceStarted)\r\n        self.raceManager.changed.connect("sequenceStartedWithoutWarning",self.handleSequenceStarted)\r\n        \r\n        #\r\n        # Need to change this from event based to refreshing as part of the update loop\r\n        #\r\n        if self.easyDaqRelay:\r\n            self.easyDaqRelay.changed.connect("connectionStateChanged",self.handleConnectionStateChanged)\r\n        \r\n        self.startLineFrame.addFleetButton.config(command=self.addFleetClicked)\r\n        self.startLineFrame.removeFleetButton.config(command=self.removeFleetClicked)\r\n        self.startLineFrame.fleetsTreeView.bind("<<TreeviewSelect>>",self.fleetSelectionChanged)\r\n        self.startLineFrame.finishTreeView.bind("<<TreeviewSelect>>",self.finishSelectionChanged)\r\n        self.startLineFrame.startRaceSequenceWithWarningButton.config(command=self.startRaceSequenceWithWarningClicked)\r\n        
self.startLineFrame.startRaceSequenceWithoutWarningButton.config(command=self.startRaceSequenceWithoutWarningClicked)\r\n        self.startLineFrame.generalRecallButton.config(command=self.generalRecallClicked)\r\n        self.startLineFrame.gunButton.config(command=self.gunClicked)\r\n        self.startLineFrame.gunAndFinishButton.config(command=self.gunAndFinishClicked)\r\n        self.startLineFrame.abandonStartRaceSequenceButton.config(command=self.abandonStartRaceSequenceClicked)\r\n        self.startLineFrame.master.protocol("WM_DELETE_WINDOW",self.exitClicked)\r\n    \r\n    \r\n    \r\n    def buildFleetManagerView(self):\r\n        # we build our tree\r\n        \r\n        for fleet in self.raceManager.fleets:\r\n            self.appendFleetToTreeView(fleet)\r\n    \r\n    \r\n    def appendFleetToTreeView(self,aFleet):\r\n        self.startLineFrame.fleetsTreeView.insert(\r\n            parent="",\r\n            index="end",\r\n            iid = aFleet.fleetId,\r\n            text = aFleet.name,\r\n            values=(self.renderDeltaToStartTime(aFleet),aFleet.status())) \r\n    \r\n    def showAddFleetDialog(self):\r\n        dlg = AddFleetDialog(self.startLineFrame,RACES_LIST)\r\n        # ... build the window ...\r\n        \r\n        ## Set the focus on dialog window (needed on Windows)\r\n        dlg.top.focus_set()\r\n        ## Make sure events only go to our dialog\r\n        dlg.top.grab_set()\r\n        ## Make sure dialog stays on top of its parent window (if needed)\r\n        dlg.top.transient(self.startLineFrame)\r\n        # set the position to be relative to the parent\r\n        dlg.top.geometry("+%d+%d" % (self.startLineFrame.winfo_rootx()+50,\r\n                                     self.startLineFrame.winfo_rooty()+50))\r\n        ## Display the window and wait for it to close\r\n        dlg.top.wait_window()\r\n        return dlg.fleetName\r\n    \r\n    def addFleetClicked(self):\r\n        fleetName = self.showAddFleetDialog()\r\n        \r\n        if fleetName:\r\n            self.raceManager.createFleet(fleetName)\r\n        self.updateButtonStates()\r\n    \r\n    def removeFleetClicked(self):\r\n        # check we have a selected fleet\r\n        if self.selectedFleet:\r\n            self.raceManager.removeFleet(self.selectedFleet)\r\n        self.updateButtonStates()\r\n    \r\n    def startRaceSequenceWithWarningClicked(self):\r\n        self.raceManager.startRaceSequenceWithWarning()\r\n        self.updateButtonStates()\r\n    \r\n    \r\n    def startRaceSequenceWithoutWarningClicked(self):\r\n        self.raceManager.startRaceSequenceWithoutWarning()\r\n        self.updateButtonStates()\r\n    \r\n    \r\n    def generalRecallClicked(self):\r\n        result = tkMessageBox.askquestion("General Recall","Are you sure?", icon="warning")\r\n        if result == 'yes':\r\n            self.raceManager.generalRecall()\r\n        self.updateButtonStates()\r\n    \r\n    def gunClicked(self):\r\n        self.audioManager.queueClip("gun")\r\n\r\n\r\n    def abandonStartRaceSequenceClicked(self):\r\n        result = tkMessageBox.askquestion("Abandon race sequence","Are you sure?", icon="warning")\r\n        if result == 'yes':\r\n            self.raceManager.abandonStartSequence()\r\n        self.updateButtonStates()\r\n    \r\n    \r\n    def fleetSelectionChanged(self,event):\r\n        item = self.startLineFrame.fleetsTreeView.selection()[0]\r\n        \r\n        self.selectedFleet = self.raceManager.fleetWithId(item)\r\n        \r\n        logging.debug("User has selected %s" % str(self.selectedFleet))\r\n        self.updateButtonStates()\r\n    \r\n    def finishSelectionChanged(self,event):\r\n        item = self.startLineFrame.finishTreeView.selection()[0]\r\n        self.selectedFinish = self.raceManager.finishWithId(item)\r\n        self.updateButtonStates()\r\n    \r\n    def gunAndFinishClicked(self):\r\n        logging.debug("Gun and finish clicked")\r\n        self.raceManager.createFinish()\r\n    \r\n    def handleFleetAdded(self,aFleet):\r\n        self.appendFleetToTreeView(aFleet)\r\n        self.updateButtonStates()\r\n
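\r\n    #\r\n    # Editorial sketch (an assumption, not shown in this file): the `changed`\r\n    # attribute wired in wireController is taken to be a small signal\r\n    # dispatcher along these lines (a None event name meaning "all events"):\r\n    #\r\n    # class Signal:\r\n    #     def __init__(self):\r\n    #         self.handlers = {}\r\n    #     def connect(self, event, handler):\r\n    #         self.handlers.setdefault(event, []).append(handler)\r\n    #     def fire(self, event, *args):\r\n    #         for handler in self.handlers.get(event, []) + self.handlers.get(None, []):\r\n    #             handler(*args)\r\n    #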
\r\n    \r\n    def handleFleetRemoved(self,aFleet):\r\n        self.startLineFrame.fleetsTreeView.delete(aFleet.fleetId)\r\n        self.selectedFleet=None\r\n        self.updateButtonStates()\r\n    \r\n    \r\n    def handleFleetChanged(self,aFleet):\r\n        pass\r\n    \r\n    def handleFinishAdded(self,aFinish):\r\n        self.appendFinishToFinishTreeView(aFinish)\r\n    \r\n    \r\n    def handleFinishChanged(self,aFinish):\r\n        # update the GUI for a finish\r\n        self.startLineFrame.finishTreeView.item(aFinish.finishId,\r\n            values=(self.renderFinishFleet(aFinish),self.renderFinishElapsedTime(aFinish)))\r\n    \r\n    def buildFinishView(self):\r\n        # we build our tree\r\n        \r\n        for finish in self.raceManager.finishes:\r\n            self.appendFinishToFinishTreeView(finish)\r\n    \r\n    #\r\n    # When the sequence starts, we create our fleet buttons\r\n    #\r\n    def handleSequenceStarted(self):\r\n        self.createFleetButtons()\r\n    \r\n    def createFleetButtons(self):\r\n        for i in range(len(self.raceManager.fleets)):\r\n            fleet = self.raceManager.fleets[i]\r\n            buttonText = fleet.name.replace(" ","\n")\r\n            fleetButton = self.startLineFrame.createFleetButton(buttonText,i)\r\n            \r\n            # we're creating multiple lambdas within the same namespace.\r\n            # This workaround comes from http://stackoverflow.com/questions/4236182/generate-tkinter-buttons-dynamically\r\n            \r\n            fleetButton.configure(command=lambda fleet=fleet: self.handleFleetButtonClickedForFleet(fleet=fleet))\r\n            self.fleetButtons.append(fleetButton)\r\n    \r\n    \r\n    def enableFleetButtons(self):\r\n        for button in self.fleetButtons:\r\n            button['state'] = Tkinter.NORMAL\r\n    \r\n    def disableFleetButtons(self):\r\n        for button in self.fleetButtons:\r\n            button['state'] = Tkinter.DISABLED\r\n    \r\n    \r\n    def handleFleetButtonClickedForFleet(self,fleet):\r\n        logging.info("Fleet button " + fleet.name + " clicked")\r\n        if self.selectedFinish:\r\n            self.selectedFinish.fleet = fleet\r\n            self.raceManager.updateFinish(self.selectedFinish)\r\n            self.selectFinishInTreeView(self.nextFinishWithoutFleetAfter(self.selectedFinish))\r\n    \r\n    def nextFinishWithoutFleetAfter(self,finish):\r\n        indexOfFinish = self.raceManager.finishes.index(finish)\r\n        for i in range(indexOfFinish+1,len(self.raceManager.finishes)):\r\n            if not self.raceManager.finishes[i].hasFleet():\r\n                return self.raceManager.finishes[i]\r\n        \r\n        return None\r\n    #\r\n    def appendFinishToFinishTreeView(self,aFinish):\r\n        finishItem = self.startLineFrame.finishTreeView.insert(\r\n            parent="",\r\n            index="end",\r\n            iid = aFinish.finishId,\r\n            text = self.renderFinishTime(aFinish),\r\n            values=(self.renderFinishFleet(aFinish),self.renderFinishElapsedTime(aFinish)))\r\n        \r\n        # the call to update_idletasks is needed to make sure that the\r\n        # treeview is fully populated. Without this line, on ActivePython 2.7.2.5\r\n        # the scroll to the bottom only works every other item. \r\n        self.startLineFrame.update_idletasks()\r\n        self.startLineFrame.finishTreeView.see(finishItem)\r\n        \r\n        #\r\n        # if we don't already have a selected finish, or the selected finish\r\n        # already has a fleet, select the finish just added\r\n        #\r\n        if not self.selectedFinish or self.selectedFinish.hasFleet():\r\n            self.selectFinishInTreeView(aFinish)\r\n    \r\n    \r\n    #\r\n    # This isn't quite right.
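An editorial guess at what is\r\n    # meant: the else branch in selectFinishInTreeView re-applies the current\r\n    # selection instead of clearing it, and self.selectedFinish is never\r\n    # reset to None.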
\r\n #\r\n \r\n def selectFinishInTreeView(self,aFinish):\r\n # if we do have a finish\r\n if aFinish:\r\n self.startLineFrame.finishTreeView.selection_set(aFinish.finishId)\r\n self.selectedFinish = aFinish\r\n self.enableFleetButtons()\r\n else:\r\n # if we don't have a finish\r\n selectedItems = self.startLineFrame.finishTreeView.selection()\r\n self.startLineFrame.finishTreeView.selection_set(selectedItems)\r\n self.updateButtonStates()\r\n \r\n #\r\n # Render the fleet of a finish\r\n #\r\n def renderFinishFleet(self,aFinish):\r\n # if our finish has a fleet, return the name of the fleet\r\n if aFinish.fleet:\r\n return aFinish.fleet.name\r\n else:\r\n return \"-\"\r\n \r\n #\r\n # Render the finish time. This is the clock time of the finish\r\n #\r\n def renderFinishTime(self,finish):\r\n return finish.finishTime.strftime(\"%H:%M:%S\")\r\n \r\n def renderFinishElapsedTime(self,finish):\r\n # if we have a fleet, calculate the delta from the finish time to the \r\n # start time of the fleet.\r\n if finish.hasFleet():\r\n \r\n \r\n \r\n return str(int(finish.elapsedFinishTimeDelta().total_seconds()))\r\n #\r\n # if we don't have a fleet, we can't calculate the elapsed time\r\n #\r\n else:\r\n \r\n return \"-\"\r\n \r\n \r\n #\r\n # event handler for the connection state of the easyDaqRelay changing\r\n #\r\n def handleConnectionStateChanged(self,state):\r\n # update the Tk string variable with the session state description\r\n # from the EasyDaq relay object\r\n self.startLineFrame.after(0, self.updateSessionStateDescription)\r\n \r\n def updateSessionStateDescription(self):\r\n while self.easyDaqRelay.sessionStateDescriptionQueue.qsize():\r\n try:\r\n message = self.easyDaqRelay.sessionStateDescriptionQueue.get_nowait()\r\n self.startLineFrame.connectionStatus.set(message)\r\n except Queue.Empty:\r\n # this should never happen. \r\n message = \"Lights: No message available\"\r\n \r\n \r\n #\r\n # Calculate the integer adjusted seconds to start time. This is counter-intuitive: the\r\n # effect of the int function is to subtract 1 second almost all of the time. If the\r\n # result is 1.99999 seconds, int will reduce to 1. So we add 1 second\r\n # to the float value. This reflects the behaviour\r\n # of a regular clock. On a countdown, we show the time as 2 seconds until it is\r\n # exactly 1 second.\r\n #\r\n def integerAdjustedDeltaSecondsToFleetStartTime(self,aFleet):\r\n return int(aFleet.adjustedDeltaSecondsToStartTime()-1)\r\n \r\n def renderDeltaToStartTime(self, aFleet):\r\n if aFleet.hasStartTime():\r\n deltaToStartTimeSeconds = int(self.integerAdjustedDeltaSecondsToFleetStartTime(aFleet))\r\n \r\n hmsString = str(datetime.timedelta(seconds=(abs(deltaToStartTimeSeconds))))\r\n \r\n if deltaToStartTimeSeconds < 0:\r\n return \"-\" + hmsString \r\n else:\r\n return hmsString\r\n \r\n else:\r\n return \"-\"\r\n \r\n \r\n \r\n def renderDeltaSecondsToStartTime(self, aFleet):\r\n if aFleet.hasStartTime():\r\n return self.integerAdjustedDeltaSecondsToFleetStartTime(aFleet)\r\n \r\n \r\n \r\n else:\r\n return \"-\"\r\n \r\n \r\n def refreshFleetsView(self):\r\n #\r\n # iterate over all of our fleets. 
Read the start time delta\r\n        # and status, and update the fleetsTreeView with their values\r\n        #\r\n        \r\n        for aFleet in self.raceManager.fleets:\r\n            \r\n            self.startLineFrame.fleetsTreeView.item(\r\n                aFleet.fleetId,\r\n                \r\n                values=[self.renderDeltaToStartTime(aFleet), self.renderDeltaSecondsToStartTime(aFleet),aFleet.status()])\r\n        \r\n        \r\n        \r\n        #\r\n        # Ask our race manager if we have a started fleet\r\n        #\r\n        if self.raceManager.hasStartedFleet():\r\n            self.startLineFrame.enableGeneralRecallButton()\r\n        else:\r\n            self.startLineFrame.disableGeneralRecallButton()\r\n        \r\n        \r\n        #\r\n        # Update our clock\r\n        #\r\n        self.startLineFrame.clockStringVar.set(datetime.datetime.now().strftime("%H:%M:%S"))\r\n        \r\n        #\r\n        # Update the connection status\r\n        #\r\n        self.updateSessionStateDescription()\r\n        \r\n        #\r\n        # Update the wav file queue depth\r\n        #\r\n        self.updateGunQueueLength()\r\n        \r\n        #\r\n        # Schedule to update this view again in 250 milliseconds\r\n        #\r\n        self.startLineFrame.after(250, self.refreshFleetsView)\r\n    \r\n    \r\n    #\r\n    # This method enables and disables buttons. Call it after handling a button event\r\n    #\r\n    def updateButtonStates(self):\r\n        #\r\n        # Logic for enabling and disabling buttons\r\n        # \r\n        if self.raceManager.hasSequenceStarted() or self.raceManager.hasStartedFleet(): \r\n            \r\n            self.startLineFrame.enableAbandonStartRaceSequenceButton()\r\n            self.startLineFrame.disableAddFleetButton()\r\n            self.startLineFrame.disableRemoveFleetButton()\r\n            self.startLineFrame.disableStartRaceSequenceWithoutWarningButton()\r\n            self.startLineFrame.disableStartRaceSequenceWithWarningButton()\r\n        else:\r\n            self.startLineFrame.enableAddFleetButton()\r\n            self.startLineFrame.disableAbandonStartRaceSequenceButton()\r\n            \r\n            \r\n            if self.raceManager.hasFleets():\r\n                \r\n                \r\n                \r\n                self.startLineFrame.enableStartRaceSequenceWithoutWarningButton()\r\n                self.startLineFrame.enableStartRaceSequenceWithWarningButton()\r\n                if self.selectedFleet:\r\n                    self.startLineFrame.enableRemoveFleetButton()\r\n                else:\r\n                    self.startLineFrame.disableRemoveFleetButton()\r\n            else:\r\n                self.startLineFrame.disableRemoveFleetButton()\r\n                self.startLineFrame.disableStartRaceSequenceWithoutWarningButton()\r\n                self.startLineFrame.disableStartRaceSequenceWithWarningButton()\r\n        \r\n        \r\n        if self.selectedFinish:\r\n            self.enableFleetButtons()\r\n        else:\r\n            self.disableFleetButtons()\r\n    #\r\n    # start the controller. After an initial 500 millisecond delay we refresh the start time\r\n    # and the status of the race manager every 250 milliseconds\r\n    #\r\n    def start(self):\r\n        # if we have recovered, we need to build our finish view and our fleet buttons\r\n        self.buildFinishView()\r\n        self.createFleetButtons()\r\n        \r\n        self.startLineFrame.after(500, self.refreshFleetsView)\r\n    \r\n    #\r\n    # The gun queue has changed.
Update the UI to show the length of the gun queue\r\n    #\r\n    def updateGunQueueLength(self):\r\n        \r\n        self.startLineFrame.gunQueueCount.set("Gun Q : %d " % self.audioManager.queueLength())\r\n\r\n\r\n    def exitClicked(self):\r\n        result = tkMessageBox.askquestion("Exit","Are you sure?", icon="warning")\r\n        if result == 'yes':\r\n            self.shutdown()\r\n    \r\n    def shutdown(self):\r\n        \r\n        logging.info("Shutting down")\r\n        self.easyDaqRelay.sendRelayCommand([LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF, LIGHT_OFF])\r\n        self.easyDaqRelay.stop()\r\n        \r\n        # delete our recovery file if we have one\r\n        if self.recoveryManager:\r\n            self.recoveryManager.stop()\r\n        \r\n        # and then quit after a second\r\n        self.startLineFrame.after(1000,self.startLineFrame.master.quit)\r\n\r\n#\r\n# to manage our sub-process, we must ensure that our main is only invoked once, here. Otherwise\r\n# the subprocess will also invoke this code.\r\n#\r\nif __name__ == '__main__':\r\n    \r\n    \r\n    configFilename = sys.argv[1] \r\n    sys.stderr.write("Reading config from %s" % configFilename) \r\n    \r\n    \r\n    config = ConfigParser.ConfigParser()\r\n\r\n    config.read(configFilename)\r\n    \r\n    loglevel = config.get("Logging","level")\r\n    logfilename = config.get("Logging","filename")\r\n    \r\n    logging.basicConfig(\r\n        level=getattr(logging, loglevel.upper()),\r\n        format = "%(levelname)s:%(asctime)-15s %(message)s",\r\n        filename = logfilename)\r\n    \r\n    \r\n    if config.get("Lights","enabled") == 'Y':\r\n        comPort = config.get("Lights","comPort")\r\n        logging.info("Lights enabled on COM port %s" % comPort)\r\n    else:\r\n        comPort = None\r\n        logging.info("Lights not enabled")\r\n    \r\n    \r\n    if config.get("Training","trainingMode") =='Y':\r\n        testSpeedRatio = config.getint("Training","trainingSpeed")\r\n        logging.info("Running in training mode at speed %i" % testSpeedRatio)\r\n    else:\r\n        testSpeedRatio = 1\r\n        logging.info("Running in race mode at standard speed")\r\n    \r\n    \r\n    #\r\n    # config.items returns a list of (name,value) pairs.\r\n    # In the Audio section, this is clipname,wavFilename\r\n    #\r\n    \r\n    audioClips = config.items("Audio")\r\n    \r\n    \r\n    \r\n    backgroundColour = config.get("UserInterface","backgroundColour") \r\n    app = StartLineFrame(backgroundColour=backgroundColour) \r\n    #\r\n    # Check for a recovery file. If we have one, ask if we want to recover our race manager\r\n    #\r\n    recoveryFilename = config.get("Persistence","recoveryFilename") \r\n    if recoveryFilename:\r\n        if os.path.exists(config.get("Persistence","recoveryFilename")):\r\n            if tkMessageBox.askyesno("Crash detected","Do you want to recover?", icon="warning"):\r\n                raceManager = pickle.load(open(recoveryFilename))\r\n            else:\r\n                raceManager = RaceManager()\r\n        else:\r\n            raceManager = RaceManager()\r\n    else:\r\n        raceManager = RaceManager()\r\n    \r\n    if testSpeedRatio:\r\n        RaceManager.testSpeedRatio = testSpeedRatio\r\n        logging.info("Setting test speed ratio to %d" % testSpeedRatio)\r\n    easyDaqRelay = None\r\n    \r\n    if comPort: \r\n        from lightsui.hardware import LIGHT_OFF, LIGHT_ON, EasyDaqUSBRelay\r\n        \r\n        easyDaqRelay = EasyDaqUSBRelay(comPort)\r\n        relayThread = threading.Thread(target = easyDaqRelay.run)\r\n        # run as a background thread.
Allow application to end even if this thread is still running.\r\n relayThread.daemon = True\r\n \r\n # the audio manager runs in its own thread \r\n audioManager = AudioManager(audioClips) \r\n audioThread = threading.Thread(target = audioManager.run)\r\n audioThread.daemon = True\r\n \r\n \r\n recoveryManager = None\r\n if config.get(\"Persistence\",\"recoveryFilename\"):\r\n recoveryManager = RaceRecoveryManager(config.get(\"Persistence\",\"recoveryFilename\"),raceManager)\r\n raceManager.changed.connect(None,recoveryManager.handleRaceManagerChanged)\r\n recoveryThread = threading.Thread(target = recoveryManager.run)\r\n recoveryThread.daemon = True\r\n recoveryThread.start()\r\n screenController = ScreenController(app,raceManager,audioManager,easyDaqRelay, recoveryManager)\r\n gunController = GunController(app, audioManager, raceManager)\r\n # check if a recovered raceManager has a started sequence. If so, schedule guns.\r\n # note, this does not recover the F flag up beeps and gun nor F flag down beeps\r\n if raceManager.hasSequenceStarted():\r\n gunController.scheduleGunsForFutureFleetStarts()\r\n \r\n \r\n logging.info(\"Starting screen controller\") \r\n screenController.start()\r\n \r\n if comPort:\r\n lightsController = LightsController(app, easyDaqRelay, raceManager)\r\n logging.info(\"Starting lights controller\") \r\n relayThread.start()\r\n audioThread.start()\r\n app.master.title('Startline') \r\n app.mainloop() ","sub_path":"HHSCStartLine/src/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":32558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"126117615","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os,sys\nimport argparse\nimport time\n\nimport numpy as np\n\nimport chainer\nfrom chainer import computational_graph\nfrom chainer import cuda, Variable\nimport chainer.links as L\nimport chainer.optimizers as O\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', '-g', default=-1, type=int,\n help='GPU ID (negative value indicates CPU)')\n\nparser.add_argument('--test', '-t', choices=['simple', 'mnist'],\n default='mnist',\n help='test type (\"simple\", \"mnist\")')\n\nargs = parser.parse_args()\nif args.gpu >= 0:\n cuda.check_cuda_available()\nxp = cuda.cupy if args.gpu >= 0 else np\n\nxp.seterr(all=\"ignore\")\n\ndef sigmoid(x):\n return 1. 
/ (1 + xp.exp(-x))\n\ndef add_noise(x, p=0.2):\n    x = xp.copy(x)\n\n    for xi in x:\n        perm = xp.random.permutation(len(xi))\n        tmp = xp.random.choice(perm[:int(p*len(perm))])\n        perm = perm[tmp:int(tmp + p*len(perm))]\n        xi[perm] = 0\n\n    return x\n\ndef sample(x):\n    return xp.array(255*x, xp.int16)\n\ndef plot(datas, labels, perm=None):\n    import pylab\n    \n    if perm is None:\n        p = xp.random.random_integers(0, len(datas), 25)\n    else:\n        p = perm[:25]\n    \n    for index, i in enumerate(p):\n        data = datas[i]\n        label = labels[i]\n        pylab.subplot(5, 5, index + 1)\n        pylab.axis('off')\n        pylab.imshow(data.reshape(28, 28), cmap=pylab.cm.gray_r, interpolation='nearest')\n        pylab.title('%i' % label)\n    pylab.show()\n\ndef plot_error(loss, loss_func="MSE"):\n    import matplotlib.pyplot as plt\n    plt.figure(figsize=(8,6))\n    plt.plot(range(len(loss)), loss, lw=2)\n    plt.legend(["train"], loc=1)\n    plt.xlabel("epoch")\n    plt.ylabel(loss_func)\n    plt.show()\n\nclass RBM(object):\n    def __init__(self, n_visible=2, n_hidden=3, \\\n                 W=None, c=None, b=None, s=None, z=None, xp_rng=None, \\\n                 mode="Bernoulli", sample_flag=1, pcd_flag=0, update_variance=False, \\\n                 winit="normal", scale=0.001):\n\n        self.n_visible = n_visible\n        self.n_hidden = n_hidden\n        self.mode = mode\n        self.sample_flag = sample_flag\n        self.pcd_flag = pcd_flag\n\n        if xp_rng is None:\n            xp_rng = xp.random.RandomState(123)\n\n        if W is None:\n            if winit == "uniform":\n                a = 1. / n_visible\n                initial_W = xp.array(xp_rng.uniform(\n                    low=-a,\n                    high=a,\n                    size=(n_visible, n_hidden)))\n            elif winit == "normal":\n                initial_W = scale * xp_rng.randn(n_visible, n_hidden)\n\n            W = initial_W\n\n        if c is None:\n            c = xp.zeros(n_hidden)\n\n        if b is None:\n            b = xp.zeros(n_visible)\n        \n        if s is None:\n            s = xp.ones(n_visible)\n\n        if z is None:\n            z = xp.zeros(n_visible)\n\n        self.xp_rng = xp_rng\n        self.W = np.array(W)\n        self.c = np.array(c)\n        self.b = np.array(b)\n        self.z = np.array(z)\n        self.s = np.array(s)\n\n        self.dW = xp.zeros(shape=(n_visible, n_hidden))\n        self.db = xp.zeros(n_visible)\n        self.dc = xp.zeros(n_hidden)\n        self.dz = xp.zeros(n_visible)\n\n        self.update_variance = update_variance\n\n    def update(self, v_data, v_prev_data=None, limit=1e-4):\n        lr = self.lr\n\n        if self.pcd_flag == 0:\n            v_prev_data = v_data\n        elif self.pcd_flag == 1 and v_prev_data is None:\n            v_prev_data = v_data\n\n        h1 = self.propup(v_data)\n        v1 = v_data\n        _, v2, h2 = self.contrastive_divergence(v_prev_data, self.k)\n\n        if self.mode == "Bernoulli":\n            dW = (xp.dot(v1.T, h1) - xp.dot(v2.T, h2)) / self.batchsize\n            db = xp.mean(v1 - v2, axis=0)\n            dc = xp.mean(h1 - h2, axis=0)\n        elif self.mode == "Gaussian":\n            vz1 = 0.5*xp.square(v1 - self.b) - xp.dot(h1, self.W.T)*v1\n            vz2 = 0.5*xp.square(v2 - self.b) - xp.dot(h2, self.W.T)*v2\n\n            dW = (xp.dot((v1/self.s).T, h1) - xp.dot((v2/self.s).T, h2)) / self.batchsize\n            db = xp.mean(v1/self.s - v2/self.s, axis=0)\n            dc = xp.mean(h1 - h2, axis=0)\n            \n            if self.update_variance == True:\n                dz = xp.mean(vz1/self.s - vz2/self.s, axis=0)\n                self.z += (lr/self.div_lr) * dz + self.momentum * self.dz\n                self.s = xp.maximum(xp.exp(self.z), limit)\n                self.dz = dz\n\n        self.W += lr * (dW - self.decay * self.W) + self.momentum * self.dW\n        self.b += lr * db + self.momentum * self.db\n        self.c += lr * dc + self.momentum * self.dc\n\n        self.dW = dW\n        self.db = db\n        self.dc = dc\n\n    def train(self, input, n_epoch=50, batchsize=10, k=1, lr=0.1, cost="reconstruction error", momentum=0.0, decay=0.0, div_lr=1.):\n        self.k = k\n        self.lr = lr\n        self.momentum = momentum\n        self.decay = decay\n        
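# Editorial comments (meanings inferred from usage elsewhere in the class):\n        # k - number of Gibbs steps for CD-k; lr - learning rate;\n        # momentum, decay - SGD momentum and weight-decay terms;\n        # div_lr - divisor for the variance (z) learning rate in Gaussian mode.\n        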
self.batchsize = batchsize\n self.div_lr = div_lr\n\n V_train = input\n N = V_train.shape[0]\n v_prev_data = None\n loss = []\n\n if cost == \"free energy\":\n get_cost = self.get_cost\n elif cost == \"reconstruction cross entropy\":\n get_cost = self.get_reconstruction_cross_entropy\n elif cost == \"reconstruction error\":\n get_cost = self.get_reconstruction_error\n\n print(\"Start training...\")\n \n for epoch in range(0, n_epoch):\n sys.stdout.flush()\n perm = xp.random.permutation(N)\n for i in range(0, N, batchsize):\n v_data = xp.asarray(V_train[perm[i:i+batchsize]])\n self.update(v_data, v_prev_data)\n v_prev_data = v_data\n sys.stdout.flush()\n\n l = get_cost(V_train)\n loss.append(l)\n\n print(\"Done\")\n\n return loss\n \n def contrastive_divergence(self, v_data, k):\n v1 = v_data\n \n h1, hs = self.sample_h_given_v(v1)\n chain_start = h1\n\n for step in range(k):\n if step == 0:\n v2 = self.propdown(chain_start)\n h2, hs = self.sample_h_given_v(v2)\n else:\n v2 = self.propdown(h2)\n h2, hs = self.sample_h_given_v(v2)\n\n return h1, v2, h2\n\n def sample_h_given_v(self, v0_sample):\n sample_flag = self.sample_flag\n h1_mean = self.propup(v0_sample)\n if sample_flag == 0:\n return h1_mean, h1_mean\n \n h1_sample = self.xp_rng.binomial(size=h1_mean.shape, n=1, p=h1_mean)\n \n return h1_mean, h1_sample\n\n def sample_v_given_h(self, h0_sample):\n sample_flag = self.sample_flag\n v1_mean = self.propdown(h0_sample)\n if sample_flag == 0:\n return v1_mean, v1_mean\n \n if self.mode ==\"Bernoulli\":\n v1_sample = self.xp_rng.binomial(size=v1_mean.shape, n=1, p=v1_mean)\n elif self.mode == \"Gaussian\":\n v1_sample = (self.xp_rng.randn(h0_sample.shape[0], self.n_visible) * xp.sqrt(self.s)) + v1_mean\n \n return v1_mean, v1_sample\n\n def propup(self, v):\n if self.mode == \"Bernoulli\":\n pre_sigmoid_activation = xp.dot(v, self.W) + self.c\n elif self.mode == \"Gaussian\":\n pre_sigmoid_activation = xp.dot(v/self.s, self.W) + self.c\n\n return sigmoid(pre_sigmoid_activation)\n\n def propdown(self, h):\n if self.mode == \"Bernoulli\":\n pre_sigmoid_activation = xp.dot(h, self.W.T) + self.b\n v_mean = sigmoid(pre_sigmoid_activation)\n elif self.mode == \"Gaussian\":\n v_mean = xp.dot(h, self.W.T) + self.b\n \n return v_mean\n\n def gibbs_hvh(self, h0_sample):\n v1_mean, v1_sample = self.sample_v_given_h(h0_sample)\n h1_mean, h1_sample = self.sample_h_given_v(v1_sample)\n \n return v1_mean, v1_sample, h1_mean, h1_sample\n\n def get_reconstruction_cross_entropy(self,v_test):\n pre_sigmoid_activation_h = xp.dot(v_test, self.W) + self.c\n sigmoid_activation_h = sigmoid(pre_sigmoid_activation_h)\n\n pre_sigmoid_activation_v = xp.dot(sigmoid_activation_h, self.W.T) + self.b\n sigmoid_activation_v = sigmoid(pre_sigmoid_activation_v)\n\n cross_entropy = - xp.mean(\n xp.sum(v_test * xp.log(sigmoid_activation_v) +\n (1 - v_test) * xp.log(1 - sigmoid_activation_v),\n axis=1))\n\n return cross_entropy\n\n def get_reconstruction_error(self, v_test):\n reconstructed_v = self.reconstruct(v_test)\n\n return xp.sum(xp.square(xp.abs(v_test - reconstructed_v))) / self.batchsize\n\n def reconstruct(self, v):\n if self.mode == \"Bernoulli\":\n h = sigmoid(xp.dot(v, self.W) + self.c)\n reconstructed_v = sigmoid(xp.dot(h, self.W.T) + self.b)\n elif self.mode == \"Gaussian\":\n #h = sigmoid(xp.dot(v, self.W) + self.c)\n h = sigmoid(xp.dot(v/self.s, self.W) + self.c)\n reconstructed_v = xp.dot(h, self.W.T) + self.b\n #reconstructed_v = xp.random.randn(v.shape[0], v.shape[1])/xp.sqrt(self.s) + reconstructed_v\n \n 
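# (Editorial note) Gaussian mode returns the mean reconstruction; the\n        # commented-out line above would instead draw a noisy sample around it.\n        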
return reconstructed_v\n\n    def get_cost(self, v_test):\n        k = self.k\n        \n        _, vh_test, nh_test = self.contrastive_divergence(v_test, k)\n        loss = self.free_energy(v_test) - self.free_energy(vh_test)\n        \n        return loss\n\n    def free_energy(self,v_test):\n        wx_b = xp.dot(v_test, self.W) + self.c\n        if self.mode == "Bernoulli":\n            b_term = xp.sum(xp.dot(v_test, self.b.T))\n        elif self.mode == "Gaussian":\n            v_ = v_test - self.b\n            b_term = xp.sum(0.5 * v_ * v_)\n\n        hidden_term = xp.sum(xp.log(1 + xp.exp(wx_b)))\n\n        return -hidden_term - b_term\n\ndef test_mnist(n_epoch=1000, index=100, batchsize=10, lr=0.1, n_hidden=256, cost="reconstruction cross entropy"):\n    from sklearn.datasets import fetch_mldata\n    from sklearn.preprocessing import LabelBinarizer\n    from sklearn.cross_validation import train_test_split\n\n    mnist = fetch_mldata("MNIST original", data_home=".")\n    X = mnist.data\n    y = mnist.target\n\n    X = X.astype(xp.float64)\n    X /= X.max()\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n    label = y_train\n    y_train = LabelBinarizer().fit_transform(y_train)\n    y_test = LabelBinarizer().fit_transform(y_test)\n\n    V_train = X_train[:index] > 0.4\n    V_train = V_train.astype(float)\n    V_test = X_train[index:index*2] > 0.4\n    V_test = V_test.astype(float)\n\n    V_train = X_train[:index]\n\n    n_visible = V_train.shape[1]\n    n_hidden = n_hidden\n\n    rng = xp.random.RandomState(123)\n    rbm = RBM(n_visible=n_visible, n_hidden=n_hidden, xp_rng=rng, sample_flag=1, pcd_flag=1, \n              mode="Bernoulli")\n\n    cost = "reconstruction error"\n    \n    # train\n    loss = rbm.train(V_train, lr=lr, cost=cost, n_epoch=n_epoch)\n    plot_error(loss, cost)\n    \n    # test\n    V_test = xp.copy(V_train)\n    perm = xp.random.permutation(V_train.shape[0])\n    plot(V_train[:index], label[:index], perm=perm)\n    #recon = rbm.reconstruct(V_test)\n    #plot(xp.maximum(sample(recon), 0), label[:index], perm=perm)\n    V_test = add_noise(V_test, p=0.5)\n    recon = rbm.reconstruct(V_test)\n    #recon = recon > 0.3\n    #recon = recon.astype(float)\n\n    plot(V_test[:index], label[:index], perm=perm)\n    plot(xp.maximum(sample(recon), 0), label[:index], perm=perm)\n    \nif __name__ == "__main__":\n    test_mnist(n_epoch=50, index=50)\n","sub_path":"rbm.py","file_name":"rbm.py","file_ext":"py","file_size_in_byte":11637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"98672532","text":"import asyncio\r\nimport os\r\n\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom shapely.geometry import Point, LineString\r\nfrom geopy.distance import geodesic\r\n\r\nfrom fgeocode import ForwardGeocoderV7, MyFg\r\n\r\ndef prepare_data (input_filename, delimiter=";", encoding='utf-8'):\r\n    '''\r\n    Prepare data to analyze quality of Geocoder\r\n    '''\r\n    \r\n    # Read original addresses\r\n    original_df = pd.read_csv(input_filename, delimiter=delimiter, encoding=encoding)\r\n\r\n    # Extract list of addresses to geocode\r\n    list_to_geocode = [row['ADDRESS'] for index, row in original_df.iterrows()]\r\n\r\n    # Initialize geocoding service\r\n    # fg = ForwardGeocoderV7(list_to_geocode, '0JM0F5MiF3lOZDEySf19K_LHu_6uKgaEckSk_Kz6JF8')\r\n    fg = MyFg(list_to_geocode, 'd9iIt3blT6x2EM3QInT8_cB4V0HZv7wetmpQoT-AHfA')\r\n\r\n    # Run geocoding\r\n    # results = asyncio.run(fg.main())\r\n    results = fg.main()\r\n\r\n    # Transform data to a DataFrame \r\n    geocoded_df = pd.DataFrame(results)\r\n    \r\n    # Join two data sets\r\n    original_df = original_df.join(geocoded_df)\r\n    errors_df = original_df[original_df['LAT'].isnull()]\r\n    
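# Editorial note: addresses the geocoder could not resolve are exported\r\n    # below for inspection, then dropped before distances are computed.\r\n    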
df_to_geojson('none_points', errors_df, 'ORIGINAL_LAT', 'ORIGINAL_LNG')\r\n original_df = original_df.dropna(subset=['LAT'])\r\n\r\n original_df['distances'] = list(calculate_distance(original_df, 'ORIGINAL_LAT', 'ORIGINAL_LNG', 'LAT', 'LNG'))\r\n\r\n return original_df\r\n\r\n\r\ndef calculate_distance (data_frame, original_lat_col, original_lng_col, geocoded_lat_col, geocoded_lng_col):\r\n\r\n for index, row in data_frame.iterrows():\r\n \r\n # Get coordinates of original point\r\n original_lat = row[original_lat_col]\r\n original_lng = row[original_lng_col]\r\n\r\n # Get coordinates of geocoded point\r\n geocoded_lat = row[geocoded_lat_col]\r\n geocoded_lng = row[geocoded_lng_col]\r\n \r\n # Create tuples of coordinates\r\n original_point = (original_lat, original_lng)\r\n geocoded_point = (geocoded_lat, geocoded_lng)\r\n \r\n # Calculate geodesic distance and append to list\r\n yield round(geodesic(original_point, geocoded_point).meters, 0)\r\n\r\n\r\ndef create_error_line (data_frame, original_lat_col, original_lng_col, geocoded_lat_col, geocoded_lng_col, distance_col):\r\n \r\n # Convert Pandas DataFrame to Geopandas DataFrame\r\n error_df = gpd.GeoDataFrame(data=data_frame)\r\n\r\n # Create lists of points for comparison\r\n original_points_list = [point for point in zip(error_df[original_lng_col], error_df[original_lat_col])]\r\n geocoded_points_list = [point for point in zip(error_df[geocoded_lng_col], error_df[geocoded_lat_col])]\r\n\r\n # Create geometry field in existing Geopandas DataFrame\r\n error_df[\"geometry\"] = [LineString(points) for points in zip(original_points_list, geocoded_points_list)]\r\n\r\n # Create output directory\r\n if not os.path.exists('results'):\r\n os.mkdir('results')\r\n\r\n # Save results to file\r\n error_df.to_file('results/error.geojson', driver='GeoJSON', encoding=\"utf-8\")\r\n\r\ndef df_to_geojson (result_filename, data_frame, lat_col, lng_col):\r\n # Convert Pandas DataFrame to Geopandas DataFrame\r\n spatial_df = gpd.GeoDataFrame(data=data_frame)\r\n\r\n # Create geometry field in existing Geopandas DataFrame\r\n spatial_df[\"geometry\"] = [Point(point) for point in zip(spatial_df[lng_col], spatial_df[lat_col])]\r\n\r\n # Create output directory\r\n if not os.path.exists('results'):\r\n os.mkdir('results')\r\n\r\n # Save results to file\r\n spatial_df.to_file(f'results/{result_filename}.geojson', driver='GeoJSON', encoding=\"utf-8\")\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n # Prepare data for analysis\r\n data_for_analysis = prepare_data('rigla.csv')\r\n\r\n # Create error line between points\r\n create_error_line(data_for_analysis, 'ORIGINAL_LAT', 'ORIGINAL_LNG', 'LAT', 'LNG', 'distances')\r\n\r\n # Create original points layer\r\n df_to_geojson('original_points', data_for_analysis, 'ORIGINAL_LAT', 'ORIGINAL_LNG')\r\n\r\n # Create geocoded points layer\r\n df_to_geojson('geocoded_points', data_for_analysis, 'LAT', 'LNG')\r\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"224204323","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\nimport copy\n\nfrom base_normalize import BaseNormalize\n\n\nIGNORED_CHARS = {'\\n', '\\t', '\\r', ' ', ' ', '_', '\\3', '\\4', '\\u2028', '\\xa0'}\nREPLACED_CHARS = {\n '[0-9]': 'n',\n '[〇一二三四五六七八九十]': 'N',\n '[壹贰叁肆伍陆柒捌玖零拾佰仟萬]': 'M'\n}\n\n\nclass Normalize(BaseNormalize):\n\n \"\"\"\n 对样本进行归一化处理\n \"\"\"\n\n def __init__(self, 
content, sbc2dbc=True, ignored_chars=IGNORED_CHARS, replaced_chars=REPLACED_CHARS,\n is_english=False):\n super(Normalize, self).__init__(content)\n self._sbc2dbc = sbc2dbc\n self._ignored_chars = copy.deepcopy(ignored_chars)\n self._replaced_chars = copy.deepcopy(replaced_chars)\n self._is_english = is_english\n if self._is_english:\n self._ignored_chars.remove(' ')\n\n def normalize(self):\n normed_content = self._content\n if self._sbc2dbc:\n normed_content = self.sbc2dbc(normed_content) # sbc 2 dbc\n for k, v in self._replaced_chars.iteritems(): # replace char\n normed_content = re.sub(k, v, normed_content)\n tmp_content = normed_content\n normed_content = re.sub('|'.join(self._ignored_chars), '', normed_content) # delete ignore chars\n self._gen_index_map(tmp_content, normed_content)\n if self._is_english:\n normed_content = normed_content.lower()\n return normed_content\n\n @staticmethod\n def sbc2dbc(string):\n n = []\n for char in string:\n num = ord(char)\n if num == 0x3000:\n num = 32\n elif 0xFF01 <= num <= 0xFF5E:\n num -= 0xfee0\n num = unichr(num)\n n.append(num)\n return ''.join(n)\n","sub_path":"extract_framework/normalize/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"454376769","text":"#\n# This file is part of Eclipse Steady.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n# SPDX-FileCopyrightText: Copyright (c) 2018-2020 SAP SE or an SAP affiliate company and Eclipse Steady contributors\n#\n\nimport requests\nimport zipfile\nfrom contextlib import closing\nimport io\nimport os\nimport sys\nimport json\nimport time\nimport plac\nfrom tqdm import tqdm\nfrom io import StringIO\nfrom pprint import pprint\nimport logging\n\n# note: The NVD has not data older than 2002\nSTART_FROM_YEAR=os.environ.get('CVE_DATA_AS_OF_YEAR') or '2002'\nDATA_PATH=os.environ.get('CVE_DATA_PATH') or 'data/'\nFEED_SCHEMA_VERSION=os.environ.get('FEED_SCHEMA_VERSION') or '1.1'\n\ndef do_update(verbose=False):\n # read metadata of last fetch\n last_fetch_metadata = dict()\n try:\n with open(os.path.join(DATA_PATH, 'metadata.json'), 'r') as f:\n last_fetch_metadata = json.load(f)\n print('[ii] last fetch: ' + last_fetch_metadata['sha256'])\n except:\n last_fetch_metadata['sha256'] = ''\n print('[ii] Could not read metadata about previous fetches (this might be the first time we fetch data).')\n\n\n # read metadata of new data from the NVD site\n url = 'https://nvd.nist.gov/feeds/json/cve/{}/nvdcve-{}-modified.meta'.format(FEED_SCHEMA_VERSION, FEED_SCHEMA_VERSION)\n r = requests.get(url)\n if r.status_code != 200:\n print('[!!] 
Received status code {} when contacting {}.'.format(r.status_code, url))\n return False\n\n metadata_txt = r.text.strip().split('\\n')\n metadata_dict = dict()\n for d in metadata_txt:\n d_split = d.split(':',1)\n metadata_dict[d_split[0]] = d_split[1].strip()\n print('[ii] current: ' + metadata_dict['sha256'])\n\n # check if the new data is actually new\n if last_fetch_metadata['sha256'] == metadata_dict['sha256']:\n print('[ii] We already have this update, no new data to fetch.')\n return False\n else:\n do_fetch('modified')\n with open(os.path.join(DATA_PATH, 'metadata.json'), 'w') as f:\n f.write(json.dumps(metadata_dict))\n return True\n\ndef do_fetch_full(start_from_year=START_FROM_YEAR, verbose=False):\n years_to_fetch = [ y for y in range(int(START_FROM_YEAR), int(time.strftime(\"%Y\"))+1 ) ]\n if verbose:\n print('[ii] Fetching feeds: ' + str(years_to_fetch))\n\n for y in years_to_fetch:\n if not do_fetch(y):\n print(\"[!!] Could not fetch data for year \" + str(y))\n\n\ndef do_fetch(what,verbose=False):\n '''\n the 'what' parameter can be a year or 'recent' or 'modified'\n '''\n url = 'https://nvd.nist.gov/feeds/json/cve/{}/nvdcve-{}-{}.json.zip'.format(FEED_SCHEMA_VERSION, FEED_SCHEMA_VERSION, what)\n r = requests.get(url)\n if r.status_code != 200:\n print('[!!] Received status code {} when contacting {}.'.format(r.status_code, url))\n return False\n\n with closing(r), zipfile.ZipFile(io.BytesIO(r.content)) as archive:\n for f in archive.infolist():\n print(f.filename)\n data = json.loads(archive.read(f).decode())\n\n pbar = tqdm(data['CVE_Items'])\n for v in pbar:\n CVE_id = v['cve']['CVE_data_meta']['ID']\n CVE_year = CVE_id.split('-')[1]\n target_dir = os.path.join(DATA_PATH, CVE_year)\n if not os.path.isdir(target_dir):\n # pbar.set_description('Create dir ' + target_dir)\n os.makedirs(target_dir)\n\n with open(os.path.join(target_dir, CVE_id + '.json'), 'w') as f:\n # pbar.set_description('Updating: ' + CVE_id)\n f.write(json.dumps(v))\n\n return True\n\ndef need_full():\n if os.path.exists(DATA_PATH) and os.path.isdir(DATA_PATH):\n if not os.listdir(DATA_PATH):\n print('[ii] Data folder is empty')\n return True\n else:\n # Directory exists and is not empty\n print('[ii] Data folder found')\n return False\n else:\n # Directory doesn't exist\n print('[ii] Data folder is missing')\n return True\n\n@plac.annotations(\n force=(\"Force a full update of all feeds\", 'flag', 'f', bool),\n verbose=(\"Verbose mode\", 'flag', 'v', bool)\n)\ndef main(force=False, verbose=False):\n\n if force or need_full():\n do_fetch_full(verbose=verbose)\n\n # always do this, so that metadata are fine and so is the /status API\n do_update(verbose=verbose)\n\nif __name__ == \"__main__\":\n plac.call(main)\n","sub_path":"rest-nvd/app/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"274648192","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions 
and\n# limitations under the License.\n\nimport json\nimport subprocess\n\nfrom tempfile import TemporaryFile\n\nfrom polyaxon.exceptions import PolyaxonOperatorException\n\n\nclass CmdOperator:\n CMD = \"\"\n\n @classmethod\n def _execute(cls, params, env, is_json=False, stream=False):\n def _stream():\n with TemporaryFile(\"w+\") as stderr:\n ps = subprocess.Popen(params, env=env, stderr=stderr)\n exit_status = ps.wait()\n stderr.seek(0)\n if exit_status != 0:\n raise PolyaxonOperatorException(\n cmd=cls.CMD,\n args=params,\n return_code=exit_status,\n stdout=None,\n stderr=stderr,\n )\n\n def _block():\n with TemporaryFile(\"w+\") as stdout, TemporaryFile(\"w+\") as stderr:\n ps = subprocess.Popen(params, env=env, stdout=stdout, stderr=stderr)\n exit_status = ps.wait()\n stdout.seek(0)\n stderr.seek(0)\n if exit_status != 0:\n raise PolyaxonOperatorException(\n cmd=cls.CMD,\n args=params,\n return_code=exit_status,\n stdout=stdout,\n stderr=stderr,\n )\n\n return json.load(stdout) if is_json else stdout.read()\n\n return _stream() if stream else _block()\n\n @classmethod\n def check(cls):\n return True\n","sub_path":"core/polyaxon/deploy/operators/cmd_operator.py","file_name":"cmd_operator.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"434902450","text":"from django.shortcuts import render,redirect\nfrom PyDictionary import PyDictionary\n\n# Create your views here.\ndef index(request):\n return render(request, 'dictionary.htm')\n\ndef search(request):\n value = request.GET.get('text')\n dictionary = PyDictionary()\n meaning = dictionary.meaning(value)\n antonyms = dictionary.antonym(value)\n synonyms = dictionary.synonym(value)\n context = {\n 'meaning' : meaning,\n 'antonyms': antonyms,\n 'synonyms':synonyms\n }\n return render(request, 'search.htm', context)","sub_path":"djangoP/dictionary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"414115854","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 17 16:22:00 2017\n\n@author: bradley\n\nData preprocessing module\n\"\"\"\nimport GPy\nimport numpy as np\nimport sys\nimport os\nimport platform\n# if(platform.system() != 'Darwin'):\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\n# import pycuda\nfrom matplotlib import pyplot as plt\nimport math\nimport pandas as pd\nimport random\nplt.style.use('ggplot')\n\ndef select_kernel(no_dims):\n \"\"\" Selects a new kernel at random. 
Also checks whether the input is augmented or\n        single; if this has changed, the kernel is updated.\n    """\n    kernel_list = ['RBF']\n# kernel_list = ['RBF']\n    kernel_number = np.random.randint(0,1)\n\n    # getattr allows us to get the related method from the class \n    kernel_name = kernel_list[kernel_number]\n    \n    # Randomly generate variance and length scale parameters\n    var = random.random()\n    ls = random.random()\n\n    if kernel_name != 'MLP':\n        kernel = getattr(GPy.kern,kernel_list[kernel_number])(input_dim=no_dims,\\\n                 variance=var,lengthscale = ls)\n    else:\n        wv = np.random.randn()\n        bv = np.random.randn()\n        kernel = getattr(GPy.kern,kernel_list[kernel_number])(input_dim=no_dims,\\\n                 variance=var,weight_variance = wv,bias_variance=bv)\n\n    return kernel, kernel_list[kernel_number],var,ls\ndef error(prediction, groundtruth):\n    """Calculates the normalised RMSE (NRMSE)"""\n    groundtruth = groundtruth[:prediction.shape[0]]\n    assert np.size(prediction) == np.size(groundtruth)\n    diff_square = (prediction - groundtruth)**2\n    error = np.mean(diff_square)\n    nrmse = math.sqrt(error)/ (np.amax(groundtruth) - np.amin(groundtruth))\n    return nrmse\n\ndef norm_data(data):\n    """Takes training and test data and normalises it, to ensure zero mean\n    and unit variance. (Scaling to unit norm is handled separately by unit_data.)\n    \n    Input: \n    \n    data - N x M np.array\n    \n    Output\n    \n    data_norm - \\mu(norm_data) = 0 \\var(norm_data) = 1\n    """\n    mean_data = np.mean(data)\n    std_data = np.sqrt(np.var(data))\n    data_norm = (data - mean_data)/(std_data)\n    try:\n        assert np.mean(data_norm)<0.00000001\n        assert np.var(data_norm) > 0.9999999 and np.var(data_norm) < 1.0000001\n    except:\n        print('Houston, we have a problem with Norm_data',np.var(data_norm))\n    return data_norm\ndef unit_data(data):\n    dot_data = data**2\n    sum_norm = sum(dot_data)\n    scalar = 1 / math.sqrt(sum_norm)\n    data_unit = data*scalar\n    try:\n        assert sum(data_unit**2) > 0.9999999 and sum(data_unit**2) < 1.01\n    except:\n        print('Here lies the problem ' , scalar)\n    return data_unit\n    \n    \ndef sample_and_unroll(augX,res):\n    """ \n    - Builds a regular grid of sample points spanning the domain of the\n      previous outputs augX[:,0] and the original inputs augX[:,1].
\n\n    Inputs:\n    \n    augX - Updated inputs + original inputs\n    res  - Requested number of grid points\n    \n    Outputs:\n    \n    Xsamp - A regular grid of points covering the domain of augX \n    \n    """\n    resolution = res\n    lower_domain = np.amin(augX,axis=0)\n    upper_domain = np.amax(augX,axis=0)\n\n    D = lower_domain.shape[0];\n    N_1D = int(np.ceil(np.power(resolution,1/D)))\n    large_arr = np.zeros((N_1D,D))\n    large_arr2 = np.zeros((N_1D,D))\n\n    for ii in range(1):\n        large_arr[:,ii] = np.linspace(lower_domain[ii],upper_domain[ii],N_1D)\n\n    for ii in range(1):\n        large_arr2[:,ii] = np.linspace(lower_domain[ii],upper_domain[ii],N_1D)\n\n    large_arr = np.repeat(large_arr,N_1D/2,axis=1)\n    large_arr2 = np.repeat(large_arr2.T,N_1D/2,axis=0)\n    coloum2 = np.ravel(large_arr)[:,None]\n    coloum1 = np.ravel(large_arr2)[:,None]\n    \n    Xsamp = np.concatenate((coloum1,coloum2),axis=1)\n\n    return Xsamp\n\ndef save_data(input_data,rmse,kernel_params,post_mean,post_var,parameters):\n    no_points = parameters[0]\n    option = parameters[1]\n    model = parameters[3]\n    data_keys = parameters[4]\n    layer_no = parameters[5]\n    kernel_name = parameters[6]\n    var = parameters[7][0]\n    ls = parameters[7][1]\n    \n    predicted = post_mean.flatten()\n    actual = input_data[data_keys[1]].flatten()\n    time_pred = input_data['val'][0:no_points,0].flatten()\n    time_all = input_data['val'][:,0].flatten()\n    cov_diag = post_var.flatten()\n    kernel_params = kernel_params.flatten()\n    \n    d = dict(Time_pred = time_pred, Time = time_all,Predicted = predicted, Actual = actual, Diag_cov = cov_diag,Kernel_params = kernel_params, Kernels = kernel_name, NRMSE = rmse, Variance = var, Length_scale = ls)\n    df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in d.items() ]))\n    PATH = '../Data/' +model+ '/data_layer_/' + option \n    file = PATH + '/'+str(layer_no)+'_' + str(no_points) +'.csv'\n    os.makedirs(PATH,exist_ok = True)\n    df.to_csv(file)\n    \ndef plotting(data,rmse,parameters):\n    """For plotting the graphs to verify the confidence in the prediction of\n    our training data. Saves plots as .png files under ../Plots.
\n    \n    Inputs:\n    \n    data - [Xtrain, Ytrain, Xtest, Ytest, post_mean, post_var]\n    \n    rmse - The NRMSE shown in the plot title\n    \n    parameters - Run settings (number of points, option, dimensions, model, ...)\n    \n    Output:\n    \n    plots - Saved as png to given file name \n    """\n    no_points = parameters[0]\n    layer_no = parameters[5]\n    option = parameters[1]\n    no_dims = parameters[2]\n    model = parameters[3]\n    Xtrain = data[0]\n    Ytrain = data[1]\n    Xtest = data[2]\n    Ytest = data[3]\n    post_mean = data[4]\n    post_var = data[5]\n\n    s = np.sqrt(post_var.flatten())\n    mu = post_mean.flatten()\n    plt.figure(1)\n    plt.clf()\n    plt.hold(True)\n    \n    if no_dims == 2:\n        plt.plot(Xtest[:no_points,0].flatten(),Ytrain.flatten(), 'r.',label = 'training data')\n    elif no_dims == 1:\n        plt.plot(Xtrain.flatten(),Ytrain.flatten(), 'r.',label = 'training data')\n    \n    plt.plot(Xtest[no_points:,0].flatten(), Ytest[no_points:].flatten(), 'b-', label='test data')\n    plt.fill_between(Xtest[:,0].flatten(), mu-2*s, mu+2*s, color="#C0C0C0", label = 'mu +/- 2sd')\n    plt.plot(Xtest[:,0].flatten(), mu, 'w-', lw=1, label = 'Prediction')\n\n    plt.legend(loc='best',prop={'size':4})\n    name_of_plot = 'Layer ' + str(layer_no) + " with NRMSE " + str(rmse)\n    plt.title(name_of_plot)\n    name = 'Prediction_in_layer_' + str(layer_no)\n    PATH = '../Plots/'+ model + '/'+ option\n    os.makedirs(PATH,exist_ok = True)\n    file = PATH + '/_' + name +'_'+str(parameters[0]) +'.png'\n    plt.savefig(file, format = 'png',dpi=600)\n\n\ndef plot_cov_ft(kernel,Xtrain,parameters):\n    layer_no = parameters[5] \n    option = parameters[1]\n    model = parameters[3]\n    \n    plt.figure(2)\n    plt.clf()\n    k = kernel.K(Xtrain)\n    plt.imshow(k)\n    plt.colorbar()\n    name = 'covariance_function_layer_ ' + str(layer_no)\n    name_of_plot = 'Layer ' + str(layer_no)\n    plt.title(name_of_plot)\n    PATH = '../Plots/'+ model + '/'+ option\n    os.makedirs(PATH,exist_ok = True)\n    file = PATH + '/_' + name +'_' +str(parameters[0]) +'.png'\n    plt.savefig(file, format ='png',dpi=600)\n","sub_path":"src/exec/Testing/dataprocessing.py","file_name":"dataprocessing.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"510265995","text":"from numbers import Number\nimport random as rd\nimport time\nimport numpy as np\n\n\nclass Matrice:\n\n    # constructor\n    def __init__(self, l, c=None, fill=0.0):\n        self.lignes = l\n\n        # square matrix?\n        if c is None:\n            self.colonnes = l\n        else:\n            self.colonnes = c\n\n        # create an l x c matrix filled with fill\n        self.matrice = [[fill] * self.colonnes for i in range(self.lignes)]\n\n    def __str__(self):\n        result = ""\n        for line in self.matrice:\n            result += str(line) + "\n"\n        return result\n\n    '''\n    arithmetic operators (+,-,*,/)\n    '''\n\n    # +\n    def __add__(self, other):\n        try:\n            if not isinstance(other, Matrice):\n                raise TypeError('Type Matrice requis')\n\n            # same dimensions?\n            if self.colonnes != other.colonnes or self.lignes != other.lignes:\n                raise ValueError('Les matrices doivent etre de meme format..')\n\n            reponse = Matrice(self.lignes, self.colonnes)\n            for i in range(self.lignes):\n                for j in range(self.colonnes):\n                    reponse.matrice[i][j] = self.matrice[i][j] + other.matrice[i][j]\n            return reponse\n\n        except (TypeError, ValueError) as err:\n            print(err.message)\n            return None\n\n    # -\n    def __sub__(self, other):\n        try:\n            if not isinstance(other, Matrice):\n                raise TypeError('Type Matrice requis')\n\n            if self.colonnes != other.colonnes or self.lignes != other.lignes:\n                raise
ValueError('Les matrices doivent etre de meme format..')\n\n            reponse = Matrice(self.lignes, self.colonnes)\n            for i in range(self.lignes):\n                for j in range(self.colonnes):\n                    reponse.matrice[i][j] = self.matrice[i][j] - other.matrice[i][j]\n            return reponse\n\n        except (TypeError, ValueError) as err:\n            print(err.message)\n            return None\n\n    # *\n    def __mul__(self, other):\n        try:\n            # scalar multiplication\n            if isinstance(other, Number):\n                reponse = Matrice(self.lignes, self.colonnes)\n                for i in range(self.lignes):\n                    for j in range(self.colonnes):\n                        reponse.matrice[i][j] = self.matrice[i][j] * other\n                return reponse\n\n            # are both operands matrices?\n            if not isinstance(other, Matrice):\n                raise TypeError('Type Matrice ou Number requis')\n\n            # Alxp * Bpxc\n            if self.colonnes != other.lignes:\n                raise ValueError('Toutes les multiplications doivent respecter la contrainte: Alxp * Bpxc..')\n\n            reponse = Matrice(self.lignes, other.colonnes)\n            # walk over the result matrix\n            for i in range(self.lignes):\n                time.sleep(0.1)  # slow it down, otherwise it runs too fast and the difference is in milliseconds..\n                for j in range(other.colonnes):\n                    # row of m multiplied by column of n\n                    for k in range(self.colonnes):\n                        reponse.matrice[i][j] += self.matrice[i][k] * other.matrice[k][j]\n            return reponse\n\n        except (ValueError, TypeError) as err:\n            print(err.message)\n            return None\n\n    '''\n    2*A == A*2\n    false for matrices: A*B != B*A\n    we assume __mul__ will be called..\n    '''\n    __rmul__ = __mul__\n\n    # /\n    def __div__(self, other):\n        return self.__truediv__(other)\n\n    def __truediv__(self, other):\n        try:\n            self.makeFloat()\n\n            if isinstance(other, Number):\n                if other == 0.0:\n                    raise ZeroDivisionError('Division par zero..')\n\n                other = float(other)\n                reponse = Matrice(self.lignes, self.colonnes)\n                for i in range(self.lignes):\n                    for j in range(self.colonnes):\n                        reponse.matrice[i][j] = self.matrice[i][j] / float(other)\n                return reponse\n\n            if not isinstance(other, Matrice):\n                raise TypeError('Type Matrice ou Number requis')\n\n            if not other.estCarree():\n                raise ValueError('Matrices carrees seulement')\n\n            other.makeFloat()\n            # A/B ==> A*BInverse // the closest thing there is to a matrix division.. or so I have been told..\n            if other.estInversible():\n                return self * other.Inverse()\n            else:\n                raise ValueError('La matrice B \'(A/B)\' n\'est pas inversible..
')\n\n        except (ZeroDivisionError, TypeError, ValueError) as err:\n            print(err.message)\n            return None\n\n    def randomFilling(self, start=0, end=25):\n        for i in range(self.lignes):\n            for j in range(self.colonnes):\n                self.matrice[i][j] = rd.randint(start, end)\n\n    '''\n    matrix operations\n    '''\n\n    # valeurabsolue is used to avoid falsely empty diagonals..\n    def Trace(self, valeurabsolue=False):\n        try:\n            if not self.estCarree():\n                raise ValueError('Matrices carrees seulement')\n\n            reponse = 0\n            for i in range(self.lignes):\n                if valeurabsolue:\n                    reponse += abs(self.matrice[i][i])\n                else:\n                    reponse += self.matrice[i][i]\n            return reponse\n\n        except ValueError as err:\n            print(err.message)\n            return None\n\n    def estCarree(self):\n        if self.colonnes == self.lignes:\n            return True\n        return False\n\n    def estReguliere(self):\n        if self.Determinant() != 0:\n            return True\n        return False\n\n    def Determinant(self):\n        try:\n            self.makeFloat()\n\n            if not self.estCarree():\n                raise ValueError('Matrices carrees seulement')\n\n            # base case\n            if self.lignes == 1:\n                return self.matrice[0][0]\n            if self.lignes == 2:\n                return self.matrice[0][0] * self.matrice[1][1] - self.matrice[0][1] * self.matrice[1][0]\n\n            # else if diagonal or triangular\n            if self.estTriangulaire():\n                reponse = self.matrice[0][0]\n                for i in range(1, self.lignes):\n                    for j in range(1, self.colonnes):\n                        if i == j:\n                            reponse *= self.matrice[i][j]\n                return reponse\n\n            # else\n            i = 0  # we expand along the first row..\n            reponse = 0\n            for j in range(self.colonnes):\n                m = Matrice(self.colonnes - 1)\n                # new matrix: self minus row i, column j\n                ligne = 1\n                for k in range(m.lignes):\n                    colonne = 0\n                    for l in range(m.colonnes):\n                        if j == l:\n                            colonne += 1\n                        m.matrice[k][l] = self.matrice[ligne][colonne]\n                        colonne += 1\n                    ligne += 1\n                # determine the sign\n                signe = self.matrice[i][j]\n                if (i + j) % 2 != 0:\n                    signe *= -1\n                reponse += signe * m.Determinant()\n            return reponse\n\n        except ValueError as err:\n            print(err.message)\n            return None\n\n    def Inverse(self):\n        try:\n            self.makeFloat()\n\n            if not self.estCarree():\n                raise ValueError('Matrices carrees seulement')\n\n            if not self.estReguliere():\n                raise ZeroDivisionError('Le determinant ne peut etre zero pour la division..')\n\n            if self.estDiagonale():\n                reponse = Matrice(self.lignes)\n                for i in range(self.lignes):\n                    if self.matrice[i][i] != 0:\n                        reponse.matrice[i][i] = 1 / self.matrice[i][i]\n                    else:\n                        reponse.matrice[i][i] = 0\n                return reponse\n\n            reciprocal = self.Determinant()\n            if self.lignes == 2:\n                m = Matrice(2)\n                m.matrice[0][0] = self.matrice[1][1]\n                m.matrice[1][1] = self.matrice[0][0]\n                m.matrice[0][1] = self.matrice[0][1] * -1\n                m.matrice[1][0] = self.matrice[1][0] * -1\n                return m / reciprocal\n\n            comatriceT = self.CoMatrice().Transposee()\n            return comatriceT / reciprocal\n\n        except (ValueError, ZeroDivisionError) as err:\n            print(err.message)\n            return None\n\n    def CoMatrice(self):\n        try:\n            if not self.estCarree():\n                raise ValueError('Matrices carrees seulement')\n\n            if self.lignes == 1:\n                return self\n\n            reponse = Matrice(self.lignes)\n            for i in range(self.lignes):\n                for j in range(self.colonnes):\n                    m = Matrice(self.lignes - 1)\n                    ligne = 0\n                    for k in range(m.lignes):\n                        colonne = 0\n                        if ligne == i:\n                            ligne += 1\n                        for l in range(m.colonnes):\n                            if colonne == j:\n                                colonne += 1\n                            m.matrice[k][l] = self.matrice[ligne][colonne]\n                            colonne += 1\n                        ligne += 1\n\n                    reponse.matrice[i][j] = m.Determinant()\n                    # determine the sign\n                    if (i + j) % 2 != 0:  # and reponse.matrice[i][j]!=0:\n                        
reponse.matrice[i][j] *= -1\n\n return reponse\n\n except ValueError as err:\n print(err.message)\n return None\n\n def estDiagonale(self):\n try:\n if not self.estCarree():\n raise ValueError('Matrices carrees seulement')\n\n for i in range(self.lignes):\n for j in range(self.colonnes):\n if i != j and self.matrice[i][j] != 0:\n return False\n\n # diagonale vide \n if self.Trace(True) == 0:\n raise ValueError('Matrice vide..')\n\n return True\n\n except ValueError as err:\n print(err.message)\n return None\n\n def estTriangulaire(self, sens=None, stricte=False):\n try:\n if not self.estCarree():\n raise ValueError('Matrices carrees seulement')\n\n inferieure = False\n superieure = False\n for i in range(self.lignes):\n for j in range(self.colonnes):\n if i < j and self.matrice[i][j] != 0:\n inferieure = True\n elif i > j and self.matrice[i][j] != 0:\n superieure = True\n\n diagonaleVide = self.Trace(True) == 0\n if not inferieure and not superieure and diagonaleVide:\n raise ValueError('Matrice vide..')\n\n if stricte and not diagonaleVide:\n return False\n\n # xor\n if superieure != inferieure:\n if sens != None:\n if (sens == \"inferieure\" or sens == \"i\") and not inferieure:\n return False\n elif (sens == \"superieure\" or sens == \"s\") and not superieure:\n return False\n else:\n raise ValueError(\n \"Seules les entrees suivantes sont acceptees:\\n 'inferieure', 'i', 'superieure', 's'\")\n return True\n return False\n\n except ValueError as err:\n print(err.message)\n return None\n\n def estInversible(self):\n if not self.estCarree():\n return False\n if self.Determinant() == 0:\n return False\n return True\n\n def Transposee(self):\n if self.lignes == 1:\n return self\n\n reponse = Matrice(self.colonnes, self.lignes)\n for i in range(self.lignes):\n for j in range(self.colonnes):\n reponse.matrice[j][i] = self.matrice[i][j]\n return reponse\n\n # pour jacobi\n def delta(self, other):\n try:\n if not isinstance(other, Matrice):\n raise ValueError('Matrice attendue')\n if self.lignes != other.lignes or self.colonnes != other.colonnes:\n raise ValueError('Matrices de meme dimensions requises')\n if self.colonnes != 1 or other.colonnes != 1:\n raise ValueError('Vecteur requis (\\'Matrice colonne, xLignes 0Colonne\\')')\n diff = 0\n for i in range(self.lignes):\n diff += abs(self.matrice[i][0] - other.matrice[i][0])\n\n return diff / self.lignes\n\n except ValueError as err:\n print(err.message)\n return None\n\n # strictement dominante diagonalement \n def estSDD(self):\n for i in range(self.lignes):\n a = 0\n x = 0\n for j in range(self.colonnes):\n if i == j:\n a = self.matrice[i][j]\n else:\n x += abs(self.matrice[i][j])\n if not a > x:\n return False\n return True\n\n def makeFloat(self):\n for i in range(self.lignes):\n for j in range(self.colonnes):\n self.matrice[i][j] = float(self.matrice[i][j])\n\n\ndef mean(matrice):\n mean_vector = []\n for col in range(matrice.colonnes):\n total = 0\n for ligne in range(matrice.lignes):\n total += matrice.matrice[ligne][col]\n mean_vector.append(total / matrice.lignes)\n return mean_vector\n\n\ndef mean_matrice(matrice):\n result = Matrice(matrice.lignes, matrice.colonnes)\n mean_vector = mean(matrice)\n for ligne in range(len(matrice.matrice)):\n result.matrice[ligne] = mean_vector\n return result\n\n\ndef substract_mean(matrice):\n return matrice - mean_matrice(matrice)\n\n\ndef Identite(dimension):\n reponse = Matrice(dimension)\n for i in range(dimension):\n reponse.matrice[i][i] = 1\n return reponse\n\n\ndef 
MultiplieXMatrices(matrices):\n try:\n # matrices doit etre un dictionnaire..\n if not isinstance(matrices, dict):\n raise TypeError('Les matrices doivent etre stockees dans un dictionnaire..')\n\n # au moins 2 matrices\n if len(matrices) < 2:\n raise ValueError('Il faut au moins 2 matrices..')\n\n # toutes sont Alxp * Bpxc\n for i in range(len(matrices) - 1):\n if not isinstance(matrices[i], Matrice) or not isinstance(matrices[i + 1], Matrice):\n raise TypeError('Tous les elements doivent etre de type Matrice..')\n if matrices[i].colonnes != matrices[i + 1].lignes:\n raise ValueError('Toutes les multiplications doivent respecter la contrainte: Alxp * Bpxc..')\n\n # peupler d\n d = [0] * (len(matrices) + 1)\n d[0] = matrices.get(0).lignes\n d[1] = matrices.get(0).colonnes\n for i in range(1, len(matrices)):\n d[i + 1] = matrices.get(i).colonnes\n\n # avant parantheses\n csp = 0\n for i in range(len(d) - 2):\n csp += d[i] * d[i + 1] * d[i + 2]\n print('Cout sans parantheses: ' + str(csp))\n\n # recoit et evalue une string de format ((A*B)*(C*D))*(E*F)...\n return eval(CalculeMeilleurOrdreParantheses(d))\n\n except (ValueError, TypeError) as err:\n print(err.message)\n return None\n\n\ndef CalculeMeilleurOrdreParantheses(d):\n size = len(d) - 1\n # tableau des couts\n couts = [[None] * size for i in range(size)]\n separation = [[None] * size for i in range(size)]\n\n # etapes\n for etape in range(size):\n for i in range(size - etape):\n if etape == 0:\n couts[i][i] = 0\n elif etape == 1:\n couts[i][i + 1] = d[i] * d[i + 1] * d[i + 2]\n separation[i][i + 1] = i + 1\n else:\n minimum = -1\n # les cas possibles: (M11+M24+d0d1d4 / M12+M34+d0d2d4 / ...\n for k in range(i, i + etape):\n least = couts[i][k] + couts[k + 1][i + etape] + d[i] * d[k + 1] * d[i + etape + 1]\n if minimum == -1:\n minimum = least\n separation[i][i + etape] = k + 1\n if least < minimum:\n minimum = least\n separation[i][i + etape] = k + 1\n couts[i][i + etape] = minimum\n\n # on formatte la string pour permettre l'evaluation..\n parenthesis_order = StringFormatParenthesageMinimal(separation, 0, size - 1)\n parenthesis_order = parenthesis_order.replace(' m', '*m')\n parenthesis_order = parenthesis_order.replace(' ', '')\n parenthesis_order = parenthesis_order.replace(')m', ')*m')\n parenthesis_order = parenthesis_order.replace(')(', ')*(')\n print('Meilleur ordre:')\n print(parenthesis_order)\n print('Cout: ' + str(couts[0][size - 1]))\n return parenthesis_order\n\n\ndef StringFormatParenthesageMinimal(l, i, j):\n if i == j:\n return \"matrices.get(\" + str(i) + \") \"\n else:\n reponse = \"(\"\n reponse += StringFormatParenthesageMinimal(l, i, l[i][j] - 1)\n reponse += StringFormatParenthesageMinimal(l, l[i][j], j)\n reponse += \")\"\n return reponse\n\n\ndef Jacobi(A):\n if not A.estCarree():\n raise ValueError('La matrice doit etre carree..')\n n = A.colonnes # matrice carree\n maxit = 100 # nombre d'iterations maximal\n eps = 1.0e-15 # niveau d'acuitee\n pi = np.pi\n ev = Matrice(1, n) # initialisation des eigenvalues\n U = Matrice(n) # initialisation des eigenvector\n for i in range(0, n):\n U.matrice[i][i] = 1.0\n\n for t in range(0, maxit):\n s = 0 # compute sum of off-diagonal elements in A(i,j)\n for i in range(0, n):\n s = s + np.sum(np.abs(A.matrice[i][(i + 1):n]))\n if s < eps: # diagonal form reached\n for i in range(0, n):\n ev.matrice[0][i] = A.matrice[i][i]\n break\n else:\n limit = s / (n * (n - 1) / 2.0) # average value of off-diagonal elements\n for i in range(0, n - 1): # loop over lines of matrix\n for 
j in range(i + 1, n): # loop over columns of matrix\n if np.abs(A.matrice[i][j]) > limit: # determine (ij) such that |A(i,j)| larger than average\n # value of off-diagonal elements\n denom = A.matrice[i][i] - A.matrice[j][j] # denominator of Eq. (3.61)\n if np.abs(denom) < eps:\n phi = pi / 4 # Eq. (3.62)\n else:\n phi = 0.5 * np.arctan(2.0 * A.matrice[i][j] / denom) # Eq. (3.61)\n si = np.sin(phi)\n co = np.cos(phi)\n for k in range(i + 1, j):\n store = A.matrice[i][k]\n A.matrice[i][k] = A.matrice[i][k] * co + A.matrice[k][j] * si # Eq. (3.56)\n A.matrice[k][j] = A.matrice[k][j] * co - store * si # Eq. (3.57)\n for k in range(j + 1, n):\n store = A.matrice[i][k]\n A.matrice[i][k] = A.matrice[i][k] * co + A.matrice[j][k] * si # Eq. (3.56)\n A.matrice[j][k] = A.matrice[j][k] * co - store * si # Eq. (3.57)\n for k in range(0, i):\n store = A.matrice[k][i]\n A.matrice[k][i] = A.matrice[k][i] * co + A.matrice[k][j] * si\n A.matrice[k][j] = A.matrice[k][j] * co - store * si\n store = A.matrice[i][i]\n A.matrice[i][i] = A.matrice[i][i] * co * co + 2.0 * A.matrice[i][j] * co * si + A.matrice[j][j] * si * si # Eq. (3.58)\n A.matrice[j][j] = A.matrice[j][j] * co * co - 2.0 * A.matrice[i][j] * co * si + store * si * si # Eq. (3.59)\n A.matrice[i][j] = 0.0 # Eq. (3.60)\n for k in range(0, n):\n store = U.matrice[k][j]\n U.matrice[k][j] = U.matrice[k][j] * co - U.matrice[k][i] * si # Eq. (3.66)\n U.matrice[k][i] = U.matrice[k][i] * co + store * si # Eq. (3.67)\n return ev, U\n","sub_path":"matrices.py","file_name":"matrices.py","file_ext":"py","file_size_in_byte":20819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"482771092","text":"import numpy as np\n\nimport random\n\nimport Levenshtein\n\nimport copy\n\nclass Player:\n\tdef __init__(self, deck):\n\t\tself.hand = []\n\t\tself.kb = dict()\n\t\tself.reversekb = dict()\n\t\tself.binhand = [0]*len(deck)\n\t\tself.hand_str = str(0)\n\t\tself.levenlist = []\n\t\t\n\t\tself.levenlistcopy = []\n\n\t\tself.win = False\n\t\t\n\n\t\t#The knowledge base keeps track of the list index which represents each card\n\t\tindexes = list(range(len(deck)))\n\t\tself.kb = dict(zip(deck,indexes))\n\t\tself.reversekb = {v: k for k, v in self.kb.items()}\n\n\tdef playTurn(self, deck, discard, draw, seq, binstr):\n\t\tself.converter(deck)\n\t\tself.levendist(binstr)\n\t\tself.pick(seq, discard, draw)\n\t\tself.converter(deck)\n\t\tself.levendist(binstr)\n\t\tself.drop(seq, discard)\n\t\tself.converter(deck)\n\t\tself.levendist(binstr)\n\t\tif 0 in self.levenlist:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef converter(self, deck):\n\t\tself.binhand = [0]*len(deck)\n\t\tfor i in self.hand:\n\t\t\tx = self.kb.get(i)\n\t\t\tself.binhand[x] = 1\n\t\tself.hand_str = ''.join(map(str, self.binhand))\n\n\tdef levendist(self, binstr):\n\t\tself.levenlist = []\t\n\t\tfor i in range(len(binstr)):\n\t\t\ttemp = binstr[i]\n\t\t\ta = Levenshtein.distance(temp, self.hand_str)\n\t\t\tself.levenlist.append(a)\n\t\treturn self.levenlist\n\n\tdef pick(self, seq, discard, draw):\n\t\tself.priority1 = []\n\t\tself.priority2 = []\n\t\tself.closest1 = []\n\t\tself.closest2 = []\n\t\tself.pickup1 = []\n\t\tself.pickup2 = []\n\n\t\tself.levenlistcopy = copy.deepcopy(self.levenlist)\n\t\tfor i in range(len(self.levenlistcopy)):\n\t\t\tif self.levenlistcopy[i] == min(self.levenlistcopy):\n\t\t\t\tself.priority1.append(i)\n\t\tfor i in self.priority1:\n\t\t\tself.closest1.append(seq[i])\n\t\tfor i in 
self.priority1:\n\t\t\tself.levenlistcopy[i] = 1000\n\n\t\tfor i in range(len(self.levenlistcopy)):\n\t\t\tif self.levenlistcopy[i] == min(self.levenlistcopy):\n\t\t\t\tself.priority2.append(i)\n\t\tfor i in self.priority2:\n\t\t\tself.closest2.append(seq[i])\n\n\t\tfor i in self.closest1:\n\t\t\tfor j in range(len(self.binhand)):\n\t\t\t\tif self.binhand[j] != i[j]:\n\t\t\t\t\tif self.binhand[j] == 0 and i[j] == 1:\n\t\t\t\t\t\tcard = self.reversekb.get(j)\n\t\t\t\t\t\tself.pickup1.append(card)\n\t\tfor i in self.closest2:\n\t\t\tfor j in range(len(self.binhand)):\n\t\t\t\tif self.binhand[j] != i[j]:\n\t\t\t\t\tif self.binhand[j] == 0 and i[j] == 1:\n\t\t\t\t\t\tcard = self.reversekb.get(j)\n\t\t\t\t\t\tself.pickup2.append(card)\n\n\t\t#This strategy stays conservative: it only takes the discard-pile card when it helps pickup1 or pickup2, i.e. the two lowest-distance sequence tiers\n\t\tz = discard[-1]\n\t\tif z in self.pickup1 or z in self.pickup2:\n\t\t\tself.hand.append(z)\n\t\t\tdiscard.remove(discard[-1])\n\t\telse:\n\t\t\tz = draw[-1]\n\t\t\tself.hand.append(z)\n\t\t\tdraw.remove(draw[-1])\n\n\tdef drop(self, seq, discard):\n\t\tself.priority1 = []\n\t\tself.closest1 = []\n\t\tself.drop1 = []\n\n\t\tself.levenlistcopy = copy.deepcopy(self.levenlist)\n\t\tminLeven = min(self.levenlistcopy)\n\t\tfor i in range(len(self.levenlistcopy)):\n\t\t\tif self.levenlistcopy[i] == minLeven:\n\t\t\t\tself.priority1.append(i)\n\t\tfor i in self.priority1:\n\t\t\tself.closest1.append(seq[i])\n\n\t\tfor i in self.closest1:\n\t\t\tfor j in range(len(self.binhand)):\n\t\t\t\tif self.binhand[j] != i[j]:\n\t\t\t\t\tif self.binhand[j] == 1 and i[j] == 0:\n\t\t\t\t\t\tcard = self.reversekb.get(j)\n\t\t\t\t\t\tself.drop1.append(card)\n\n\n\t\t\n\t\t#This strategy is very conservative and only checks in drop1, i.e., only for the minimum distance sequences\n\t\tz = random.sample(self.drop1, 1)\n\t\tself.hand.remove(z[0])\n\t\tdiscard.append(z[0])\n\n","sub_path":"samples/detbprobc.py","file_name":"detbprobc.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"336851170","text":"import os\nimport sys\nfrom datetime import timedelta\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nPROJECT_NAME = os.environ['PROJECT_NAME']\nAPPLICATION_USER = os.environ['PROJECT_NAME']\nAPPLICATION_GROUP = os.environ['PROJECT_NAME']\nENV_TYPE = os.environ['ENVIRONMENT_TYPE']\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ['DJANGO_SECRET_KEY']\nFILE_ENCRYPTION_KEY = os.environ['FILE_ENCRYPTION_KEY']\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = eval(os.environ['DJANGO_ISNOT_PRODUCTION'])\n\nALLOWED_HOSTS = ['{0}'.format(i) for i in os.environ['ALLOWED_HOSTS'].strip().split(' ')]\n\n# INTERNAL_IPS = ['corporalocal.nz', 'corporalocal.io', 'dev.koreromaori.com', '10.1.160.139', '127.0.0.1']\n\n# For ELB Certificate & NGINX settings.\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Application definition\n\nINSTALLED_APPS = [\n    'dal',\n    'dal_select2',\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    
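# NOTE: app ordering matters to Django's template and static lookup; contrib apps are grouped here, third-party and project apps follow.\n    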
'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.postgres',\n 'collectfast',\n 'corpora.staticfiles.CorporaStaticFilesConfig',\n 'django.contrib.sites',\n 'django.contrib.sitemaps',\n 'corpora',\n 'corpus',\n 'demo',\n 'people',\n 'license',\n 'message',\n 'transcription',\n 'reo_api',\n 'helpers',\n 'storages',\n 'djangobower',\n 'sekizai',\n 'compressor',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'analytical',\n 'ckeditor',\n 'ckeditor_uploader',\n 'corsheaders',\n 'webpack_loader',\n 'django_extensions',\n]\n\nMIDDLEWARE = [\n\n\n 'corsheaders.middleware.CorsMiddleware',\n\n # 'debug_toolbar.middleware.DebugToolbarMiddleware',\n\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n #'django.middleware.cache.UpdateCacheMiddleware', # <= for caching entire site\n\n\n 'django.middleware.locale.LocaleMiddleware',\n\n\n\n 'corpora.middleware.LanguageMiddleware',\n 'django.middleware.common.CommonMiddleware',\n #'django.middleware.cache.FetchFromCacheMiddleware', # <= for caching entire site\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'corpora.middleware.PersonMiddleware',\n 'corpora.middleware.LicenseMiddleware',\n 'corpora.middleware.ExpoLoginMiddleware',\n 'django.contrib.sites.middleware.CurrentSiteMiddleware',\n\n]\n\nDEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": 'corpora.middleware.show_toolbar_callback'\n}\n\n\nROOT_URLCONF = 'corpora.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.i18n',\n 'sekizai.context_processors.sekizai',\n 'license.context_processors.license',\n 'corpora.context_processors.site',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'corpora.wsgi.application'\n\n# CORS\nCORS_ORIGIN_WHITELIST = \\\n ['https://{0}'.format(i) for i in os.environ['ALLOWED_HOSTS'].split(',')]\nCORS_ORIGIN_WHITELIST = tuple(CORS_ORIGIN_WHITELIST)\n\n\nCORS_ORIGIN_WHITELIST = CORS_ORIGIN_WHITELIST + ('https://172.28.128.13', 'https://kaituhi.nz')\n\nCORS_ORIGIN_ALLOW_ALL = True\n\n# STORAGES #\nDEFAULT_FILE_STORAGE = os.environ['FILE_STORAGE']\nAWS_ACCESS_KEY_ID = os.environ['AWS_ID']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET']\nAWS_STORAGE_BUCKET_NAME = os.environ['AWS_BUCKET']\nAWS_QUERYSTRING_AUTH = False\nAWS_DEFAULT_ACL = 'private'\nAWS_S3_OBJECT_PARAMETERS = {\n 'CacheControl': 'max-age=86400',\n}\n\n# S3 only access\nAWS_ACCESS_KEY_ID_S3 = os.environ['AWS_ID_S3']\nAWS_SECRET_ACCESS_KEY_S3 = os.environ['AWS_SECRET_S3']\n\n\nCKEDITOR_UPLOAD_PATH = \"ckeditor_uploads/\"\nCKEDITOR_IMAGE_BACKEND = 'pillow'\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n# We use ansible to create the environment variables to use.\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ['DATABASE_NAME'], # TODO: Give this a better name?\n 'USER': os.environ['DATABASE_USER'],\n 'PASSWORD': os.environ['DATABASE_PASSWORD'], # TODO: Secure this!\n 
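# Connection settings are read from environment variables created by Ansible (see the note above), so one settings file serves every deploy.\n        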
'HOST': os.environ['DATABASE_HOST'],\n        'PORT': '5432',\n    },\n    'replica': {\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        'NAME': os.environ['DATABASE_NAME'], # TODO: Give this a better name?\n        'USER': os.environ['DATABASE_USER'],\n        'PASSWORD': os.environ['DATABASE_PASSWORD'], # TODO: Secure this!\n        'HOST': os.environ['DATABASE_HOST'].replace('base.','base-replica.'),\n        'PORT': '5432',\n    },\n    }\nDATABASE_ROUTERS = ['corpora.db_routes.ReadRouter']\n\n# All auth\nAUTHENTICATION_BACKENDS = (\n\n    # Needed to login by username in Django admin, regardless of `allauth`\n    'django.contrib.auth.backends.ModelBackend',\n\n    # `allauth` specific authentication methods, such as login by e-mail\n    'allauth.account.auth_backends.AuthenticationBackend',\n\n)\nLOGIN_REDIRECT_URL = 'people:profile' # is there a more foolproof option?\nACCOUNT_ADAPTER = \"people.adapter.PersonAccountAdapter\"\nSOCIALACCOUNT_ADAPTER = \"people.adapter.PersonSocialAccountAdapter\"\nSOCIALACCOUNT_PROVIDERS = {\n    'facebook': {\n        'METHOD': 'oauth2',\n        'SCOPE': ['email', 'public_profile', ], # Will require app approval for user_about_me access.\n        'FIELDS': [ # see https://developers.facebook.com/docs/graph-api/reference/user/\n            'id',\n            'email',\n            'name',\n            'first_name',\n            'last_name',\n            'locale',\n            'timezone',\n            'languages'],\n        # 'LOCALE_FUNC': 'path.to.callable',\n        'VERSION': 'v2.4'},\n    'google': {\n        'SCOPE': ['profile', 'email'], # https://developers.google.com/identity/protocols/OAuth2\n        'FIELDS': [\n            'id',\n            'email',\n            'name',\n            'first_name',\n            'last_name',\n            'locale',\n            'timezone',\n            'languages'],\n    }}\n\nACCOUNT_AUTHENTICATION_METHOD = \"email\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_EMAIL_SUBJECT_PREFIX = None\n# ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/'\n\n\n# Email\nEMAIL_BACKEND = 'django_ses.SESBackend' # Use AWS Simple Email Service\nAWS_REGION = None if 'local' in ENV_TYPE else os.environ['AWS_REGION']\nAWS_SES_REGION_NAME = 'ap-southeast-2'\nAWS_SES_REGION_ENDPOINT = \"email.ap-southeast-2.amazonaws.com\"\nDEFAULT_FROM_EMAIL = u'\"Kōrero Māori\" '\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n    {\n        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n    },\n    {\n        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n    },\n    {\n        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n    },\n    {\n        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n    },\n]\n\n# Site ID\nSITE_ID = 1\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\nTIME_ZONE = 'Pacific/Auckland'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.environ['STATIC_PATH'] #os.path.join(BASE_DIR, 'public', 'static')\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.environ['MEDIA_PATH']\n\nBOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'corpora/static')\n\n# BOWER IS DEPRECATED - FIND AN ALTERNATIVE!\nBOWER_INSTALLED_APPS = {\n    'jquery',\n    'jquery-ui',\n    'bootstrap',\n    #'opus-recorderjs#v5.1.1',\n    'js-cookie',\n    'popper.js',\n    'chart.js',\n}\n\n# STATICFILES_DIRS = (\n#     os.path.join(BASE_DIR, '...', 'static'),\n# )\n\nSTATICFILES_FINDERS = (\n    
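# Finder order sets lookup precedence: the first finder that can serve a given static path wins.\n    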
'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n\n    # Additional finders\n    'djangobower.finders.BowerFinder',\n    # 'sass_processor.finders.CssFinder',\n    'compressor.finders.CompressorFinder',\n\n)\n\n# memcache_server = os.environ['DJANGO_MEMCACHED_IP']\n# memcache_servers = []\n# for srv in memcache_server.split(','):\n#     srv = srv.strip()\n#     if srv != '':\n#         memcache_servers.append(\n#             \"{0}:{1}\".format(\n#                 srv.strip(), os.environ['DJANGO_MEMCACHED_PORT']))\n\n\n\n\n# DJANGO-COMPRESSOR SETTINGS\nCOMPRESS_PRECOMPILERS = (\n    # ('text/coffeescript', 'coffee --compile --stdio'),\n    # ('text/less', 'lessc {infile} {outfile}'),\n    # ('text/x-sass', 'sass {infile} {outfile}'),\n    # ('text/x-scss', 'sass --scss {infile} {outfile}'),\n    # ('module', 'compressor_toolkit.precompilers.ES6Compiler'),\n    ('text/x-scss', 'django_libsass.SassCompiler'),\n    # ('text/stylus', 'stylus < {infile} > {outfile}'),\n    # ('text/foobar', 'path.to.MyPrecompilerFilter'),\n)\nCOMPRESS_LOCAL_NPM_INSTALL = False\nCOMPRESS_ENABLED = not DEBUG\n# COMPRESS_NODE_MODULES = \"/usr/local/lib/node_modules/\"\n\n\n'''\nThere are a set of settings that allow us to use CloudFront for s3 hosted\nfiles. We also use a separate bucket for static files, because Corpora\nneeds protected s3 files (e.g. recordings).\n'''\nENVIRONMENT_TYPE = os.environ['ENVIRONMENT_TYPE']\n\nCOLLECTFAST_CACHE = 'collectfast'\nCOLLECTFAST_THREADS = 10\nif ENVIRONMENT_TYPE != 'local':\n    AWS_PRELOAD_METADATA = True\n    AWS_STATIC_BUCKET_NAME = os.environ['AWS_STATIC_BUCKET']\n    AWS_STATIC_DEFAULT_ACL = 'public-read'\n    COMPRESS_URL = 'https://'+os.environ['AWS_CLOUDFRONT_CNAME']+'/'\n    AWS_S3_CUSTOM_DOMAIN = os.environ['AWS_CLOUDFRONT_CNAME']\n    STATIC_URL = COMPRESS_URL\n    COMPRESS_STORAGE = 'corpora.storage.CachedS3BotoStorage'\n    STATICFILES_STORAGE = 'corpora.storage.CachedS3BotoStorage'\n    COLLECTFAST_STRATEGY = 'collectfast.strategies.boto3.Boto3Strategy'\n\n    # AWS_IS_GZIPPED = True\n    CORS_ORIGIN_WHITELIST = CORS_ORIGIN_WHITELIST + \\\n        ('https://' + os.environ['AWS_CLOUDFRONT_CNAME'],)\n\nelse:\n    COLLECTFAST_STRATEGY = 'collectfast.strategies.filesystem.FileSystemStrategy'\n\n\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'verbose': {\n            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n        },\n        'simple': {\n            'format': '%(levelname)s %(asctime)s -- %(message)s'\n        }\n    },\n    'handlers': {\n        'console': {\n            'level': 'INFO',\n            'class': 'logging.StreamHandler',\n            'stream': sys.stdout,\n            'formatter': 'verbose',\n        },\n        'testconsole': {\n            'level': 'DEBUG',\n            'class': 'logging.StreamHandler',\n            'stream': sys.stdout,\n            'formatter': 'verbose',\n        },\n        'file': {\n            'level': 'DEBUG',\n            'class': 'logging.handlers.RotatingFileHandler',\n            'filename': '../../logs/django.log',\n            'formatter': 'verbose',\n            'maxBytes': 1024 * 1000, # ~1 MB\n            'backupCount': 20,\n        },\n        'celery': {\n            'level': 'DEBUG',\n            'class': 'logging.handlers.RotatingFileHandler',\n            'filename': '../../logs/celery.log',\n            'formatter': 'simple',\n            'maxBytes': 1024 * 1000, # ~1 MB,\n            'backupCount': 20,\n        }\n    },\n    'loggers': {\n        'django.test': {\n            'handlers': ['testconsole'],\n            'level': 'DEBUG',\n            'propagate': True\n        },\n        'django.request': {\n            'handlers': ['console'],\n            'level': 'INFO',\n            'propagate': False,\n        },\n        'corpora': {\n            'handlers': ['file'],\n            'level': 'DEBUG',\n            'propagate': True\n        },\n        'celery': {\n            'handlers': ['celery', 'console'],\n            'level': 'DEBUG',\n            'propagate': True\n        
}\n    }\n}\n\n\n# API Stuff\nREST_FRAMEWORK = {\n    'DEFAULT_PERMISSION_CLASSES': [\n        'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n    ],\n    'DEFAULT_AUTHENTICATION_CLASSES': [\n        'rest_framework.authentication.TokenAuthentication',\n        'corpora.authentication.StagingTokenAuthentication',\n        'reo_api.authentication.ApplicationAPITokenAuthentication',\n        'reo_api.authentication.UserAPITokenAuthentication',\n        'rest_framework.authentication.SessionAuthentication',\n        'rest_framework_simplejwt.authentication.JWTAuthentication',\n    ],\n    'DEFAULT_RENDERER_CLASSES': (\n        'rest_framework.renderers.JSONRenderer',\n        'rest_framework.renderers.BrowsableAPIRenderer',\n        'reo_api.renderers.PlainTextRenderer',\n        'reo_api.renderers.WebVTTRenderer',\n    ),\n    'PAGE_SIZE': 10,\n    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n    'DEFAULT_THROTTLE_CLASSES': [\n        'rest_framework.throttling.AnonRateThrottle',\n        'corpora.throttle.CorporaUserRateThrottle',\n        'rest_framework.throttling.ScopedRateThrottle',\n    ],\n    'DEFAULT_THROTTLE_RATES': {\n        'listen': '200/day',\n        'sentence': '500/day',\n        'anon': '50/day',\n        'user': '1000/day',\n    }\n}\n\n\n# CELERY #\nif 'local' in ENV_TYPE:\n    CELERY_BROKER_URL = 'amqp://%s:%s@%s:%s/%s' % (\n        os.environ['CELERY_USER'],\n        os.environ['CELERY_PASSWORD'],\n        os.environ['CELERY_HOST'],\n        os.environ['CELERY_PORT'],\n        os.environ['CELERY_VHOST'])\n    CELERY_RESULT_BACKEND = 'cache+memcached://%s:%s/' % (os.environ['DJANGO_MEMCACHED_IP'], os.environ['DJANGO_MEMCACHED_PORT'])\nelse:\n    REDIS_URL = os.environ['AWS_ELASTICACHE_URL']\n    CELERY_BROKER_URL = f\"redis://{REDIS_URL}:6379/0\"\n    CELERY_RESULT_BACKEND = f\"redis://{REDIS_URL}:6379/0\"\n    CELERY_BROKER_TRANSPORT_OPTIONS = {\n        'fanout_prefix': True,\n        'fanout_patterns': True,\n    }\n\nCELERY_TASK_DEFAULT_QUEUE = f\"celery_{ENV_TYPE}\"\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_EVENT_QUEUE_PREFIX = f\"celery:{ENV_TYPE}\"\nCELERY_TASK_RESULT_EXPIRES = 21600 # 6 hours.\n\n\n# CACHE\nif 'local' in ENV_TYPE:\n    CACHES = {\n        'default': {\n            'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n            'LOCATION': '%s:%s' % (\n                os.environ['DJANGO_MEMCACHED_IP'],\n                os.environ['DJANGO_MEMCACHED_PORT']),\n            \"KEY_PREFIX\": f\"{PROJECT_NAME}:{ENV_TYPE}\",\n            'TIMEOUT': 300,\n        },\n        'collectfast': {\n            'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n            'LOCATION': '/tmp/django_cache',\n        },\n    }\nelse:\n    if 'stag' in ENV_TYPE:\n        timeout = 180\n    else:\n        timeout = 300\n\n    CACHES = {\n        'default': {\n            'BACKEND': 'django_redis.cache.RedisCache',\n            'LOCATION': f\"redis://{REDIS_URL}/0\",\n            \"OPTIONS\": {\n                \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"\n            },\n            \"KEY_PREFIX\": f\"{PROJECT_NAME}:{ENV_TYPE}\",\n            'TIMEOUT': 300,\n        },\n        'collectfast': {\n            'BACKEND': 'django_redis.cache.RedisCache',\n            'LOCATION': f\"redis://{REDIS_URL}/0\",\n            \"OPTIONS\": {\n                \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"\n            },\n            \"KEY_PREFIX\": f\"{PROJECT_NAME}:cf:{ENV_TYPE}\",\n            'TIMEOUT': timeout,\n        },\n    }\n\n\n\n# These may be required if caching the entire site.\n# CACHE_MIDDLEWARE_ALIAS\n# CACHE_MIDDLEWARE_SECONDS\n# CACHE_MIDDLEWARE_KEY_PREFIX\n\n\n\n### ERRORS WITH UTC = FALSE!\n# CELERY_TIMEZONE = TIME_ZONE\n# CELERY_ENABLE_UTC = False\n\n\n# DJANGO ANALYTICAL\nGOOGLE_ANALYTICS_PROPERTY_ID = 'UA-114290321-1'\n# GOOGLE_ANALYTICS_TRACKING_STYLE = \\\n#     analytical.templatetags.google_analytics.SCOPE_TRACK_MULTIPLE_DOMAINS\nGOOGLE_ANALYTICS_DISPLAY_ADVERTISING = 
True\nGOOGLE_ANALYTICS_SITE_SPEED = True\nGOOGLE_ANALYTICS_ANONYMIZE_IP = True\n# FACEBOOK_PIXEL_ID = '158736294923584'\n# INTERCOM_APP_ID =''\n\n\n# Transcode API Stuff\nTRANSCODE_API_TOKEN = os.environ['TRANSCODE_API_TOKEN']\n\n\n### Vue frontend Config ###\nWEBPACK_LOADER = {\n    'DEFAULT': {\n        'CACHE': not DEBUG,\n        'BUNDLE_DIR_NAME': 'vue_bundles/', # must end with slash\n        'STATS_FILE': os.path.join(\n            PROJECT_NAME,\n            'static',\n            'vue_bundles',\n            'webpack-stats.json'\n        ),\n        'POLL_INTERVAL': 0.1,\n        'TIMEOUT': None,\n        'IGNORE': [r'.+\\.hot-update.js', r'.+\\.map']\n    }\n}\n\nif 'local' not in ENV_TYPE:\n\n    sentry_sdk.init(\n        dsn=\"https://ba4e6615d22748dc98442c7c29696e87@o232677.ingest.sentry.io/5458955\",\n        integrations=[DjangoIntegration()],\n        traces_sample_rate=0.1,\n        environment=ENV_TYPE,\n        # If you wish to associate users to errors (assuming you are using\n        # django.contrib.auth) you may enable sending PII data.\n        send_default_pii=True\n    )\n\n\n# JWT Messages\nSIMPLE_JWT = {\n    'ACCESS_TOKEN_LIFETIME': timedelta(hours=3),\n    'REFRESH_TOKEN_LIFETIME': timedelta(weeks=1),\n    'ALGORITHM': 'RS256',\n    'SIGNING_KEY': os.environ['JWT_AUTH_SSL_PRIVATE'],\n    'VERIFYING_KEY': os.environ['JWT_AUTH_SSL_PUBLIC'],\n    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.SlidingToken',),\n}\n","sub_path":"corpora/corpora/base_settings.py","file_name":"base_settings.py","file_ext":"py","file_size_in_byte":18457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"83815973","text":"# Wait for page elements to appear\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass TestCase():\n    def __init__(self):\n        self.wd = webdriver.Chrome()\n        #self.wd.implicitly_wait(10)\n\n\n    def test_baidu(self):\n        self.wd.get('https://www.baidu.com')\n        element = self.wd.find_element_by_id('kw')\n\n        element.send_keys('白月黑羽\\n')\n\n        # the element with id 1 is the first search result\n        element = self.wd.find_element_by_id('1')\n\n        # print the text of the first search result\n        print(element.text)\n\n    def test_selenium_alert(self):\n        self.wd.get('https://www.selenium.dev/zh-cn/documentation/webdriver/js_alerts_prompts_and_confirmations/')\n        # Click the link to activate the alert\n        self.wd.find_element(By.LINK_TEXT, \"查看样例警告框\").click()\n        wait = WebDriverWait(self.wd,10)\n        # Wait for the alert to be displayed and store it in a variable\n        alert = wait.until(expected_conditions.alert_is_present())\n        # Store the alert text in a variable\n        text = alert.text\n        print(text)\n        # Press the OK button\n        alert.accept()\n\n    def test_selenium_confirm(self):\n        self.wd.get('https://www.selenium.dev/zh-cn/documentation/webdriver/js_alerts_prompts_and_confirmations/')\n        # Click the link to activate the alert\n        self.wd.find_element(By.LINK_TEXT, \"查看样例确认框\").click()\n        wait = WebDriverWait(self.wd,10)\n        # Wait for the alert to be displayed and store it in a variable\n        wait.until(expected_conditions.alert_is_present())\n        # Store the alert text in a variable\n        alert = self.wd.switch_to.alert\n        text = alert.text\n        print(text)\n        # Press the OK button\n        # click the confirm button\n        # alert.accept()\n        # click the cancel button\n        alert.dismiss()\n\nif __name__ == '__main__':\n    # TestCase().test_selenium_alert()\n    TestCase().test_selenium_confirm()","sub_path":"webselenium.py","file_name":"webselenium.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"47645863","text":"from random import random\nfrom uuid import uuid4\n\nfrom procgen.species.culture import get_culture\nfrom procgen.species.aspects.type_aspects import get_species_type\nfrom procgen.species.aspects.preferences_aspects import get_world_preference\nfrom procgen.species.aspects.trait_aspects import get_traits\n\nclass Species:\n def __init__(self,\n basis=None,\n modifier=None,\n preferences=None,\n aspects=None):\n if aspects is None:\n aspects = []\n self.uuid = uuid4()\n self.culture = None\n self.name = None\n self.awakened = False\n self.english_name = None\n self.basis = basis\n self.modifier = modifier\n self.preferences = preferences\n self.aspects = aspects\n if modifier is basis:\n raise ValueError\n\n @property\n def description(self):\n if self.awakened:\n output = \"The {0} are {1} {2}.\".format(\n self.name if random() > 0.75 else self.english_name,\n self.modifier.adjective,\n self.basis.plural\n )\n output += \"\\n\\t\" + \"They prefer {0} worlds.\".format(self.preferences.description)\n output += \"\\n\\tTheir culture interests include:\"\n for item in self.culture.themes:\n output += '\\n\\t\\t' + item\n else:\n output = \"These organisms are {0} {1}\".format(\n self.modifier.adjective,\n self.basis.plural\n )\n output += \"\\n\\t\" + \"They prefer {0} worlds.\".format(self.preferences.description)\n output += \"\\n\\tTraits:\"\n for trait in self.aspects:\n output += \"\\n\\t\\t\" + trait.description\n return output\n\n def elevate(self, culture=None):\n if culture:\n self.culture = culture\n else:\n self.culture = get_culture()\n self.name, self.english_name = self.culture.language.get_proper_name()\n self.awakened = True\n\n\ndef get_species():\n type_a = get_species_type()\n type_b = get_species_type()\n while type_a.conflicts_with(type_b):\n type_b = get_species_type()\n return Species(\n basis=type_a,\n modifier=type_b,\n preferences=get_world_preference(),\n aspects=get_traits()\n )\n\n\nif __name__ == \"__main__\":\n for _ in range(10):\n\n type_a = get_species_type()\n type_b = get_species_type()\n while type_a.conflicts_with(type_b):\n type_b = get_species_type()\n species = Species(\n basis=type_a,\n modifier=type_b,\n preferences=get_world_preference(),\n aspects=get_traits()\n )\n\n if random() <= 0.2:\n species.elevate()\n print(species.description)","sub_path":"procgen/species/species.py","file_name":"species.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"136539948","text":"# Imports\nimport os\nimport json\nimport argparse\nimport pandas as pd\nfrom tqdm import tqdm\nimport torch\n#import torch.nn as nn\n#import torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_auc_score\nfrom model_str import LM_dataset, LM_model\n\n# Test function\ndef test_f(args):\n # Load holdout data\n model_test = pd.read_pickle(args.path_model_holdout)\n \n # Instantiate dataclass\n test_dataset = LM_dataset(model_test)\n \n # Instantiate dataloader\n test_dl = DataLoader(test_dataset, batch_size = args.batch_size, shuffle = False)\n \n # Instantiate model and load model weights\n str_vars = model_test.columns.drop(['full_denial'])\n num_str_vars = len(str_vars)\n\n device = torch.device(args.gpu_id)\n model = LM_model(args, num_str_vars)\n model.load_state_dict(torch.load(args.path_model)) #, map_location = 'cuda:0'))\n model.to(device)\n 
model.eval()\n    \n    # Test procedure\n    Y_predicted_score = []\n    Y_predicted_binary = []\n    Y_ground_truth = []\n\n    for X_str, Y in tqdm(test_dl, desc = 'Testing'):\n        # Move to cuda\n        X_str = X_str.cuda(device)\n        Y = Y.cuda(device)\n        \n        # Compute predictions and append\n        with torch.no_grad():\n            pred_batch_score = model(X_str).view(-1)\n            pred_batch_binary = torch.round(pred_batch_score.view(pred_batch_score.shape[0]))\n            Y_predicted_score += pred_batch_score.tolist()\n            Y_predicted_binary += pred_batch_binary.tolist()\n            Y_ground_truth += Y.tolist()\n    \n    return Y_predicted_score, Y_predicted_binary, Y_ground_truth\n    \ndef main():\n    # Argument parsing\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--input_dir', default = None, type = str, required = True,\n                        help = 'input data folder')\n    parser.add_argument('--work_dir', default = None, type = str, required = True,\n                        help = 'work folder')\n    parser.add_argument('--batch_size', default = None, type = int, required = True,\n                        help = 'train batch size')\n    parser.add_argument('--gpu_id', default = None, type = int, required = True,\n                        help = 'gpu id for testing')\n    args = parser.parse_args()\n    args.dropout = 0.4\n    \n    # Path initialization\n    args.path_model_holdout = os.path.join(args.input_dir, 'holdout_model.pkl')\n    args.path_model = os.path.join(args.work_dir, 'model.pt')\n    args.path_results_train = os.path.join(args.work_dir, 'train_results.json')\n    args.path_results_full = os.path.join(args.work_dir, 'full_results.json')\n    \n    # Compute predictions\n    Y_predicted_score, Y_predicted_binary, Y_ground_truth = test_f(args)\n    \n    # Compute and print metrics\n    tn, fp, fn, tp = confusion_matrix(Y_ground_truth, Y_predicted_binary).ravel()\n    precision_model = tp / (tp + fp)\n    recall_model = tp / (tp + fn)\n    f1_model = 2 * (precision_model * recall_model) / (precision_model + recall_model)\n    auc_model = roc_auc_score(Y_ground_truth, Y_predicted_score)\n\n    print(f'\\nPrecision model: {precision_model:.4f}')\n    print(f'Recall model: {recall_model:.4f}')\n    print(f'F1 model: {f1_model:.4f}')\n    print(f'AUC model: {auc_model:.4f}\\n')\n    \n    # Append results to the results JSON file\n    with open(args.path_results_train, 'r') as fr:\n        results = json.load(fr)\n    \n    results['Y_test_ground_truth'] = Y_ground_truth\n    results['Y_test_prediction_scores'] = Y_predicted_score\n    results['Y_test_prediction_binary'] = Y_predicted_binary\n    \n    with open(args.path_results_full, 'w') as fw:\n        json.dump(results, fw) \n    \nif __name__ == \"__main__\":\n    main() \n\n","sub_path":"04_models/01_distributed/01_str_only/test_str.py","file_name":"test_str.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"496130878","text":"import uuid\nfrom statistics import mean, median\n\nfrom celery import shared_task\nfrom django.apps import apps\n\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.components.models import ComponentInterfaceValue\nfrom grandchallenge.evaluation.utils import Metric, rank_results\n\n\n@shared_task\ndef set_evaluation_inputs(*_, evaluation_pk):\n    \"\"\"\n    Sets the inputs to the Evaluation for an algorithm submission.\n\n    If all of the `AlgorithmEvaluation`s for this algorithm `Submission` are\n    successful this will set the inputs to the `Evaluation` job and schedule\n    it. 
If any of the `AlgorithmEvaluation`s are unsuccessful then the\n `Evaluation` will be marked as Failed.\n\n Parameters\n ----------\n evaluation_pk\n The primary key of the evaluation.Evaluation object\n \"\"\"\n Evaluation = apps.get_model( # noqa: N806\n app_label=\"evaluation\", model_name=\"Evaluation\"\n )\n\n evaluation = Evaluation.objects.get(pk=evaluation_pk)\n\n unsuccessful_jobs = evaluation.submission.algorithmevaluation_set.exclude(\n status=Evaluation.SUCCESS\n ).count()\n\n if unsuccessful_jobs:\n evaluation.update_status(\n status=evaluation.FAILURE,\n output=(\n f\"The algorithm failed to execute on {unsuccessful_jobs} \"\n f\"images.\"\n ),\n )\n else:\n evaluation.inputs.set(\n ComponentInterfaceValue.objects.filter(\n evaluation_algorithmevaluations_as_output__submission=evaluation.submission\n )\n )\n evaluation.signature.apply_async()\n\n\ndef filter_by_creators_most_recent(*, evaluations):\n # Go through the evaluations and only pass through the most recent\n # submission for each user\n users_seen = set()\n filtered_qs = []\n\n for e in evaluations:\n creator = e.submission.creator\n\n if creator not in users_seen:\n users_seen.add(creator)\n filtered_qs.append(e)\n\n return filtered_qs\n\n\ndef filter_by_creators_best(*, evaluations, ranks):\n best_result_per_user = {}\n\n for e in evaluations:\n creator = e.submission.creator\n\n try:\n this_rank = ranks[e.pk]\n except KeyError:\n # This result was not ranked\n continue\n\n if creator not in best_result_per_user or (\n this_rank < ranks[best_result_per_user[creator].pk]\n ):\n best_result_per_user[creator] = e\n\n return [r for r in best_result_per_user.values()]\n\n\n@shared_task # noqa: C901\ndef calculate_ranks(*, challenge_pk: uuid.UUID): # noqa: C901\n challenge = Challenge.objects.get(pk=challenge_pk)\n display_choice = challenge.evaluation_config.result_display_choice\n score_method_choice = challenge.evaluation_config.scoring_method_choice\n\n Evaluation = apps.get_model( # noqa: N806\n app_label=\"evaluation\", model_name=\"Evaluation\"\n )\n\n metrics = (\n Metric(\n path=challenge.evaluation_config.score_jsonpath,\n reverse=(\n challenge.evaluation_config.score_default_sort\n == challenge.evaluation_config.DESCENDING\n ),\n ),\n *[\n Metric(\n path=col[\"path\"],\n reverse=col[\"order\"] == challenge.evaluation_config.DESCENDING,\n )\n for col in challenge.evaluation_config.extra_results_columns\n ],\n )\n\n if score_method_choice == challenge.evaluation_config.ABSOLUTE:\n\n def score_method(x):\n return list(x)[0]\n\n elif score_method_choice == challenge.evaluation_config.MEAN:\n score_method = mean\n elif score_method_choice == challenge.evaluation_config.MEDIAN:\n score_method = median\n else:\n raise NotImplementedError\n\n valid_evaluations = (\n Evaluation.objects.filter(\n submission__challenge=challenge,\n published=True,\n status=Evaluation.SUCCESS,\n )\n .order_by(\"-created\")\n .select_related(\"submission__creator\")\n .prefetch_related(\"outputs__interface\")\n )\n\n if display_choice == challenge.evaluation_config.MOST_RECENT:\n valid_evaluations = filter_by_creators_most_recent(\n evaluations=valid_evaluations\n )\n elif display_choice == challenge.evaluation_config.BEST:\n all_positions = rank_results(\n evaluations=valid_evaluations,\n metrics=metrics,\n score_method=score_method,\n )\n valid_evaluations = filter_by_creators_best(\n evaluations=valid_evaluations, ranks=all_positions.ranks\n )\n\n final_positions = rank_results(\n evaluations=valid_evaluations,\n metrics=metrics,\n 
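# final ranking over the filtered evaluations, using the same metrics and score aggregation\n        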
score_method=score_method,\n    )\n\n    evaluations = Evaluation.objects.filter(submission__challenge=challenge)\n\n    _update_evaluations(\n        evaluations=evaluations, final_positions=final_positions\n    )\n\n\ndef _update_evaluations(*, evaluations, final_positions):\n    Evaluation = apps.get_model( # noqa: N806\n        app_label=\"evaluation\", model_name=\"Evaluation\"\n    )\n\n    for e in evaluations:\n        try:\n            rank = final_positions.ranks[e.pk]\n            rank_score = final_positions.rank_scores[e.pk]\n            rank_per_metric = final_positions.rank_per_metric[e.pk]\n        except KeyError:\n            # This result will be excluded from the display\n            rank = 0\n            rank_score = 0.0\n            rank_per_metric = {}\n\n        e.rank = rank\n        e.rank_score = rank_score\n        e.rank_per_metric = rank_per_metric\n\n    Evaluation.objects.bulk_update(\n        evaluations, [\"rank\", \"rank_score\", \"rank_per_metric\"]\n    )\n","sub_path":"app/grandchallenge/evaluation/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"617970844","text":"import collections\n\n\nclass Solution:\n    def minDeletions(self, s: str) -> int:\n        res, count, st = 0, collections.Counter(s), set()\n        for v in count.values():\n            # decrement each duplicated frequency until it is unique (or zero)\n            while v > 0 and v in st:\n                v -= 1\n                res += 1\n            st.add(v)\n        \n        return res\n\n","sub_path":"python/minimum-deletions-to-make-character-frequencies-unique.py","file_name":"minimum-deletions-to-make-character-frequencies-unique.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"121238646","text":"'''\r\n@author: Quyen Doan, https://github.com/qdoan1651/DevMathPython\r\n@file: cdr/prof_dunn/retrieve_assignments_cgid/retrieve_assignments_cgid.py\r\n@desc: Extract assessment and homework assignments CGID\r\n'''\r\nimport logging, time, os\r\nfrom selenium import webdriver\r\nfrom mtmf.pages.instructor_home_page import InstructorHomePage\r\nfrom mtmf.pages import login_page\r\nfrom myutils import utils_files_io\r\nfrom new_course_editor import assignment\r\nfrom mtmf.retrieve_assignments_cgid import retrieve_assessment_cgid\r\nfrom mtmf.retrieve_assignments_cgid import retrieve_homework_cgid\r\n\r\ndef retrieve_assignments_cgid(course_key):\r\n    driver = webdriver.Chrome('C:/Workspace/Tools/drivers/chromedriver.exe')\r\n    driver.set_window_size(1420, 1000)\r\n    \r\n    # Login into Mindtap Math Foundation\r\n    print('Logging into MTMF Prod...')\r\n    login_page.instructor_login_mindtap_prod(driver)\r\n    \r\n    # Launch course\r\n    home_page = InstructorHomePage(driver)\r\n    home_page.launch_course(course_key); time.sleep(3)\r\n    \r\n    assignments_list = utils_files_io.read_list_from_file('../extract_assignments_names/' + course_key + '_assignments_working.txt')\r\n    assignments_cgid_info = []\r\n    for entry in assignments_list:\r\n        assignment_name, assignment_type = entry.split('; ')\r\n        if assignment_type in ['prerequisite', 'test', 'quiz']:\r\n            print('Retrieving question CGID for assessment assignment \"{}\"...'.format(assignment_name))\r\n            info = retrieve_assessment_cgid.retrieve_questions_cgid(driver, assignment_name)\r\n            jo = {'name': assignment_name, 'type': assignment_type, 'content': info}\r\n            assignments_cgid_info.append(jo)\r\n        else: \r\n            info = retrieve_homework_cgid.retrieve_questions_cgid(driver, assignment_name)\r\n            jo = {'name': assignment_name, 'type': assignment_type, 'content': info}\r\n            assignments_cgid_info.append(jo)\r\n    \r\n    print('Write question CGID from assignment \"{}\" to 
disk...'.format(assignment_name))\r\n utils_files_io.write_json_to_file(assignments_cgid_info, course_key + '_cgid.json')\r\n\r\nif __name__ == '__main__':\r\n log_file = 'C:/Workspace/Sandbox/log.txt'\r\n if os.path.isfile(log_file): os.remove(log_file)\r\n logging.basicConfig(filename = log_file, level=logging.INFO)\r\n # logging.disable(logging.INFO)\r\n \r\n# retrieve_assignments_cgid('DEPN-T02N-5NPR')\r\n# retrieve_assignments_cgid('DEPN-W6RN-586S')\r\n retrieve_assignments_cgid('DEPP-L1JN-G3X9')\r\n","sub_path":"DevMathPython/cdr/prof_dunn_courses_cgid/part2_retrieve_assignments_cgid/retrieve_assignments_cgid.py","file_name":"retrieve_assignments_cgid.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"302161041","text":"#!/usr/bin/env python3\n# coding:utf-8\nimport logging\nfrom argparse import ArgumentParser\nfrom eterna.recordings import show_nmap_report_recording\n\nfrom ..vnmap.core import VNmap\n\n\nclass ArgumentError(Exception):\n pass\n\n\ndef vnmap(option):\n logger = logging.getLogger(\"vnmap\")\n if not option.target:\n logger.error(\"No target is set. Plz check your input.\")\n raise ArgumentError\n\n vnmap = VNmap(option.poolsize)\n vnmap.start()\n\n nmap_arguments = option.arguments + f\" -p{option.ports}\"\n vnmap.start_execute_nmap(\n option.target,\n nmap_arguments,\n option.timeout,\n show_nmap_report_recording\n )\n vnmap.shutdown()\n\n\ndef default_handler(*vargs, **kwargs):\n raise ArgumentError\n\ndef cli_entry():\n parser = ArgumentParser()\n parser.add_argument(\"-o\", \"--output\", help=\"set format type for output.\", choices=[\n \"json\", \"xml\", \"raw\"\n ], default=\"raw\")\n parser.set_defaults(handler=default_handler)\n\n subparsers = parser.add_subparsers()\n\n # nmap cli parser.\n nmap_parser: ArgumentParser = subparsers.add_parser(\"vnmap\")\n nmap_parser.add_argument(\"-t\", \"--target\", help='target for scan, splited by comma \",\".')\n nmap_parser.add_argument(\"-s\", \"--poolsize\", default=10, help=\"how many instances of nmap run at the same time.\")\n nmap_parser.add_argument(\"-p\", \"--ports\", default=\"1-65535\", help=\"what ports will be scan!\")\n nmap_parser.add_argument(\"--timeout\", default=600, help=\"timeout for each scaning thread.\")\n nmap_parser.add_argument(\"-a\", \"--arguments\", default='-sV', help=\"extra argument for nmap\")\n nmap_parser.set_defaults(handler=vnmap)\n\n # vdnsmap parser\n\n option = parser.parse_args()\n\n try:\n option.handler(option)\n except ArgumentError:\n parser.print_help()\n","sub_path":"survee/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"538622304","text":"from django.conf.urls import url, include\nfrom user.views import *\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib import admin\nfrom django.urls import path\n\nurlpatterns = [\n path(r'', include('django.contrib.auth.urls')),\n url(r'^register/$', RegisterView.as_view(), name='register'),\n url(r'^login/$', LoginView.as_view(), name='login'),\n url(r'^tracks/$', tracks, name='tracks'),\n url(r'^new_track/$', new_track, name='new_track'),\n url(r'^edit_track/$', edit_track, name='edit_track'),\n url(r'^delete_track/$', delete_track, name='delete_track'),\n url(r'^default_enhancers/$', default_enhancers, name='default_enhancers'),\n url(r'^default_tads/$', default_tads, 
name='default_tads'),\n url(r'^default_cnvs/$', default_cnvs , name='default_cnvs')\n]","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"438742858","text":"from __future__ import absolute_import, division, print_function\nimport utool\nprint, print_, printDBG, rrr, profile = utool.inject(__name__, '[results]', DEBUG=False)\nfrom ibeis.dev import results_organizer\nfrom ibeis.dev import results_analyzer\n\n\nclass AllResults(utool.DynStruct):\n \"\"\"\n Data container for all compiled results\n \"\"\"\n def __init__(allres):\n super(AllResults, allres).__init__(child_exclude_list=['qaid2_qres'])\n allres.ibs = None\n allres.qaid2_qres = None\n allres.allorg = None\n allres.cfgstr = None\n\n def get_orgtype(allres, orgtype):\n orgres = allres.allorg.get(orgtype)\n return orgres\n\n def get_qres(allres, qaid):\n return allres.qaid2_qres[qaid]\n\n def get_orgres_desc_match_dists(allres, orgtype_list):\n return results_analyzer.get_orgres_desc_match_dists(allres, orgtype_list)\n\n def get_orgres_annotationmatch_scores(allres, orgtype_list):\n return results_analyzer.get_orgres_annotationmatch_scores(allres, orgtype_list)\n\n\ndef init_allres(ibs, qaid2_qres):\n allres_cfgstr = ibs.qreq.get_cfgstr()\n print('Building allres')\n allres = AllResults()\n allres.qaid2_qres = qaid2_qres\n allres.allorg = results_organizer.organize_results(ibs, qaid2_qres)\n allres.cfgstr = allres_cfgstr\n allres.ibs = ibs\n return allres\n","sub_path":"ibeis/dev/results_all.py","file_name":"results_all.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"247777868","text":"import datetime\nimport numpy as np\nimport keras\n\nfrom data_handler.api_data import IndicatorData\nfrom common.model_loader import ModelLoader\nfrom algorithm.exception import ParamError, DataError\n\n\nclass OneDayLSTM(object):\n def __init__(self, asset: dict):\n self.model = asset['model']\n self.scaler = asset['scaler']\n self.system_code = asset['system_code']\n self.calc_code = asset['calc_code']\n\n def forecast(self, target_date):\n data = self._get_data(target_date)\n result = self._get_result(data)\n return result\n\n def _get_data(self, target_date: datetime):\n start_time = target_date + datetime.timedelta(days=-5)\n end_time = target_date + datetime.timedelta(days=-1)\n\n ds = IndicatorData(self.system_code, self.calc_code)\n raw_data = ds.get_data(start_time, end_time, 'day')\n results = raw_data['data'][0]['result']\n\n data = []\n for item in results:\n if not isinstance(item['value'], (int, float)):\n raise DataError('History data type error')\n data.append(item['value'])\n if len(data) != 5:\n raise DataError('History data not enough')\n\n return data\n\n def _get_result(self, data):\n input_x = self.scaler.transform(np.asarray(data).reshape((-1, 1)))\n predict_y = self.model.predict(input_x.reshape((1, -1, 1)))\n result = self.scaler.inverse_transform(predict_y.reshape(-1, 1))\n return result.tolist()[0][0]\n\n\ndef call(*args, **kwargs):\n for p in ['param', 'model_url']:\n if p not in kwargs.keys():\n raise ParamError('Missing required parameter in the JSON body: \\'%s\\'' % p)\n\n date = kwargs['param'].get('date')\n if not date:\n raise ParamError('Required parameter \\'date\\' not found in \\'param\\'')\n\n keras.backend.clear_session()\n asset = ModelLoader.load(kwargs['model_url'])\n 
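# OneDayLSTM reads 'model', 'scaler', 'system_code' and 'calc_code' from this asset dict (see __init__ above).\n    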
forecaster = OneDayLSTM(asset=asset)\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n result = forecaster.forecast(target_date=date)\n return result\n","sub_path":"algorithm/forecast/power/one_day_lstm.py","file_name":"one_day_lstm.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"491813435","text":"n=int(input())\nfor i in range(n):\n a=int(input())\n list=[]\n for j in range(a+1):\n if a&j not in list:\n list.append(a&j)\n list.sort()\n print(list)\n for j in range(len(list)-1):\n print(list[len(list)-1-j],end=' ')\n print(list[0])\n","sub_path":"Code/CodeRecords/2669/60647/296727.py","file_name":"296727.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"546537737","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.10.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport matplotlib\n# %%\nimport pandas as pd\n\n# %matplotlib inline\n# %config InlineBackend.figure_format = 'png' \nmatplotlib.rcParams['figure.figsize'] = (24.0, 10.0)\nfontsize = 12\n\nmatplotlib.rcParams['font.size'] = fontsize\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nrc={'font.size': fontsize, 'axes.labelsize': fontsize+2, 'legend.fontsize': fontsize+2, \n 'axes.titlesize': fontsize+2, 'xtick.labelsize': fontsize+2, 'ytick.labelsize': fontsize+2}\nsns.set(rc=rc)\nsns.set_style('darkgrid')\nimport numpy as np\nimport os\nimport os.path as osp\n\n# %load_ext autoreload\n# %autoreload 2\n\nHOME = '/home/fattouhm/'\n\nos.sys.path.insert(0, osp.join(HOME, 'projects/ieeg/visualization'))\nfrom exp_plot import create_results_df as collect_task_results\n\n# %%\nprint(os.sys.path)\n\n# %%\nEXPERIMENTS = [osp.join(HOME, exp) for exp in ['500epochs_seed1', '500epochs_seed2', '100epochs_seed1', '100epochs_seed2']]\n# ORDER = ['SHALLOW', 'HYBRID','DEEP4', 'DEEP5_x2', 'DEEP5_x4', 'RNN']\nORDER = ['SHALLOW', 'HYBRID','DEEP4','RNN']\n\nEXPERIMENTS = ['500epochs_seed1', '500epochs_seed2', '100epochs_seed1', '100epochs_seed2']\n# EXPERIMENTS = ['500epochs_seed1']\nexperiments_dirs = [osp.join(HOME, exp) for exp in EXPERIMENTS] #, '100epochs_seed1', '100epochs_seed2']]\n\nTASKS = ['XPOS', 'XVEL']\n\n\n# %%\ndef load_results(experiments, tasks, experiments_dirs):\n results_list = []\n for task in tasks:\n for exp, exp_dir in zip(experiments, experiments_dirs):\n results_df = collect_task_results(osp.join(exp_dir, task, 'TRAIN'))\n results_df['exp'] = exp\n results_df['task'] = task\n results_list.append(results_df)\n\n return pd.concat(results_list)\n\n\n# %%\ndef average_seeds(df):\n new_df = pd.pivot_table(df, index=['sub', 'task', 'model'], columns='exp')\n new_df.columns = new_df.columns.droplevel(0)\n new_df['500epochs'] = new_df[['500epochs_seed1', '500epochs_seed2']].mean(axis=1)\n new_df['100epochs'] = new_df[['100epochs_seed1', '100epochs_seed2']].mean(axis=1)\n new_df.drop(['500epochs_seed1', '500epochs_seed2'], axis=1, inplace=True)\n new_df.drop(['100epochs_seed1', '100epochs_seed2'], axis=1, inplace=True)\n new_df.reset_index(inplace=True)\n \n return new_df.melt(id_vars=['sub', 'task', 'model'], value_name='corr')\n\n\n# %%\nresults_df = load_results(EXPERIMENTS, TASKS, experiments_dirs)\nresults_df.head()\n\n# %%\ng1 = 
sns.catplot(data=results_df, kind='point', x='exp', y='corr', row='task', col='model', col_order=ORDER, ci='sd', estimator=np.mean, capsize=0.05,)\nfor ax in g1.axes.flat:\n for label in ax.get_xticklabels():\n label.set_rotation(45)\n\ng1.set_titles(row_template = '{row_name}', col_template = '{col_name}')\ng1.set(xlabel='')\nplt.subplots_adjust(top=0.9)\ng1.fig.suptitle('Effect of # of Epochs & Seeds') # can also get the figure from plt.gcf()\n\n# %%\ng1.savefig(osp.join(HOME, 'final_epochs_seeds.png'))\n\n# %%\naveraged_results_df = average_seeds(results_df)\naveraged_results_df.head()\n\n# %%\ng2 = sns.catplot(data=averaged_results_df, kind='strip', x='task', y='corr', row='exp', col='model', col_order=ORDER, hue='sub')\n\nfor ax_idx, data in g2.facet_data():\n sns.pointplot(x='task', y='corr', data=data, ci='sd', estimator=np.mean, capsize=0.05, ax=g2.axes[ax_idx[0], ax_idx[1]], markers='D')\n \ng2.set(xlabel='')\ng2.set_titles(row_template = '{row_name}', col_template = '{col_name}')\nplt.subplots_adjust(top=0.9)\ng2._legend.set_title('Recording') \ng2.fig.suptitle('DA of each model for different tasks')\n\n# %%\ng2.savefig(osp.join(HOME, 'final_task_da.png'))\n\n# %%\ng3 = sns.catplot(data=averaged_results_df, kind='strip', x='model', y='corr', row='exp', order=ORDER, col='task', hue='sub', aspect=1.1, height=7)\n\nfor ax_idx, data in g3.facet_data():\n sns.pointplot(x='model', y='corr', data=data, ci='sd', estimator=np.mean, capsize=0.05,order=ORDER, ax=g3.axes[ax_idx[0], ax_idx[1]], markers='D')\n \ng3.set(xlabel='')\nplt.subplots_adjust(top=0.93)\ng3.set_titles(row_template = '{row_name}', col_template = '{col_name}')\ng3._legend.set_title('Recording') \ng3.fig.suptitle('DA of each task for different models')\n\n# %%\ng3.savefig(osp.join(HOME, 'final_model_da.png'))\n","sub_path":"notebooks/vis_exp.py","file_name":"vis_exp.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"177011805","text":"import os\nfrom TAP.Simple import *\nimport kludge101\n\nplan(5)\n\nrepodir = os.path.abspath(\"t/101repo\")\n\ndef path_ok(path, comment):\n result = kludge101.checkpath(path)\n eq_ok(kludge101.checkpath(path), os.path.join(repodir, path), comment)\n\ndef path_not_ok(path, comment):\n is_ok(kludge101.checkpath(path), None, comment)\n\n\nos.environ[\"repo101dir\"] = repodir\n\npath_ok( \"contributions/regular/extractor.py\",\n \"regular path is fine\")\n\npath_ok( \"contributions/nonexistent/extractor.py\",\n \"path to nonexistent file is also fine\")\n\npath_not_ok(\"contributions/symlink/extractor.py\",\n \"symlinked path is dangerous\")\n\npath_not_ok(\"contributions/symlink/nonexistent/extractor.py\",\n \"nonexistent path with symlink is also dangerous\")\n\npath_not_ok(\"../gitdeps/user/malicious/extractor.py\",\n \"non-101repo path is dangerous\")\n","sub_path":"101worker/libraries/kludge101/t/01_checkpath.t.py","file_name":"01_checkpath.t.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"249764181","text":"import os\nimport nose\nimport sys\n\nfrom nose.tools import assert_raises\n\nimport ciliwung.pre.from_poly as from_poly\n\nproject_dir = os.environ[\"IRFLOOD\"]\n\nclass TestSimpleTSH():\n def setUp(self):\n prepath = \"ciliwung/pre/test/data/simple\"\n self.files = {\n \"node_txt\": os.path.join(project_dir, prepath, \"box.1.txt\"),\n \"poly_input\": os.path.join(project_dir, prepath, 
\"box.poly\"),\n \"poly\": os.path.join(project_dir, prepath, \"box.1.poly\"),\n \"ele\": os.path.join(project_dir, prepath, \"box.1.ele\"),\n \"neigh\": os.path.join(project_dir, prepath, \"box.1.neigh\"),\n \"node\": os.path.join(project_dir, prepath, \"box.1.node\"),\n \"edge\": os.path.join(project_dir, prepath, \"box.1.edge\"),\n \"bbt\": os.path.join(project_dir, prepath, \"box_01.bbt\"),\n \"bbx\": os.path.join(project_dir, prepath, \"box_01.bbx\"),\n }\n self.f_dem_elevation = \"/home/somat/ir_flood/data/dem2000/jakDEM_150630.tif\"\n\n def tearDown(self):\n pass\n\n def test_input_files(self):\n assert os.path.isfile(self.files[\"poly_input\"])\n\n def test_execute_triangle_convert_poly(self):\n f_node, f_ele, f_neigh, f_edge = from_poly.convert(self.files[\"poly_input\"])\n assert f_node == self.files[\"node\"]\n assert f_ele == self.files[\"ele\"]\n assert f_neigh == self.files[\"neigh\"]\n assert f_edge == self.files[\"edge\"]\n assert os.path.isfile(self.files[\"poly\"])\n assert os.path.isfile(self.files[\"ele\"])\n assert os.path.isfile(self.files[\"neigh\"])\n assert os.path.isfile(self.files[\"node\"])\n assert os.path.isfile(self.files[\"edge\"])\n\n def test_execute_triangle_not_exist_poly_input(self):\n assert_raises(IOError, from_poly.convert, \"box.1.txt\")\n\n def test_execute_triangle_using_relative_poly_input(self):\n f_node, f_ele, f_neigh, f_edge = from_poly.convert(\"data/simple/box.poly\")\n assert f_node == self.files[\"node\"]\n assert f_ele == self.files[\"ele\"]\n assert f_neigh == self.files[\"neigh\"]\n assert f_edge == self.files[\"edge\"]\n assert os.path.isfile(self.files[\"poly\"])\n assert os.path.isfile(self.files[\"ele\"])\n assert os.path.isfile(self.files[\"neigh\"])\n assert os.path.isfile(self.files[\"node\"])\n assert os.path.isfile(self.files[\"edge\"])\n\n def test_fill_elevation_and_manning(self):\n f_node_txt = from_poly.fill_elevation_and_manning(self.files[\"node\"],\n f_dem_elevation=self.f_dem_elevation)\n assert f_node_txt == self.files[\"node_txt\"]\n assert os.path.isfile(self.files[\"node_txt\"])\n\n def test_simple_convert_to_tsh(self):\n f_tsh = from_poly.create_tsh(\n self.files[\"node_txt\"],\n self.files[\"ele\"],\n self.files[\"neigh\"],\n self.files[\"edge\"],\n self.files[\"poly_input\"],\n self.files[\"bbt\"])\n assert os.path.isfile(f_tsh)\n\nif __name__ == \"__main__\":\n nose.run()\n","sub_path":"pre/test/test_simple_tsh.py","file_name":"test_simple_tsh.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"347975840","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport csv\nimport time\nimport copy\nfrom collections import deque\n\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils import CvFpsCalc\nfrom utils import CvDrawText\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--width\", help='cap width', type=int, default=960)\n parser.add_argument(\"--height\", help='cap height', type=int, default=540)\n parser.add_argument(\"--file\", type=str, default=None)\n\n parser.add_argument(\"--fps\", type=int, default=10)\n parser.add_argument(\"--skip_frame\", type=int, default=0)\n\n parser.add_argument(\"--model\", default='model/EfficientDetD0/saved_model')\n parser.add_argument(\"--score_th\", type=float, default=0.75)\n\n parser.add_argument(\"--sign_interval\", type=float, 
default=2.0)\n parser.add_argument(\"--jutsu_display_time\", type=int, default=5)\n\n parser.add_argument(\"--use_display_score\", type=bool, default=False)\n parser.add_argument(\"--erase_bbox\", type=bool, default=False)\n parser.add_argument(\"--use_jutsu_lang_en\", type=bool, default=False)\n\n parser.add_argument(\"--chattering_check\", type=int, default=1)\n\n parser.add_argument(\"--use_fullscreen\", type=bool, default=False)\n\n args = parser.parse_args()\n\n return args\n\n\ndef run_inference_single_image(image, inference_func):\n tensor = tf.convert_to_tensor(image)\n output = inference_func(tensor)\n\n output['num_detections'] = int(output['num_detections'][0])\n output['detection_classes'] = output['detection_classes'][0].numpy()\n output['detection_boxes'] = output['detection_boxes'][0].numpy()\n output['detection_scores'] = output['detection_scores'][0].numpy()\n return output\n\n\ndef main():\n # 引数解析 #################################################################\n args = get_args()\n\n cap_width = args.width\n cap_height = args.height\n cap_device = args.device\n if args.file is not None: # 動画ファイルを利用する場合\n cap_device = args.file\n\n fps = args.fps\n skip_frame = args.skip_frame\n\n model_path = args.model\n score_th = args.score_th\n\n sign_interval = args.sign_interval\n jutsu_display_time = args.jutsu_display_time\n\n use_display_score = args.use_display_score\n erase_bbox = args.erase_bbox\n use_jutsu_lang_en = args.use_jutsu_lang_en\n\n chattering_check = args.chattering_check\n\n use_fullscreen = args.use_fullscreen\n\n # カメラ準備 ###############################################################\n cap = cv.VideoCapture(cap_device)\n cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)\n cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)\n\n # モデル読み込み ############################################################\n DEFAULT_FUNCTION_KEY = 'serving_default'\n loaded_model = tf.saved_model.load(model_path)\n inference_func = loaded_model.signatures[DEFAULT_FUNCTION_KEY]\n\n # FPS計測モジュール #########################################################\n cvFpsCalc = CvFpsCalc()\n\n # フォント読み込み ##########################################################\n # https://opentype.jp/kouzanmouhitufont.htm\n font_path = './utils/font/衡山毛筆フォント.ttf'\n\n # ラベル読み込み ###########################################################\n with open('setting/labels.csv', encoding='utf8') as f: # 印\n labels = csv.reader(f)\n labels = [row for row in labels]\n\n with open('setting/jutsu.csv', encoding='utf8') as f: # 術\n jutsu = csv.reader(f)\n jutsu = [row for row in jutsu]\n\n # 印の表示履歴および、検出履歴 ##############################################\n sign_max_display = 18\n sign_max_history = 44\n sign_display_queue = deque(maxlen=sign_max_display)\n sign_history_queue = deque(maxlen=sign_max_history)\n\n chattering_check_queue = deque(maxlen=chattering_check)\n for index in range(-1, -1 - chattering_check, -1):\n chattering_check_queue.append(index)\n\n # 術名の言語設定 ###########################################################\n lang_offset = 0\n jutsu_font_size_ratio = sign_max_display\n if use_jutsu_lang_en:\n lang_offset = 1\n jutsu_font_size_ratio = int((sign_max_display / 3) * 4)\n\n # その他変数初期化 #########################################################\n sign_interval_start = 0 # 印のインターバル開始時間初期化\n jutsu_index = 0 # 術表示名のインデックス\n jutsu_start_time = 0 # 術名表示の開始時間初期化\n frame_count = 0 # フレームナンバーカウンタ\n\n window_name = 'NARUTO HandSignDetection Ninjutsu Demo'\n if use_fullscreen:\n cv.namedWindow(window_name, cv.WINDOW_NORMAL)\n\n while 
True:\n start_time = time.time()\n\n # カメラキャプチャ #####################################################\n ret, frame = cap.read()\n if not ret:\n continue\n frame_count += 1\n debug_image = copy.deepcopy(frame)\n\n if (frame_count % (skip_frame + 1)) != 0:\n continue\n\n # FPS計測 ##############################################################\n fps_result = cvFpsCalc.get()\n\n # 検出実施 #############################################################\n frame = frame[:, :, [2, 1, 0]] # BGR2RGB\n image_np_expanded = np.expand_dims(frame, axis=0)\n result_inference = run_inference_single_image(image_np_expanded,\n inference_func)\n\n # 検出内容の履歴追加 ####################################################\n num_detections = result_inference['num_detections']\n for i in range(num_detections):\n score = result_inference['detection_scores'][i]\n class_id = result_inference['detection_classes'][i].astype(np.int)\n\n # 検出閾値未満の結果は捨てる\n if score < score_th:\n continue\n\n # 指定回数以上、同じ印が続いた場合に、印検出とみなす ※瞬間的な誤検出対策\n chattering_check_queue.append(class_id)\n if len(set(chattering_check_queue)) != 1:\n continue\n\n # 前回と異なる印の場合のみキューに登録\n if len(sign_display_queue) == 0 or \\\n sign_display_queue[-1] != class_id:\n sign_display_queue.append(class_id)\n sign_history_queue.append(class_id)\n sign_interval_start = time.time() # 印の最終検出時間\n\n # 前回の印検出から指定時間が経過した場合、履歴を消去 ####################\n if (time.time() - sign_interval_start) > sign_interval:\n sign_display_queue.clear()\n sign_history_queue.clear()\n\n # 術成立判定 #########################################################\n jutsu_index, jutsu_start_time = check_jutsu(\n sign_history_queue,\n labels,\n jutsu,\n jutsu_index,\n jutsu_start_time,\n )\n\n # キー処理 ###########################################################\n key = cv.waitKey(1)\n if key == 99: # C:印の履歴を消去\n sign_display_queue.clear()\n sign_history_queue.clear()\n if key == 27: # ESC:プログラム終了\n break\n\n # FPS調整 #############################################################\n elapsed_time = time.time() - start_time\n sleep_time = max(0, ((1.0 / fps) - elapsed_time))\n time.sleep(sleep_time)\n\n # 画面反映 #############################################################\n debug_image = draw_debug_image(\n debug_image,\n font_path,\n fps_result,\n labels,\n result_inference,\n score_th,\n erase_bbox,\n use_display_score,\n jutsu,\n sign_display_queue,\n sign_max_display,\n jutsu_display_time,\n jutsu_font_size_ratio,\n lang_offset,\n jutsu_index,\n jutsu_start_time,\n )\n if use_fullscreen:\n cv.setWindowProperty(window_name, cv.WND_PROP_FULLSCREEN,\n cv.WINDOW_FULLSCREEN)\n cv.imshow(window_name, debug_image)\n # cv.moveWindow(window_name, 100, 100)\n\n cap.release()\n cv.destroyAllWindows()\n\n\ndef check_jutsu(\n sign_history_queue,\n labels,\n jutsu,\n jutsu_index,\n jutsu_start_time,\n):\n # 印の履歴から術名をマッチング\n sign_history = ''\n if len(sign_history_queue) > 0:\n for sign_id in sign_history_queue:\n sign_history = sign_history + labels[sign_id][1]\n for index, signs in enumerate(jutsu):\n if sign_history == ''.join(signs[4:]):\n jutsu_index = index\n jutsu_start_time = time.time() # 術の最終検出時間\n break\n\n return jutsu_index, jutsu_start_time\n\n\ndef draw_debug_image(\n debug_image,\n font_path,\n fps_result,\n labels,\n result_inference,\n score_th,\n erase_bbox,\n use_display_score,\n jutsu,\n sign_display_queue,\n sign_max_display,\n jutsu_display_time,\n jutsu_font_size_ratio,\n lang_offset,\n jutsu_index,\n jutsu_start_time,\n):\n frame_width, frame_height = debug_image.shape[1], debug_image.shape[0]\n\n # 
印のバウンディングボックスの重畳表示(表示オプション有効時) ###################\n if not erase_bbox:\n num_detections = result_inference['num_detections']\n for i in range(num_detections):\n score = result_inference['detection_scores'][i]\n bbox = result_inference['detection_boxes'][i]\n class_id = result_inference['detection_classes'][i].astype(np.int)\n\n # 検出閾値未満のバウンディングボックスは捨てる\n if score < score_th:\n continue\n\n x1, y1 = int(bbox[1] * frame_width), int(bbox[0] * frame_height)\n x2, y2 = int(bbox[3] * frame_width), int(bbox[2] * frame_height)\n\n # バウンディングボックス(長い辺にあわせて正方形を表示)\n x_len = x2 - x1\n y_len = y2 - y1\n square_len = x_len if x_len >= y_len else y_len\n square_x1 = int(((x1 + x2) / 2) - (square_len / 2))\n square_y1 = int(((y1 + y2) / 2) - (square_len / 2))\n square_x2 = square_x1 + square_len\n square_y2 = square_y1 + square_len\n cv.rectangle(debug_image, (square_x1, square_y1),\n (square_x2, square_y2), (255, 255, 255), 4)\n cv.rectangle(debug_image, (square_x1, square_y1),\n (square_x2, square_y2), (0, 0, 0), 2)\n\n # 印の種類\n font_size = int(square_len / 2)\n debug_image = CvDrawText.puttext(\n debug_image, labels[class_id][1],\n (square_x2 - font_size, square_y2 - font_size), font_path,\n font_size, (185, 0, 0))\n\n # 検出スコア(表示オプション有効時)\n if use_display_score:\n font_size = int(square_len / 8)\n debug_image = CvDrawText.puttext(\n debug_image, '{:.3f}'.format(score),\n (square_x1 + int(font_size / 4),\n square_y1 + int(font_size / 4)), font_path, font_size,\n (185, 0, 0))\n\n # ヘッダー作成:FPS #########################################################\n header_image = np.zeros((int(frame_height / 18), frame_width, 3), np.uint8)\n header_image = CvDrawText.puttext(header_image, \"FPS:\" + str(fps_result),\n (5, 0), font_path,\n int(frame_height / 20), (255, 255, 255))\n\n # フッター作成:印の履歴、および、術名表示 ####################################\n footer_image = np.zeros((int(frame_height / 10), frame_width, 3), np.uint8)\n\n # 印の履歴文字列生成\n sign_display = ''\n if len(sign_display_queue) > 0:\n for sign_id in sign_display_queue:\n sign_display = sign_display + labels[sign_id][1]\n\n # 術名表示(指定時間描画)\n if lang_offset == 0:\n separate_string = '・'\n else:\n separate_string = ':'\n if (time.time() - jutsu_start_time) < jutsu_display_time:\n if jutsu[jutsu_index][0] == '': # 属性(火遁等)の定義が無い場合\n jutsu_string = jutsu[jutsu_index][2 + lang_offset]\n else: # 属性(火遁等)の定義が有る場合\n jutsu_string = jutsu[jutsu_index][0 + lang_offset] + \\\n separate_string + jutsu[jutsu_index][2 + lang_offset]\n footer_image = CvDrawText.puttext(\n footer_image, jutsu_string, (5, 0), font_path,\n int(frame_width / jutsu_font_size_ratio), (255, 255, 255))\n # 印表示\n else:\n footer_image = CvDrawText.puttext(footer_image, sign_display, (5, 0),\n font_path,\n int(frame_width / sign_max_display),\n (255, 255, 255))\n\n # ヘッダーとフッターをデバッグ画像へ結合 ######################################\n debug_image = cv.vconcat([header_image, debug_image])\n debug_image = cv.vconcat([debug_image, footer_image])\n\n return debug_image\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"_legacy/v2/Ninjutsu_demo.py","file_name":"Ninjutsu_demo.py","file_ext":"py","file_size_in_byte":13799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"49411171","text":"import unittest\nfrom unittest.mock import patch, Mock, MagicMock\n\nfrom system import models\nfrom system.exceptions import PreconditionFailedError\n\n\nclass GlobalDataTest(unittest.TestCase):\n @patch('django.db.transaction.atomic')\n @patch('django.db.models.Manager.get')\n 
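# note: stacked @patch decorators inject mocks bottom-up, so the test method's\n    # (save, notify, load, atomic) arguments map to these patches in reverse order\n    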
@patch('system.processing.signalling.notify')\n @patch('django.db.models.Model.save')\n def test_advance_tick(self, save, notify, load, atomic):\n models.GlobalData.advance_tick()\n for func in (save, notify, load, atomic):\n func.assert_called_once()\n\n\nclass AdventurerTest(unittest.TestCase):\n def test_standard_adventurer_name(self):\n adventurer = models.Adventurer(first_name='First', surname='Second')\n self.assertEqual(adventurer.name, 'First Second')\n\n def test_adventurer_with_no_surname(self):\n adventurer = models.Adventurer(first_name='First')\n self.assertEqual(adventurer.name, 'First')\n\n\nclass AdventurerJourneyTest(unittest.TestCase):\n @patch('system.models.GlobalData.load')\n def test_set_dungeon_destination(self, global_data):\n journey = models.AdventurerJourney(status='stationary')\n dungeon = models.Dungeon()\n journey.set_destination(dungeon)\n\n self.assertEqual(journey.status, 'to-dungeon')\n self.assertEqual(journey.destination_dungeon, dungeon)\n\n global_data.assert_called()\n\n def test_set_dungeon_destination_when_not_stationary(self):\n journey = models.AdventurerJourney(status='to-dungeon')\n dungeon = models.Dungeon()\n try:\n journey.set_destination(dungeon)\n self.fail()\n except PreconditionFailedError:\n pass\n\n @patch('system.models.GlobalData.load')\n def test_set_settlement_destination(self, global_data):\n journey = models.AdventurerJourney(status='stationary')\n settlement = models.Settlement()\n journey.set_destination(settlement)\n\n self.assertEqual(journey.status, 'to-settlement')\n self.assertEqual(journey.destination_settlement, settlement)\n\n global_data.assert_called()\n\n def test_set_settlement_destination_when_not_stationary(self):\n journey = models.AdventurerJourney(status='to-dungeon')\n settlement = models.Settlement()\n try:\n journey.set_destination(settlement)\n self.fail()\n except PreconditionFailedError:\n pass\n\n def test_progress_to_dungeon_journey(self):\n dungeon = models.Dungeon()\n journey = models.AdventurerJourney(status='to-dungeon',\n destination_dungeon=dungeon)\n\n journey.progress_journey()\n self.assertEqual(journey.status, 'in-dungeon')\n\n @patch('system.models.GlobalData.load')\n def test_progress_in_dungeon_journey(self, global_data):\n dungeon = models.Dungeon()\n settlement = models.Settlement()\n journey = models.AdventurerJourney(status='in-dungeon',\n source_settlement=settlement,\n destination_dungeon=dungeon)\n\n journey.progress_journey()\n self.assertEqual(journey.status, 'from-dungeon')\n\n global_data.assert_called()\n\n def test_progress_from_dungeon_journey(self):\n dungeon = models.Dungeon()\n settlement = models.Settlement()\n journey = models.AdventurerJourney(status='from-dungeon',\n destination_dungeon=dungeon,\n source_settlement=settlement)\n\n journey.progress_journey()\n self.assertEqual(journey.status, 'stationary')\n self.assertEqual(journey.stationary_settlement, settlement)\n self.assertEqual(journey.destination_dungeon, None)\n self.assertEqual(journey.source_settlement, None)\n\n def test_progress_to_settlement_journey(self):\n source_settlement = models.Settlement()\n destination = models.Settlement()\n journey = models.AdventurerJourney(status='to-settlement',\n source_settlement=source_settlement,\n destination_settlement=destination)\n\n journey.progress_journey()\n self.assertEqual(journey.status, 'stationary')\n self.assertEqual(journey.source_settlement, None)\n self.assertEqual(journey.destination_settlement, None)\n self.assertEqual(journey.stationary_settlement, 
destination)\n\n\nclass CommentaryLineTest(unittest.TestCase):\n def test_commentary_line_str_uses_text(self):\n expected = 'Test line text'\n line = models.CommentaryLine(text=expected)\n self.assertEqual(str(line), expected)\n\n\nclass AdventureTest(unittest.TestCase):\n # Mock the json serializer method because it's dumb\n @patch('json.dumps')\n @patch('system.models.FloorRecord')\n @patch('system.models.AdventureEventLine')\n @patch('system.models.AdventureEvent')\n @patch('system.models.AdventurerRecord')\n def test_persist(self, adventurer_record, event, event_line, floor_record,\n json):\n \"\"\"Adventure persist creates the right objects\"\"\"\n json.return_value = []\n\n adventure = models.Adventure()\n\n # ...we don't want it saving to the actual database now do we...\n adventure.save = Mock()\n\n events = []\n # Add an event with four event lines\n events.append(Mock(event_lines=[MagicMock() for i in range(4)]))\n events.append(Mock(event_lines=[MagicMock() for i in range(2)]))\n events.append(Mock(event_lines=[MagicMock() for i in range(7)]))\n\n entity_list = [MagicMock(name='test_entity')]\n floor = MagicMock(name='floor 1')\n floor.__getitem__.side_effect = \\\n lambda key: entity_list if key == 'entities' else None\n mock_floor_data = [floor]\n\n adventure.persist([MagicMock()], events, mock_floor_data)\n\n # Check the right amount of each model has been created\n self.assertEqual(adventurer_record.call_count, 1)\n self.assertEqual(event.call_count, 3)\n self.assertEqual(event_line.call_count, 13)\n self.assertEqual(floor_record.call_count, 1)\n\n adventure.save.assert_called_once()\n\n\nclass AccountTest(unittest.TestCase):\n def test_str_returns_user_name(self):\n from django.contrib.auth.models import User\n\n account = models.Account()\n user = Mock(username='test user name')\n user.__class__ = User\n\n account.user = user\n\n self.assertEqual(str(account), 'test user name')\n\n @patch('system.models.Permission.objects')\n def test_get_permissions(self, permissions):\n expected = [Mock(), Mock()]\n\n permissions.filter().distinct.return_value = expected\n\n account = models.Account()\n actual = account.permissions\n\n self.assertEqual(actual, expected)\n\n\nclass ItemTest(unittest.TestCase):\n def test_str(self):\n item = models.Item(name=\"Cheese\")\n result = str(item)\n self.assertIn(\"Cheese\", result)\n\n\nclass AdventurerItemTest(unittest.TestCase):\n @patch('django.db.router.allow_relation')\n def test_str(self, router):\n owner = Mock(name=\"Wanderer\")\n owner.__class__ = models.Adventurer\n item = Mock(name=\"Cheese\", description=None, parent=None)\n item.__class__ = models.Item\n\n adv_item = models.AdventurerItem(quantity=4, quality=25)\n adv_item.owner = owner\n adv_item.item = item\n result = str(adv_item)\n self.assertIn(\"Cheese\", result)\n self.assertIn(\"Wanderer\", result)\n self.assertIn(\"4\", result)\n self.assertIn(\"25\", result)\n","sub_path":"system/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"624323790","text":"from subprocess import TimeoutExpired, run\n\n# A Known Python Injection:\n# 1\"\"\")\n# import os\n# print(os.system(\"ls -a\"))\n# (\"\"\"\n\n\ndec_code = \\\n'''\nfrom sim_parser import ExpressionEvaluator\nparser = ExpressionEvaluator()\nparser.parse(\"\"\"\n{}\n\"\"\")\n'''\n\ndef djex(request, sim_code_str, file_path='', timeout=30):\n '''exec code_str and return print output or 
exception text'''\n\n # run code and return output and errors\n py_code = dec_code.format(sim_code_str)\n try:\n # f'docker exec rinpydocker_excecutor_1 python -c \\'{py_code}\\''\n o = run(f'docker run --rm rinpydocker_excecutor python -c \\'{py_code}\\'', \n shell=True, \n capture_output=True, \n timeout=timeout\n )\n output = o.stdout.decode(\"utf-8\")\n output += o.stderr.decode(\"utf-8\")\n # timeout exception\n except TimeoutExpired:\n output = f'You program exceded time limit of {timeout} seconds and was terminated.'\n\n return output","sub_path":"RinPy/editor/services/djex.py","file_name":"djex.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"520790552","text":"\nfrom Tkinter import Frame, Canvas, Label, Entry, Button\nfrom Tkconstants import N, S, E, W, HORIZONTAL, NW\nfrom AutoScrollbar import AutoScrollbar\nfrom DBServices import DBServices\nfrom Mineral import Mineral\n\nclass SiteUpdateApp(Frame):\n def __init__(self, master, mineral, temp):\n \n Frame.__init__(self, master)\n self.master = master\n self.master.geometry(\"%sx%s\" % (700 ,200))\n \n vscrollbar = AutoScrollbar(self.master)\n vscrollbar.grid(row=0, column=1, sticky=N+S)\n hscrollbar = AutoScrollbar(self.master, orient=HORIZONTAL)\n hscrollbar.grid(row=1, column=0, sticky=E+W)\n \n self.canvas = Canvas(self.master,\n yscrollcommand=vscrollbar.set,\n xscrollcommand=hscrollbar.set)\n self.canvas.grid(row=0, column=0, sticky=N+S+E+W)\n \n vscrollbar.config(command=self.canvas.yview)\n hscrollbar.config(command=self.canvas.xview)\n \n # make the canvas expandable\n self.master.grid_rowconfigure(0, weight=1)\n self.master.grid_columnconfigure(0, weight=1)\n \n frame = self.createFrame(mineral, temp)\n frame.rowconfigure(1, weight=1)\n frame.columnconfigure(1, weight=1) \n \n self.canvas.create_window(0, 0, anchor=NW, window=frame)\n \n frame.update_idletasks()\n \n self.canvas.config(scrollregion=self.canvas.bbox(\"all\"))\n \n \n \n def createFrame(self, mineral, temp): \n scrollFrame = Frame(self.canvas)\n \n dataList = DBServices().getInfoUpdate(mineral, temp)\n \n Label(scrollFrame, text = \"Mineral: \").grid(row = 0, column = 0, sticky = NW)\n Label(scrollFrame, text = mineral).grid(row = 0, column = 1, sticky = NW)\n Label(scrollFrame, text = \"Temperature range: \").grid(row = 1, column = 0, sticky = NW)\n Label(scrollFrame, text = temp + \" (K)\").grid(row = 1, column = 1, sticky = NW) \n \n Label(scrollFrame, text = \" --------------------------\").grid(row = 2, columnspan= 7, column = 0)\n headerTitle = [\"No\\t\", \"Ref\\t\", \"Source\\t\",\"IS\\t\",\"Data\\t\",\"QS\\t\", \"Hn\\t\", \"Site\"]\n \n for i in range(8): \n Label(scrollFrame, text = headerTitle[i]).grid(row = 3, column = i)\n \n numberOfData = len(dataList)\n self.entries = []\n \n for row1 in range(0, numberOfData): \n Label(scrollFrame, text = dataList[row1][0], width = 10).grid(row = row1+6, column = 0)\n Label(scrollFrame, text = dataList[row1][1], width = 10).grid(row = row1+6, column = 1)\n Label(scrollFrame, text = dataList[row1][2], width = 10).grid(row = row1+6, column = 2)\n Label(scrollFrame, text = dataList[row1][3], width = 10).grid(row = row1+6, column = 3)\n Label(scrollFrame, text = dataList[row1][4], width = 10).grid(row = row1+6, column = 4)\n Label(scrollFrame, text = dataList[row1][5], width = 10).grid(row = row1+6, column = 5)\n Label(scrollFrame, text = dataList[row1][6], width = 10).grid(row = row1+6, column = 6)\n \n entry = 
Entry(scrollFrame, width = 10)\n \n if(dataList[row1][7]!= None):\n entry.insert(1, dataList[row1][7])\n \n entry.grid(row = row1+ 6, column = 7, padx = 10)\n \n self.entries.append(entry) \n \n lastRow = numberOfData + 7\n \n Button(scrollFrame, text = \"Update\", command=lambda: self.update(dataList)).grid(row = lastRow, column = 7) \n return scrollFrame\n \n def update(self, dataList): \n index = 0\n arrayObj = []\n for data in dataList:\n keyNum = data[0] \n ref = data[1]\n source = data[2]\n isomer = data[3]\n qs = data[4]\n hn = data[5]\n name = data[7]\n temp = data[8] \n site = self.entries[index].get()\n index += 1 \n \n objMineral = Mineral(keyNum, name, ref, source, isomer, qs, hn, site, temp) \n arrayObj.append(objMineral) \n \n DBServices().updateMineralSite(arrayObj)\n \n","sub_path":"SiteUpdateApp.py","file_name":"SiteUpdateApp.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"114312104","text":"###################################################################\n#\n# Four corners\n#\n# As a graphics program that involves making decisions, this\n# exercise extends the demonstration program from the lecture\n# with colour.\n#\n# The program below is a minor variant of the \"jump to the left\"\n# Turtle graphics demonstration from Lecture 4. The\n# main difference is that a thick line is drawn as the cursor\n# moves around the screen.\n#\n# Your task is to modify the program so that the line changes\n# colour depending on what quadrant of the screen the cursor is in.\n# We assume the drawing surface is divided into four segments as\n# follows, with coordinate (0, 0) at the centre.\n#\n# |\n# Quadrant 1 | Quadrant 2\n# |\n# |\n# ---------------+---------------\n# |\n# Quadrant 3 | Quadrant 4\n# |\n# |\n#\n# Select a colour for each quadrant and extend the code below\n# so that the line being drawn changes colour depending on the\n# quadrant. 
The result will be an abstract painting with\n# four differently coloured squares.\n#\n# Hint: The cursor's current coordinates are returned by\n# Turtle's \"xcor\" and \"ycor\" methods.\n#\n\n\n# Import the necessary pre-defined functions\nfrom turtle import *\nfrom random import randint\nQ1 = 'red'\nQ2 = 'purple'\nQ3 = 'yellow'\nQ4 = 'blue'\n\n########################################\n# This Boolean-valued function returns True if the turtle is near\n# any of the drawing window's four edges \n#\nborder = 75 # how close we must get to be considered \"near\" the edge\nmax_x_coord = (window_width() // 2) - border # how far we can go left or right\nmax_y_coord = (window_height() // 2) - border # how far we can go up or down\n\ndef near_edge():\n    x_distance_from_home = abs(xcor())\n    y_distance_from_home = abs(ycor())\n    return x_distance_from_home > max_x_coord or \\\n           y_distance_from_home > max_y_coord\n\n########################################\n# Define some fixed values to control the simulation\n#\nstep_size = 10 # how far the turtle moves in each step, in pixels\nturn_angle = 20 # how far to turn to avoid the edge, in degrees\n\n########################################\n# Set up the drawing window and other overall parameters\n#\ntitle(\"Four Corners\")\nhideturtle()\nbgcolor(\"white\")\nwidth(10)\nspeed(\"fastest\")\n\n########################################\n# Start by pointing the turtle in a random direction\n#\nsetheading(randint(0,359))\n\n########################################\n# In each step check to see if we're near an edge and,\n# if so, turn to the right. Also change colour depending\n# on the current segment.\n#\nfor step in range(3000): # how many steps to take in the simulation\n    if near_edge():\n        right(turn_angle)\n    forward(step_size)\n    if xcor() < 0 and ycor() > 0:\n        color(Q1)\n    elif xcor() > 0 and ycor() > 0:\n        color(Q2)\n    elif xcor() < 0 and ycor() < 0:\n        color(Q3)\n    elif xcor() > 0 and ycor() < 0:\n        color(Q4)\n    else:\n        color('black')\n\n########################################\n# Release the drawing window when finished\n#\ndone()\n","sub_path":"Workshop04-questions/3_four_corners.py","file_name":"3_four_corners.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"457011602","text":"# -*- coding: utf-8 -*-\nimport os\nimport tensorflow.compat.v1 as tf\nimport tensor_5_mnist_forward as forward\nimport tensor_5_mnist_backward as backward\nfrom PIL import Image\nimport numpy as np\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef restore_model(pic_arr):\n    print(pic_arr.shape)\n    with tf.Graph().as_default() as tg:\n        print(\"type: %s \" % (type(tg)))\n        x = tf.placeholder(tf.float32,[\n            1,\n            forward.IMAGE_SIZE,\n            forward.IMAGE_SIZE,\n            forward.NUM_CHANNELS])\n\n        y = forward.forward(x,False,None)\n        pre_val = tf.argmax(y,1)\n\n        variable_averages = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)\n        variables_to_restore = variable_averages.variables_to_restore()\n        saver = tf.train.Saver(variables_to_restore)\n\n        with tf.Session() as sess:\n            ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)\n            if ckpt and ckpt.model_checkpoint_path:\n                saver.restore(sess,ckpt.model_checkpoint_path)\n                reshaped_pic = np.reshape(pic_arr,(\n                    1,\n                    forward.IMAGE_SIZE,\n                    forward.IMAGE_SIZE,\n                    forward.NUM_CHANNELS))\n                pre_val = sess.run(pre_val,feed_dict={x:reshaped_pic})\n                return pre_val\n            else:\n                print(\"No checkpoint file found.\")\n                return -1\n\ndef 
pre_pic(pic_name):\n img = Image.open(pic_name).convert('L')\n re_id = img.resize((28,28),Image.ANTIALIAS)\n im_arr = np.array(re_id)\n threshold = 50\n for i in range(28):\n for j in range(28):\n im_arr[i][j] = 255 - im_arr[i][j]\n if im_arr[i][j] < threshold:\n im_arr[i][j] = 0\n else:\n im_arr[i][j] = 255\n nm_arr = im_arr.astype(np.float32)\n img_ready = np.multiply(nm_arr, 1.0 / 255.0)\n return img_ready\n\ndef main():\n test_num = input(\"Input the number of test pictures : \")\n for i in range(int(test_num)):\n test_pic = input(\"The path of test picture : \")\n test_pic_arr = pre_pic(test_pic)\n pre_val = restore_model(test_pic_arr)\n print(\"This prediction number is : \",pre_val)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tensor-note/tensor1.14/tensor_5_mnist_app.py","file_name":"tensor_5_mnist_app.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"66315048","text":"\"\"\"\nTests utilities\n\"\"\"\n\nfrom pathlib import Path\n\nimport pytest\n\nimport pygaps.utilities as util\n\n\n@pytest.mark.core\ndef test_convert_chemformula():\n assert util.string_utilities.convert_chemformula(\"N2\") == \"$N_{2}$\"\n assert util.string_utilities.convert_chemformula(\n \"C4H10\"\n ) == \"$C_{4}H_{10}$\"\n\n\n@pytest.mark.core\ndef test_convert_unitstr():\n assert util.string_utilities.convert_unitstr(\"mmol\") == \"mmol\"\n assert util.string_utilities.convert_unitstr(\"g\", True) == \"g^{-1}\"\n assert util.string_utilities.convert_unitstr(\"cm3\") == \"cm^{3}\"\n assert util.string_utilities.convert_unitstr(\"cm3(STP)\") == \"cm^{3}_{STP}\"\n assert util.string_utilities.convert_unitstr(\"cm3\", True) == \"cm^{-3}\"\n\n\n@pytest.mark.core\ndef test_file_paths():\n path = Path(__file__) / 'tst'\n\n with pytest.raises(Exception):\n util.python_utilities.get_file_paths(path)\n\n known_paths = [path / '1.tst', path / '2.tst']\n paths = util.python_utilities.get_file_paths(path, extension='tst')\n\n assert all([path in known_paths for path in paths])\n\n\n@pytest.mark.core\ndef test_deep_merge():\n source = {'hello1': 1}\n overrides = {'hello2': 2}\n util.python_utilities.deep_merge(source, overrides)\n assert source == {'hello1': 1, 'hello2': 2}\n\n source = {'hello1': 0}\n overrides = {'hello1': {'bar': 1}}\n util.python_utilities.deep_merge(source, overrides)\n assert source == {'hello1': {'bar': 1}}\n\n source = {'hello': 'to_override'}\n overrides = {'hello': 'over'}\n util.python_utilities.deep_merge(source, overrides)\n assert source == {'hello': 'over'}\n\n source = {'hello': {'value': 'to_override', 'no_change': 1}}\n overrides = {'hello': {'value': 'over'}}\n util.python_utilities.deep_merge(source, overrides)\n assert source == {'hello': {'value': 'over', 'no_change': 1}}\n\n source = {'hello': {'value': 'to_override', 'no_change': 1}}\n overrides = {'hello': {'value': {}}}\n util.python_utilities.deep_merge(source, overrides)\n assert source == {'hello': {'value': {}, 'no_change': 1}}\n\n source = {'hello': {'value': {}, 'no_change': 1}}\n overrides = {'hello': {'value': 2}}\n util.python_utilities.deep_merge(source, overrides)\n assert source == {'hello': {'value': 2, 'no_change': 1}}\n","sub_path":"tests/utilities/test_utilities.py","file_name":"test_utilities.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"61468626","text":"\"\"\"\nThis script searches for files with 
specified extensions and\ndisplays the folder and file name of each match, plus a total count.\n\n\"\"\"\n\n\nimport os\npath = r\"D:\\GitHub\\Automation\"\t\t# provide location of files\nfilesToFind = ['.py'] \t\t\t# provide extensions to look for\n\nfilesFound = []\nfor root, dirnames, filenames in os.walk(path):\n\tfor file in filenames:\n\t\t# split filename and extension\n\t\tfilename, ext = os.path.splitext(file)\n\t\tif ext.lower() in filesToFind:\n\t\t\tprint(f\"Folder: {root} \\nFileName: {file}\")\n\t\t\tfilesFound.append(file)\nprint(f\"Found {len(filesFound)} files\")\n\n","sub_path":"Find_automation_scripts.py","file_name":"Find_automation_scripts.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"354877101","text":"attendees = [\"Ken\", \"Alena\", \"Treasure\"]\n# attendees.append(\"Ashley\")\nattendees.extend([\"James\", \"Guil\"])\noptional_invitees = [\"Ben J.\", \"Dave\"]\npotential_attendees = attendees + optional_invitees\nprint(\"There are\", len(potential_attendees), \"potential attendees currently.\")\n\nto_line = \", \".join(attendees)\ncc_line = \", \".join(optional_invitees)\n\nprint(to_line)\nprint(cc_line)\n\nsplit_with_comma = to_line.split(\", \")\nnot_split_with_comma = to_line.split()\n\nprint(split_with_comma)\nprint(not_split_with_comma)","sub_path":"Beginning Python Track/2 - Introducing Lists/meeting.py","file_name":"meeting.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"186872769","text":"import os, glob\n\nenv = Environment(ENV={'PATH': os.environ['PATH']})\n\nenv.Replace(CXX='clang++')\nenv.Append(CPPPATH = ['/opt/local/include/', 'libdrunkard/include',\n                      'src', 'src/game', 'src/ui', 'src/utils'])\nenv.Append(CCFLAGS='-Wall -Wextra -std=c++11 -g -fPIC')\nenv.Append(LINKFLAGS='-Wl,-rpath,.')\nenv.Append(LIBPATH=['.', 'libdrunkard/lib'])\n\nenv.Program('bilebio', glob.glob('src/*.cpp'), LIBS=['drunkard', 'sfml-graphics', 'sfml-system', 'sfml-window'])\n#['jc', 'allegro_main', 'allegro', 'allegro_font', 'allegro_image'])\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"571793807","text":"\"\"\"Blogly application.\"\"\"\n\nfrom flask import Flask, render_template, request, redirect\nfrom models import db, connect_db, User, Post\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\n\nconnect_db(app)\ndb.create_all()\n\n\n@app.route(\"/\")\ndef home():\n    return redirect(\"/users\")\n\n\n@app.route(\"/users\")\ndef users_home():\n    users = User.query.all()\n    return render_template(\"users-home.html\", users=users)\n\n\n@app.route(\"/users/new\")\ndef render_user_form():\n    return render_template(\"add-user-form.html\")\n\n\n@app.route(\"/users/new\", methods=[\"POST\"])\ndef add_user():\n    # get the new user's data from the submitted form\n    first_name = request.form[\"first_name\"].capitalize()\n    last_name = request.form[\"last_name\"].capitalize()\n    user_image = request.form[\"user_image\"]\n    new_user = User(first_name=first_name,\n                    last_name=last_name, user_image=user_image)\n    db.session.add(new_user)\n    db.session.commit()\n    return redirect(\"/users\")\n\n\n@app.route(\"/users/<int:user_id>/\")\n
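# <int:user_id> is Flask's integer path converter: it matches one numeric URL\n# segment and passes it to the view function below as the user_id argument\n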
def user_details_page(user_id):\n    found_user = User.query.get_or_404(user_id)\n    return render_template(\"user-details-page.html\", user=found_user)\n\n\n@app.route(\"/users/<int:user_id>/edit\")\ndef render_user_edit_form(user_id):\n    found_user = User.query.get_or_404(user_id)\n    return render_template(\"user-edit-form.html\", user=found_user)\n\n\n@app.route(\"/users/<int:user_id>/edit\", methods=[\"POST\"])\ndef edit_user_info(user_id):\n    found_user = User.query.get_or_404(user_id)\n    found_user.first_name = request.form[\"first_name\"]\n    found_user.last_name = request.form[\"last_name\"]\n    found_user.user_image = request.form[\"user_image\"]\n    db.session.commit()\n    return redirect(f\"/users/{user_id}\")\n\n\n@app.route(\"/users/<int:user_id>/delete/\")\ndef delete_user(user_id):\n    User.query.filter(User.id == user_id).delete()\n    db.session.commit()\n    return redirect(\"/users\")\n\n\n@app.route(\"/users/<int:user_id>/posts/new\")\ndef render_user_post_form(user_id):\n    found_user = User.query.get_or_404(user_id)\n    return render_template(\"new-post-form.html\", user=found_user)\n\n\n@app.route(\"/users/<int:user_id>/posts/new\", methods=[\"POST\"])\ndef create_new_post(user_id):\n    title = request.form[\"post-title\"]\n    content = request.form[\"post-content\"]\n    found_user = User.query.get_or_404(user_id)\n    new_post = Post(title=title, content=content, user=found_user)\n    db.session.add(new_post)\n    db.session.commit()\n    return redirect(f\"/users/{user_id}\")\n\n\n@app.route(\"/posts/<int:post_id>/\")\ndef load_post_details_page(post_id):\n    found_post = Post.query.get_or_404(post_id)\n    return render_template(\"post-details-page.html\", post=found_post)\n\n\n@app.route(\"/posts/<int:post_id>/delete\")\ndef delete_post(post_id):\n    Post.query.filter(Post.id == post_id).delete()\n    db.session.commit()\n    return redirect(\"/users\")\n\n\n@app.route(\"/posts/<int:post_id>/edit\")\ndef render_post_edit_form(post_id):\n    found_post = Post.query.get_or_404(post_id)\n    return render_template(\"post-edit-form.html\", post=found_post)\n\n\n@app.route(\"/posts/<int:post_id>/edit\", methods=[\"POST\"])\ndef edit_post_info(post_id):\n    found_post = Post.query.get_or_404(post_id)\n    found_post.title = request.form[\"new-post-title\"]\n    found_post.content = request.form[\"new-post-content\"]\n    db.session.commit()\n    return redirect(f\"/users/{found_post.user_id}\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"458069299","text":"# https://www.geeksforgeeks.org/auto-complete-feature-using-trie/\nimport collections\n\n\nclass TrieNode:\n    def __init__(self):\n        self.child = collections.defaultdict(TrieNode)\n        self.is_word = False\n\n\nclass Trie:\n    def __init__(self):\n        self.root = TrieNode()\n\n    def insert(self, word):\n        node = self.root\n        for c in word:\n            node = node.child[c]\n\n        node.is_word = True\n\n\nclass Solution:\n    def findWords(self, board, words) -> list:\n        trie = Trie()\n        result, node = [], trie.root\n\n        # build Trie\n        for word in words:\n            trie.insert(word)\n\n        for i in range(len(board)):\n            for j in range(len(board[0])):\n                self._search_word(board, i, j, node, '', result)\n\n        return result\n\n    def _search_word(self, board, i, j, node, path, result):\n        if node.is_word:\n            result.append(path)\n            node.is_word = False\n\n        if not 0 <= i < len(board) or not 0 <= j < len(board[0]) or board[i][j] == '#':\n            return\n\n        temp = board[i][j]\n        node = node.child.get(temp)\n        if not node:\n            return\n\n        board[i][j] = '#'\n
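        # the '#' sentinel set above keeps the depth-first search from revisiting\n        # this cell; the original letter is restored after the four calls below\n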
        self._search_word(board, i + 1, j, node, path + temp, result)\n        self._search_word(board, i - 1, j, node, path + temp, result)\n        self._search_word(board, i, j + 1, node, path + temp, result)\n        self._search_word(board, i, j - 1, node, path + temp, result)\n        board[i][j] = temp\n","sub_path":"Problems/geeksforgeeks/Auto-complete_feature_using_Trie.py","file_name":"Auto-complete_feature_using_Trie.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"552421202","text":"import socket\nfrom time import sleep\nfrom protocols.v1 import domoticcom\nfrom protocols.v1 import packets\n\nclass tester():\n    VERSION = 1 #version of the protocol this test is written for\n\n    GROUNDWATER_TYPE = 0\n    PUMP_TYPE = 1 \n    time = None\n    uuid = None\n    dcid = None\n\n    def __init__(self, host, port):\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.sock.connect((host, port))\n\n    def build_base_frame(self, command, args=b''):\n        # frame layout: version (1 byte) | total length (2) | command (2) | args\n        length = 5+len(args)\n        version_bytes = self.VERSION.to_bytes(1, 'big')\n        cmd_bytes = command.to_bytes(2, 'big')\n        len_bytes = length.to_bytes(2, 'big')\n        return version_bytes+len_bytes+cmd_bytes+args\n\n    def build_module_frame(self, command, args=b''):\n        uuid_bytes = self.uuid.to_bytes(8, 'big')\n        return self.build_base_frame(command, uuid_bytes+args)\n\n    def send(self, msg):\n        if isinstance(msg, packets.base_packet):\n            msg = msg.serialize()\n        #print(msg)\n        self.sock.sendall(msg)\n\n    def test(self):\n        print(\"Launching test script\")\n        msg = packets.request_id()\n        self.send(msg)\n        self.recv_response()\n        msg = packets.request_info()\n        self.send(msg)\n        self.recv_response()\n        packet = packets.module_info(self.uuid, self.GROUNDWATER_TYPE)\n        self.send(packet)\n        sleep(0.01)\n        packet = packets.module_info(self.uuid, self.PUMP_TYPE)\n        self.send(packet)\n        sleep(0.01) #give server some time to handle data\n        for sensor_val in range(1250, 1250+3*5, 5):\n            packet = packets.send_value(self.uuid, self.GROUNDWATER_TYPE.to_bytes(2,'big'), sensor_val.to_bytes(2, 'big'))\n            self.send(packet)\n            sleep(1)\n            #self.recv_response()\n        sleep(2)\n        self.send(b\"shutdownowplzuwu\")\n\n    def recv_response(self):\n        length = int.from_bytes(self.sock.recv(2), 'big')\n        version = self.sock.recv(1)\n        cmd = int.from_bytes(self.sock.recv(2), 'big')\n        length -= 4\n        rest = self.sock.recv(length)\n        if cmd == domoticcom.from_dc.SET_ID.value:\n            self.handle_set_id(length, rest)\n        elif cmd == domoticcom.from_dc.SEND_INFO.value:\n            self.handle_send_info(length, rest)\n\n    def handle_send_info(self, dat_len, rest):\n        self.dcid = int.from_bytes(rest[:8], 'big')\n        self.time = int.from_bytes(rest[8:], 'big')\n        print(f\"Received info: time {self.time}, id {self.dcid}\") \n\n    def handle_set_id(self, dat_len, rest):\n        self.uuid = int.from_bytes(rest, 'big')\n        print(f\"Received ID {self.uuid}\")\n","sub_path":"DomoticControl/DomoticControl/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"350351058","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/leotrubach/development/django-saas-email/venv/lib/python3.7/site-packages/django_saas_email/migrations/0007_mailtemplate_sender_email.py\n# Compiled at: 2019-03-28 10:39:18\n# Size of source mod 2**32: 545 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        ('django_saas_email', 
'0006_add_to_and_from_names')]\n operations = [\n migrations.AddField(model_name='mailtemplate',\n name='sender_email',\n field=models.EmailField(blank=True, help_text='Email to use as sender address when the template is used. If not set then default address is used', max_length=254, null=True))]","sub_path":"pycfiles/django-saas-email-0.1.30.linux-x86_64.tar/0007_mailtemplate_sender_email.cpython-37.py","file_name":"0007_mailtemplate_sender_email.cpython-37.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"11052104","text":"import sys\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nfrom Flag import Flag\n\n\nclass table_3_5(QWidget):\n \"\"\" 중간 디스플레이 위젯 \"\"\"\n qss = \"\"\"\n QWidget {\n background: rgb(221, 221, 221); \n border : 0px solid; \n }\n QPushButton{\n background-color: rgb(221,221,221);\n border: 1px solid rgb(0,0,0); \n font-size: 14pt;\n font-weight: bold\n }\n QScrollBar::vertical {\n height:0px;\n margin-top: 0px;\n padding-top: 0px;\n }\n QCheckBox {\n margin-left:0px;\n font-size:15px;\n }\n QTableWidget {\n gridline-color : black;\n }\n QCheckBox::indicator {\n width: 60px;\n height: 60px;\n }\n QCheckBox::indicator::unchecked {\n width: 60px;\n height: 60px;\n border : 0px solid;\n }\n QCheckBox::indicator::checked {\n image : url(./check.png);\n height:30px;\n width:60px;\n }\n QTextEdit{\n font-size: 18pt;\n Color : black;\n border : 0px solid\n }\n QTextEdit#button{\n font-size: 12pt;\n font-weight:bold;\n Color : black;\n border : 0px solid\n }\n QTableView {\n gridline-color : black;\n }\n QHeaderView::section {\n background: black;\n }\n \"\"\"\n\n\n def __init__(self, parent=None):\n super(table_3_5, self).__init__()\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setContentsMargins(0, 0, 0, 0)\n self.setStyleSheet(self.qss)\n # 기본 속성\n layout = QVBoxLayout(self)\n label = QTextEdit(\"4.다. 
부정적 영향을 완화하기 위한 조치들을 평가한다.\")\n label.setStyleSheet(\"font-size: 18pt;font-weight: bold\")\n label.setContentsMargins(10, 10, 10, 20)\n label.setDisabled(True)\n label.setFixedHeight(60) # QTextEdit 때문��� 설정해줘야함 (addStretch 안먹음)\n\n label2 = QTextEdit(\" - 발생 가능한 부정적 영향\")\n label2.setStyleSheet(\"font-size: 18pt;font-weight: bold\")\n label2.setContentsMargins(10, 10, 10, 20)\n label2.setDisabled(True)\n label2.setFixedHeight(45) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음)\n self.setLayout(layout)\n\n self.scrollTop = QScrollArea()\n self.scrollTop.setFixedHeight(45)\n self.scrollTop.setWidgetResizable(True)\n self.scrollTop.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.scrollTop.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.scrollBottom = QScrollArea()\n self.scrollBottom.setWidgetResizable(True)\n self.scrollBottom.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n table_header = TableHeader(self)\n para_table = ParaTable(self)\n self.scrollTop.setWidget(table_header)\n self.scrollBottom.setWidget(para_table)\n\n layout.addWidget(label)\n layout.addWidget(label2)\n layout.addWidget(self.scrollTop)\n layout.addWidget(self.scrollBottom)\n\nclass TableHeader(QTableWidget):\n def __init__(self, parent):\n super(TableHeader, self).__init__(parent=parent)\n self.setAttribute(Qt.WA_StyledBackground, True)\n\n # 테이블 프레임 모양 정의\n self.horizontalHeader().setFixedHeight(1)\n self.verticalHeader().setFixedWidth(1)\n\n self.setColumnCount(4)\n self.setRowCount(1)\n # 편집 불가\n self.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.setFocusPolicy(Qt.NoFocus)\n self.setSelectionMode(QAbstractItemView.NoSelection)\n\n # 테이블 행 높이 조절\n self.setRowHeight(0, 40)\n self.setColumnWidth(0, 120)\n self.setColumnWidth(1, 220)\n self.setColumnWidth(2, 436)\n self.setColumnWidth(3, 60)\n\n # 테이블 헤더\n self.setItem(0, 0, QTableWidgetItem(\"부정적 영향\"))\n self.setItem(0, 1, QTableWidgetItem(\"적용시점\"))\n self.setItem(0, 2, QTableWidgetItem(\"완화조치\"))\n self.setItem(0, 3, QTableWidgetItem(\"선택\"))\n\n # 테이블 정렬\n delegate = AlignDelegate()\n self.setItemDelegate(delegate)\n\n fnt = self.font()\n fnt.setBold(True)\n fnt.setPointSize(12)\n self.setFont(fnt)\n\nclass ParaTable(QTableWidget):\n def __init__(self, parent):\n super(ParaTable, self).__init__(parent=parent)\n self.setAttribute(Qt.WA_StyledBackground, True)\n # 테이블 프레임 모양 정의\n self.horizontalHeader().setFixedHeight(1)\n self.verticalHeader().setFixedWidth(1)\n self.setContentsMargins(0, 0, 0, 0)\n self.setColumnCount(4)\n self.setRowCount(9)\n # 편집 불가\n self.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.setFocusPolicy(Qt.NoFocus)\n self.setSelectionMode(QAbstractItemView.NoSelection)\n\n # 테이블 행 높이 조절\n self.setColumnWidth(0, 120)\n self.setColumnWidth(1, 220)\n self.setColumnWidth(2, 436)\n self.setColumnWidth(3, 60)\n\n # SPAN 생성(부정적영향)\n self.setSpan(0, 0, 3, 1)\n self.setSpan(0, 1, 3, 1)\n self.setSpan(3, 0, 3, 1)\n self.setSpan(3, 1, 3, 1)\n self.setSpan(6, 0, 3, 1)\n self.setSpan(6, 1, 3, 1)\n\n self.setItem(0, 0, QTableWidgetItem(\"증기발생기\\n열충격\"))\n self.setItem(3, 0, QTableWidgetItem(\"증기발생기\\n튜브 누설로\\n핵분열 생성물\\n방출\"))\n self.setItem(6, 0, QTableWidgetItem(\"증기발생기\\n튜브 크립 파열\"))\n\n self.setItem(0, 1, QTableWidgetItem(\"급수가 고갈된 고온의 S/G에\\n급수할 때\\nS/G 광역수위가 [L02] 이하\\n\"))\n self.setItem(3, 1, QTableWidgetItem(\"튜브가 파손되거나 누출이\\n있는 S/G로 급수할 때\"))\n self.setItem(6, 1, QTableWidgetItem(\"급수가 고갈된 고온의 S/G를\\n감압할 때\\n감압중인 S/G\\n광역수위가 [L02]이하\\n일 떄와 RCS 압력이 S/G\\n압력 이상일 때\"))\n\n #QTextEdit enter 안먹음\n t1 = QTextEdit()\n 
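# per the Korean note above, pressing Enter is not accepted inside QTextEdit,\n        # so the multi-line cell text is preset via setPlainText\n        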
t1.setPlainText(\"급수가 고갈된 고온의 S/G에\\n급수할 때\\nS/G 광역수위가 [L02] 이하\\n\\n\\n\\nSG1 L : \\nSG2 L : \")\n t1.setObjectName(\"button\")\n t2 = QTextEdit()\n t2.setPlainText(\"튜브가 파손되거나 누출이\\n있는 S/G로 급수할 때\\n\\n\\n\\n\\nSG1 R : \\nSG2 R : \")\n t2.setObjectName(\"button\")\n t3 = QTextEdit()\n t3.setPlainText(\"급수가 고갈된 고온의 S/G를\\n감압할 때\\n감압중인 S/G\\n광역수위가 [L02]이하\\n일 떄와 RCS 압력이 S/G\\n압력 이상일 때\\n\\nSG1 L : \\nSG2 L :\\nRCS T : \\nCET T :\\nPZR T : \\nRCS Hot-leg T : \\nRCS Cold-leg 1 T :\\nRCS Cold-leg 2 T :\")\n t3.setObjectName(\"button\")\n\n self.setCellWidget(0, 1, t1)\n self.setCellWidget(3, 1, t2)\n self.setCellWidget(6, 1, t3)\n\n\n self.setItem(0, 2, QTableWidgetItem(\"급수 주입의 초기에는 주입량을 적게 제한함\"))\n self.setItem(1, 2, QTableWidgetItem(\"S/G 튜브 파손의 영향이 최소화되는 시간에 S/G\\n광역수위가 최소로 지시될때까지 급수가 고갈된 단\\n1개의 S/G에 급수를 주입함\"))\n self.setItem(2, 2, QTableWidgetItem(\"S/G 튜브 파손의 영향을 최소화하기 위해 격리가\\n가능한 S/G에만 급수를 주입함.\"))\n\n self.setItem(3, 2, QTableWidgetItem(\"건전한 S/G에만 급수하고 비등시킴\"))\n self.setItem(4, 2, QTableWidgetItem(\"S/G 일차측에서 이차측으로 냉각재 누설을\\n최소화하기 위하여 RCS를 감압함\\n(완화-05, “원자로냉각재계통 감압“ 참조)\"))\n self.setItem(5, 2, QTableWidgetItem(\"복수기로 증기를 배출하여 S/G를 감압함\"))\n\n self.setItem(6, 2, QTableWidgetItem(\"S/G 튜브 파손의 영향이 최소화되는 시간에 급수가\\n고갈되고 고온인 S/G 1개만 감압함\"))\n self.setItem(7, 2, QTableWidgetItem(\"S/G 압력이 급수원의 체결수두 이하일 때 가능한 빨리\\n급수 주입량을 복구함. S/G에 급수 주입 초기에는\\n주입략을 적게 제한함\"))\n self.setItem(8, 2, QTableWidgetItem(\"RCS를 감압함\\n(완화-05, “원자로냉각재계통 감압“ 참조)\"))\n\n # 체크박스\n count = 0\n self.checkbox = []\n for i in range(0, self.rowCount()):\n self.checkbox.append(QCheckBox())\n self.setCellWidget(i, 3, self.checkbox[count])\n count = count + 1\n # 테이블 정렬\n delegate = AlignDelegate2()\n self.setItemDelegate(delegate)\n\n fnt = self.font()\n fnt.setBold(True)\n fnt.setPointSize(12)\n self.setFont(fnt)\n\n # timer section\n timer = QTimer(self)\n timer.setInterval(1000)\n timer.timeout.connect(self.dis_update)\n timer.start()\n\n def dis_update(self):\n for i in range(9):\n if Flag.s3_3[i]:\n self.setRowHidden(i, True)\n else:\n self.setRowHidden(i, False)\n count = [0] * 3\n for i in range(9):\n if i < 3:\n if not Flag.s3_3[i]:\n count[0] += 1\n elif i < 6:\n if not Flag.s3_3[i]:\n count[1] += 1\n else:\n if not Flag.s3_3[i]:\n count[2] += 1\n\n for i in range(3):\n if count[i] == 1:\n for j in range(i * 3, i * 3 + 3):\n self.setRowHeight(j, 190)\n if i == 2:\n self.setRowHeight(j, 330)\n elif count[i] == 2:\n for j in range(i * 3, i * 3 + 3):\n self.setRowHeight(j, 90)\n if i == 2:\n self.setRowHeight(j, 165)\n elif count[i] == 3:\n for j in range(i * 3, i * 3 + 3):\n self.setRowHeight(j, 65)\n if i == 2:\n self.setRowHeight(j, 110)\n\nclass AlignDelegate(QStyledItemDelegate):\n def initStyleOption(self, option, index):\n super(AlignDelegate, self).initStyleOption(option, index)\n option.displayAlignment = Qt.AlignCenter\n\nclass AlignDelegate2(QStyledItemDelegate):\n def initStyleOption(self, option, index):\n super(AlignDelegate2, self).initStyleOption(option, index)\n option.displayAlignment = Qt.AlignTop\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n app.setStyle(\"fusion\")\n window = table_3_5()\n window.show()\n font = QFontDatabase()\n font.addApplicationFont('./맑은 고딕.ttf')\n app.setFont(QFont('맑은 고딕'))\n app.exec_()","sub_path":"Table_3_5.py","file_name":"Table_3_5.py","file_ext":"py","file_size_in_byte":11239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"513720664","text":"from cx_Freeze import setup, Executable\nimport 
os\n \nexe = Executable(\n script=os.path.join(\"Cipher.pyw\"),\n base=\"Win32GUI\",\n icon=os.path.join(\"Files\", \"Icon.ico\")\n )\nincludeFiles= [os.path.join(\"Files\", file) for file in\n [\"Dictionary.txt\", \"cipher.options\", \"Icon.ico\", \"Logo.png\",\n \"Missing_Dependancies.html\", \"Cipher Info.txt\",\n \"Licence.txt\", \"About.txt\", \"Attributions.txt\"\n ]\n ]+[\n os.path.join(\"Files\", \"ButtonIcons\", file) for file in\n [\"Copy.png\", \"Cut.png\", \"Exit.png\", \"Info.png\",\n \"Paste.png\", \"Redo.png\", \"Undo.png\", \"Red X.png\",\n \"Green Check.png\", \"Open.png\", \"Save.png\", \n \"Save As.png\", \"Plus.png\", \"Minus.png\", \n \"Contact.png\", \"Bug.png\", \"About.png\"]\n ]\n\nexcludes = [\"Tkinter\"]\n\nsetup(\n name = \"Cipher\",\n version = \"1.1\",\n author = \"Dominick Johnson\",\n author_email = \"dmjohn235@gmail.com\",\n description = \"Basic Text Encryption Software\",\n executables = [exe],\n options = {'build_exe': {\"excludes\":excludes,\n \"include_files\":includeFiles}}\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"72931815","text":"from google.colab import files\r\nuploaded = files.upload()\r\n\r\nimport pandas as pd\r\nfrom sklearn.datasets import load_iris\r\n \r\n# Membaca file iris.csv\r\niris = pd.read_csv('Iris.csv')\r\n\r\n# melihat informasi dataset pada 5 baris pertama\r\niris.head()\r\n\r\n# menghilangkan kolom yang tidak penting\r\niris.drop('Id',axis=1,inplace=True)\r\n\r\n# memisahkan atribut dan label\r\nX = iris[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm' ]]\r\ny = iris['Species']\r\niris.head()\r\nprint(X)\r\nprint(y)\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n \r\n# membuat model Decision Tree\r\ntree_model = DecisionTreeClassifier() \r\n \r\n# melakukan pelatihan model terhadap data\r\ntree_model.fit(X, y)\r\n\r\n# prediksi model dengan tree_model.predict([[SepalLength, SepalWidth, PetalLength, PetalWidth]])\r\ntree_model.predict([[6.2, 3.4, 5.4, 2.3]])\r\n\r\nfrom sklearn.tree import export_graphviz\r\nexport_graphviz(\r\n tree_model,\r\n out_file = \"iris_tree.dot\",\r\n feature_names = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'],\r\n class_names = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica' ],\r\n rounded= True,\r\n filled =True\r\n)\r\n\r\n","sub_path":"SKLearn_Decision_Tree.py","file_name":"SKLearn_Decision_Tree.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"155009644","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.2 (3180)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/__setup__/ally_core_http/support_php.py\n# Compiled at: 2013-10-02 09:54:40\n__doc__ = '\\nCreated on Jul 3, 2011\\n\\n@package: ally core http\\n@copyright: 2012 Sourcefabric o.p.s.\\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\\n@author: Gabriel Nistor\\n\\nProvides additional configurations for the Zend PHP client.\\n'\nfrom ..ally_core.encoder_decoder import content_types_json\nfrom ally.container import ioc\n\n@ioc.config\ndef php_zend_support() -> bool:\n \"\"\"Provides additional configurations for the Zend PHP client\"\"\"\n return False\n\n\n@ioc.before(content_types_json)\ndef updateContentTypesJSON():\n if 
php_zend_support():\n content_types_json()['application/x-www-form-urlencoded'] = 'application/x-www-form-urlencoded'","sub_path":"pycfiles/allzparkdemo-1.0.23-py2.py3-none-any/support_php.cpython-32.py","file_name":"support_php.cpython-32.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"638976431","text":"import numpy as np\nimport time\nfrom collections import Counter\nnp.random.seed(12)\ndef rand_choice_center(a, size, center):\n # DONT USE THIS (too slow)\n choices = np.random.normal(center, a/6, size=size).astype(np.uint8) % a\n while np.unique(choices).shape != size:\n choices = np.random.normal(center, a/6, size=size).astype(np.uint8) % a\n return choices\n\n#def rand_choice_center_fast(a, size, center):\n# def sub_func():\n# double_choices = np.random.normal(center, a/6, size=(2*size,)).astype(np.uint8) % a\n# used = set()\n# actual_choices = np.empty(size)\n# current = 0\n# for c in double_choices:\n# if current != size:\n# if c not in used:\n# used.add(c)\n# actual_choices[current] = c\n# current += 1\n# else:\n# break\n# return actual_choices, current\n# choices, actual_size = sub_func()\n# while actual_size != size:\n# choices, actual_size = sub_func()\n# return choices\n\ndef rand_choice_center_fast(a, size, center):\n return np.random.choice(a, size=size, replace=False)\n\ndef truncated_normal_perms(center, inpts, inpt_size):\n return np.clip(np.random.normal(0.52-0.05*mod_dist(center, inpts, inpt_size)/float(inpt_size), 1e-2), 0.3, 0.7)\n\ndef rand_choice_center_bias(a, size, center):\n # DONT USE THIS (gives biased results [there is replacement])\n choices = np.random.normal(center, a/6, size=size).astype(np.uint8) % a\n return choices\n\ndef mod_dist(a, b, c):\n diffs = abs(a-b)\n return np.where(diffs < c//2, diffs, c - diffs)\n\n\ndef _binary_insert(l, lo, hi, val, old_mid):\n mid = (lo+hi)//2\n if mid == old_mid:\n if val < l[lo]:\n return mid-1\n elif val > l[hi]:\n return mid+1\n else:\n return mid\n# print(\"lo\", l[lo], \"hi\", l[hi], \"val\", val, \"mid\", l[mid])\n if val <= l[mid]:\n# print(\"first path\")\n return _binary_insert(l, lo, mid, val, mid)\n else:\n# print(\"second path\")\n return _binary_insert(l, mid, hi, val, mid)\n\ndef binary_insert(l , val, key=None):\n if key is not None:\n l_actual = list(map(key, l))\n try:\n val = key(val)\n except Exception:\n pass\n return _binary_insert(l_actual, 0, len(l)-1, val, -1) + 1\n return _binary_insert(l, 0, len(l)-1, val, -1) + 1\n\ndef binary_find(l, val, key=None):\n # use this one if you want to delete from the sorted array\n res = binary_insert(l, val, key=key)\n try:\n if res == 1 and key(val) <= key(l[0]): # this is the only difference between binary insert and binary find\n return 0\n except Exception:\n if res == 1 and val <= key(l[0]):\n return 0\n return res\n\ndef modular_radius(vals, mod, key=None):\n #sorted_vals = sorted(vals)\n #sorted_vals.append(sorted_vals[0])\n if key is not None:\n vals_actual = list(map(key, vals))\n vals_actual.append(key(vals[0]))\n else:\n vals_actual = vals\n vals_actual.append(vals[0])\n #print(vals_actual)\n lowest = mod\n for x,y in zip(vals_actual, vals_actual[1:]):\n # print(x,y, \"=>\", mod - (y-x)%mod)\n lowest = min(lowest, mod - (y-x)%mod)\n return lowest\n\ndef simple_recptive_size(vals, col, mod, key=None): # depreceated since it gives values that are too large which leads to issues\n if key is not None:\n vals_actual = np.array(list(map(key, vals)))\n else:\n 
vals_actual = np.array(vals)\n return np.square(mod_dist(vals_actual, col, mod)).sum()\n\n\ndef main():\n np.set_printoptions(precision=3)\n #starter = [5, 123, -23, 4, 0, 4,2, 13, 5, 2, 4, 123,5,3,3,3,3,523,43,212, 12, 13, 13, 13, 14, 15, 16]\n #starter.sort()\n #print(starter)\n #result = binary_insert(starter, 5)\n #starter.insert(result, 5)\n #result = binary_insert(starter, 27)\n #starter.insert(result, 27)\n #result = binary_insert(starter, -500)\n #starter.insert(result, -500)\n #result = binary_insert(starter, 5000)\n #starter.insert(result, 5000)\n #result = binary_insert(starter, 50)\n #starter.insert(result, 50)\n #print(starter)\n N = 50000\n\n\n #print(mod_dist(1,2,50), '1,2')\n #print(mod_dist(24,27,50), '24,27')\n #print(mod_dist(25,26,50), '25,26')\n #print(mod_dist(24,25,50), '24,25')\n #print(mod_dist(4,29,50), '4,29')\n #print(mod_dist(1,10,50), '0,10')\n #print(mod_dist(0,50,50), '0,50')\n #print(mod_dist(0,49,50), '0,49')\n #print(mod_dist(10,1,50), '10,1')\n #print(mod_dist(50,0,50), '50,0')\n #print(mod_dist(49,0,50), '49,0')\n #quit()\n #start = time.time()\n #for _ in range(N):\n # rand_choice_center(50, (15,), 12)\n #print((time.time() - start))\n\n #start = time.time()\n #for _ in range(N):\n # rand_choice_center_bias(50, (15,), 12)\n #t1 = ((time.time() - start))\n\n #start = time.time()\n #for _ in range(N):\n # rand_choice_center_fast(50, 15, 12)\n #t2 = ((time.time() - start))\n #print(t1)\n #print(t2)\n #print(t2/t1)\n total_conn = 0\n total_discon = 0\n distr = Counter()\n total_distr = Counter()\n for _ in range(N):\n res = rand_choice_center_fast(50, 15, 0)\n perms = truncated_normal_perms(0, res, 50)\n amounts = np.where(perms > 0.5, 0, 1).sum()\n\n for count, perm in zip(res, perms):\n if perm > 0.5:\n distr[int(count)] += 1\n total_distr[int(count)] += 1\n\n total_discon += amounts\n total_conn += (15 - amounts)\n res = rand_choice_center_fast(50, 15, 0)\n perms = truncated_normal_perms(0, res, 50)\n print(res)\n print(mod_dist(0, res, 50))\n print(perms)\n print(np.where(perms > 0.5, 0, 1).sum())\n print(total_conn)\n print(total_discon)\n print(distr, \">0.5 coutns\")\n print(total_distr, \"potential synapse coutns\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"HTM-basic/util_funcs.py","file_name":"util_funcs.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"573710135","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport stopWords as stpw\nimport re\nfrom sklearn import metrics\n\nclass dpg:\n\t#calculate p and delta\n\tdef calcParams(self, dataVecs, dc, kl):\n\t\twordParam = {}\n #calculate p as param[0], neighbours as param[1]\n\t\tmaxp = 0\n\t\tmaxData = 0\n\t\tmaxDist = 0.0\n\t\tfor i in range(0, len(dataVecs)):\n\t\t\tcnt = dataVecs[i][2]\n\t\t\tneighbours = []\n\t\t\tfor j in range(0, len(dataVecs)):\n\t\t\t\tif i!=j:\n\t\t\t\t\ttmp = stpw.euclidean(dataVecs[i][0:2], dataVecs[j][0:2])\n\t\t\t\t\ttmpDist = 0.\n\t\t\t\t\tif tmp <= dc: \n\t\t\t\t\t\t#normal regularization\n\t\t\t\t\t\tif kl == 'nl':\n\t\t\t\t\t\t\tcnt += (1-(tmp**2/dc**2))*dataVecs[j][2]; neighbours.append(j)\n\t\t\t\t\t\t#gaussian kernel\n\t\t\t\t\t\telif kl == 'gs':\n\t\t\t\t\t\t\tcnt += np.exp(-tmp**2/dc**2); neighbours.append(j)\n\t\t\t\t\t\t#normal distance\n\t\t\t\t\t\telif kl == 'non': \n\t\t\t\t\t\t\tcnt += 1; neighbours.append(j)\n\t\t\t\t\tif tmp > maxDist: maxDist = tmp\n\t\t\twordParam[i] = [dataVecs[i][2], 
neighbours]\n\t\t\tif maxp < dataVecs[i][2]: maxp = dataVecs[i][2]; maxData = i\n\t\t#calculate delta as param[2], nearest higher density point j\n\t\tfor i in range(0, len(dataVecs)):\n\t\t\tminDelta = maxDist\n\t\t\taffiliate = -1\n\t\t\tfor j in range(0, len(dataVecs)):\n\t\t\t\tif wordParam[j][0] > wordParam[i][0]: \n\t\t\t\t\t#euclidean distance\n\t\t\t\t\ttmp = np.linalg.norm(np.array(dataVecs[i][0:2]) - np.array(dataVecs[j][0:2]))\n\t\t\t\t\tif minDelta > tmp: \n\t\t\t\t\t\tminDelta = tmp\n\t\t\t\t\t\taffiliate = j\n\t\t\twordParam[i].extend([minDelta, affiliate])\n\t\t\n\t\treturn wordParam\n\n\tdef plotDG(self, wordParam, dx, dy):\n\t\tX = [wordParam[i][0] for i in wordParam]\n\t\tY = [wordParam[i][2] for i in wordParam]\n\t\tpRank = []\n\t\tdpcolorMap1 = ['g' for i in range(len(wordParam))]\n\t\tfor itm in wordParam:\n\t\t\tpRank.append([wordParam[itm][2] * wordParam[itm][0], itm])\n\t\tpRank.sort(reverse = True)\n\t\tcenters = [itm[1] for itm in pRank[0:2]]\n\t\tS = [20 for i in range(len(dx))]\n\t\tplt.figure(1)\n\t\tdpcolorMap1 = [(0.3, 0.3, 0.3) for i in range(len(dx))]\n\t\tdpcolorMap2 = dpcolorMap1.copy()\n\t\tfor cent in centers:\n\t\t\tdpcolorMap1[cent] = 'r'\n\t\t\tS[cent] = 100\n\t\tplt.scatter(X, Y, c=dpcolorMap1)\n\t\tplt.xlabel(r'$\\rho$', fontsize=15)\n\t\tplt.ylabel(r'$\\delta$', fontsize=15)\n\t\tcenterX = []\n\t\tcenterY = []\n\t\tfor cent in centers:\n\t\t\t#dpcolorMap1[cent] = 'r'\n\t\t\tcenterX.append(dx[cent])\n\t\t\tcenterY.append(dy[cent])\n\t\tplt.figure(2)\n\t\tplt.scatter(dx, dy, c=dpcolorMap2, s=20, alpha=0.5)\n\t\tplt.xlabel('X', fontsize=15)\n\t\tplt.ylabel('Y', fontsize=15)\n\n\t\t#plt.scatter(centerX, centerY, c='r', s=50)\n\t\tplt.show()\n\t\treturn\n\n\tdef getBoarderDc(self, aWords, bWords, clusterWord, wordParams, a, b, dc, dataVecs, lasso):\n\t\taBoarder = []\n\t\tbBoarder = []\n\t\taB = []\n\t\tbB = []\n\t\tflag = False\n\t\tpa = wordParams[a][0]\n\t\tpb = wordParams[b][0]\n\t\tpaAll = 0.\n\t\tpbAll = 0.\n\t\tfor itm in aWords: paAll += wordParams[itm][0]\n\t\tfor itm in bWords: pbAll += wordParams[itm][0]\n\t\tpaAverage = paAll/len(aWords)\n\t\tpbAverage = pbAll/len(bWords)\n\t\tfor word in aWords:\n\t\t\twordNeighbour = wordParams[word][1]\n\t\t\tpword = wordParams[word][0]\n\t\t\tfor cand in bWords:\n\t\t\t\tpcand = wordParams[cand][0]\n\t\t\t\tdist = stpw.euclidean(dataVecs[word][0:2], dataVecs[cand][0:2])\n\t\t\t\tif cand in wordNeighbour:\n\t\t\t\t\taB.append(word) \n\t\t\t\t\tbB.append(cand)\n\t\tif len(aB) == 0: return aB, bB, False\n\t\tpaB = 0.\n\t\tpbB = 0.\n\t\tcntA = 0\n\t\tcntB = 0\n\t\t\n\t\tfor itm in aB: \n\t\t\tpaB += wordParams[itm][0]\n\t\t\tcntA += 1\n\t\t\tfor point in wordParams[itm][1]:\n\t\t\t\tif point in clusterWord[a]:\n\t\t\t\t\tpaB += wordParams[point][0]\n\t\t\t\t\tcntA += 1\n\t\tfor itm in bB: \n\t\t\tpbB += wordParams[itm][0]\n\t\t\tcntB += 1\n\t\t\tfor point in wordParams[itm][1]:\n\t\t\t\tif point in clusterWord[b]:\n\t\t\t\t\tpbB += wordParams[point][0]\n\t\t\t\t\tcntB += 1\n\t\tpaBA = paB/cntA\n\t\tpbBA = pbB/cntB\n\t\tfor word in aWords:\n\t\t\twordNeighbour = wordParams[word][1]\n\t\t\tpword = wordParams[word][0]\n\t\t\tfor cand in bWords:\n\t\t\t\tpcand = wordParams[cand][0]\n\t\t\t\tdist = stpw.euclidean(dataVecs[word][0:2], dataVecs[cand][0:2])\n\t\t\t\tif cand in wordNeighbour:\n\t\t\t\t\tif paAll > pbAll:\n\t\t\t\t\t\tif ((dist*((pa/pword)*lasso))/dc + (dist*((pb/pcand)*lasso))/dc)/2 * np.exp(\n\t\t\t\t\t\t\tmin(\n\t\t\t\t\t\t\t\tabs(paAverage-pbAverage)/max(paAverage, 
pbAverage),\n\t\t\t\t\t\t\t\tabs(paBA-pbAll)/max(paBA, pbAll)))<= 1: \n\t\t\t\t\t\t\taBoarder.append(word) \n\t\t\t\t\t\t\tbBoarder.append(cand)\n\t\t\t\t\t\t\tflag = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tif ((dist*((pa/pword)*lasso))/dc + (dist*((pb/pcand)*lasso))/dc)/2 * np.exp(\n\t\t\t\t\t\t\tmin(\n\t\t\t\t\t\t\t\tabs(paAverage-pbAverage)/max(paAverage, pbAverage),\n\t\t\t\t\t\t\t\tabs(pbBA-paAll)/max(pbBA, paAll)))<= 1: \n\t\t\t\t\t\t\taBoarder.append(word) \n\t\t\t\t\t\t\tbBoarder.append(cand)\n\t\t\t\t\t\t\tflag = True\n\t\tif len(aBoarder) != 0 : flag = True\n\t\treturn aBoarder, bBoarder, flag\n\n\t#assign cluster in p's order, from high to low\n\t#if one cluster has a delta larger than dc, assign it a new cluster id\n\tdef assignCluster(self, wordParams, centers, dataVecs, dc, lasso):\n\t\tboarders = set()\n\t\t#coarsely assign cluster id based on centers\n\t\tpRank = [[wordParams[word][0], word] for word in wordParams]\n\t\tpRank.sort(reverse = True)\n\t\twordCluster = {word:-1 for word in wordParams}\n\t\tid = 0\n\t\tcentre2cluster = {}\n\t\tfor p in pRank:\n\t\t\tif wordCluster[p[1]] == -1: \n\t\t\t\tif p[1] in centers: wordCluster[p[1]] = p[1]; centre2cluster[p[1]] = p[1]\n\t\t\t\telse: \n\t\t\t\t\tif wordParams[p[1]][3] == -1: \n\t\t\t\t\t\t#print('error, increase dc and try again....\\n') \n\t\t\t\t\t\treturn wordCluster, [], [], [], False\n\t\t\t\t\twordCluster[p[1]] = wordCluster[wordParams[p[1]][3]]\n\t\t#merge false clusters\n\t\tclusterWord = {}\n\t\tcluster2centre = {centre2cluster[itm]: itm for itm in centre2cluster}\n\t\tfor word in wordCluster:\n\t\t\tif wordCluster[word] in clusterWord: clusterWord[wordCluster[word]].append(word)\n\t\t\telse: clusterWord[wordCluster[word]] = [word]\n\t\tmergedCluster = {id: [] for id in clusterWord}\n\t\tcentreDistMat = {i: {j: 0. 
for j in centers if j != i} for i in centers}\n\t\tfor i in centers:\n\t\t\tfor j in centers:\n\t\t\t\tif i == j or centreDistMat[i][j] != 0: continue\n\t\t\t\taBoarder, bBoarder, hasBoarder = self.getBoarderDc(clusterWord[centre2cluster[i]], clusterWord[centre2cluster[j]], clusterWord, wordParams,\n\t\t\t\t\t\t\t\t\t\t\t\t\t i, j, dc, dataVecs, lasso)\n\t\t\t\t#aBoarder, bBoarder, hasBoarder = self.getBoarder(clusterWord[centre2cluster[i]], clusterWord[centre2cluster[j]], clusterWord, wordParams)\n\t\t\t\tif hasBoarder:\n\t\t\t\t\t#distance of point 0-1 is (p0-p1)/dist(0,1)\n\t\t\t\t\tcentreDistMat[i][j] = 1\n\t\t\t\t\tcentreDistMat[j][i] = 1\n\t\t\t\t\tfor itm in aBoarder: boarders.add(itm)\n\t\t\t\t\tfor itm in bBoarder: boarders.add(itm)\n\t\t\n\t\t\n\t\trelation = {i: [] for i in centers}\n\t\tfor centre in centers:\n\t\t\tmaxDist = 0.\n\t\t\tparent = -1\n\t\t\tfor centreDist in centreDistMat[centre]:\n\t\t\t\tif centreDistMat[centre][centreDist] > 0.: relation[centre].append(centreDist); relation[centreDist].append(centre)\n\t\t\n\t\t#print(relation)\n\t\tmergedList = []\n\t\tvisited = {id: -1 for id in centers}\n\t\tfor id in centers:\n\t\t\tif visited[id] == -1:\n\t\t\t\tvisited[id] = 1\n\t\t\t\tmergedSet = set()\n\t\t\t\tif len(relation[id]) != 0:\n\t\t\t\t\tmergedSet = set(relation[id])\n\t\t\t\t\tmergedSet.add(id)\n\t\t\t\telse: continue\n\t\t\t\tque = mergedSet.copy()\n\t\t\t\twhile len(que) != 0:\n\t\t\t\t\tnewQue = set()\n\t\t\t\t\tfor link in que:\n\t\t\t\t\t\tif visited[link] == 1: continue\n\t\t\t\t\t\tvisited[link] = 1\n\t\t\t\t\t\tmergedSet.add(link)\n\t\t\t\t\t\tif len(relation[link]) != 0:\n\t\t\t\t\t\t for itm in relation[link]: [newQue.add(itm) for itm in relation[link] if itm not in mergedSet]\n\t\t\t\t\tque = newQue.copy()\n\t\t\t\ttmpList = []\n\t\t\t\t[tmpList.append(itm) for itm in mergedSet]\n\t\t\t\ttmpList.sort()\n\t\t\t\tmergedList.append(tmpList)\n\t\tclusterRel = {i: -1 for i in clusterWord.keys()}\n\t\tfor merge in mergedList:\n\t\t\tfor itm in merge:\n\t\t\t\tclusterRel[centre2cluster[itm]] = centre2cluster[merge[0]]\n\n\t\trealCluster = {word:-1 for word in wordParams}\n\t\tfor word in wordCluster:\n\t\t\tif clusterRel[wordCluster[word]] != -1: realCluster[word] = cluster2centre[clusterRel[wordCluster[word]]]\n\t\t\telse: realCluster[word] = cluster2centre[wordCluster[word]]\n\t\trealWord = {}\n\t\tfor word in realCluster:\n\t\t\tif realCluster[word] in realWord: realWord[realCluster[word]].append(word)\n\t\t\telse: realWord[realCluster[word]] = [word]\n\n\t\tfinalRes = {}\n\t\t\n\t\tfinalCluster = {}\n\t\tidx = 0\n\t\tfor itm in realWord:\n\t\t\tfinalRes[idx] = realWord[itm]\n\t\t\tidx += 1\n\t\t#print('number of clusters is : '+ str(idx))\n\t\tfor itm in finalRes:\n\t\t\tfor elem in finalRes[itm]:\n\t\t\t\tfinalCluster[elem] = itm\n\t\t#return wordCluster, realCluster, boarders\n\t\treturn wordCluster, finalCluster, boarders, finalRes, True\n\n\t#read dataset\n\tdef read(self, dir):\n\t\tfp = open('C:/study/clustering dataset/' + dir)\n\t\tdx = []\n\t\tdy = []\n\t\tid = []\n\t\tnum = 0\n\t\tclusters = []\n\t\tfor line in fp.readlines():\n\t\t\traw = re.split('[ |\\t]', line)\n\t\t\ttmp = [itm for itm in raw if itm != '']\n\t\t\tarr = [float(itm.replace('\\n', '')) for itm in tmp]\n\t\t\tdx.append(arr[0])\n\t\t\tdy.append(arr[1])\n\t\t\tif len(arr) == 3: \n\t\t\t\tid.append(int(arr[2]))\n\t\t\t\tif arr[2] not in clusters:\n\t\t\t\t\tclusters.append(arr[2])\n\t\t\t\t\tnum += 1\n\t\treturn dx, dy, id, num\n\n\tdef calcMaxDist(self, dataVecs, 
dc):\n\t\trawDists = set()\n\t\tfor i in range(len(dataVecs)):\n\t\t\tfor j in range(i + 1, len(dataVecs)):\n\t\t\t\tdist = stpw.euclidean(dataVecs[i][0:2], dataVecs[j][0:2])\n\t\t\t\trawDists.add(dist)\n\t\tdists = list(rawDists)\n\t\tdists.sort()\n\t\treturn dists\n\n\tdef getCenters(self, wordParams, dc):\n\t\tcenters = []\n\t\tpRank = []\n\t\tfor itm in wordParams:\n\t\t\tpRank.append([wordParams[itm][2], itm])\n\t\tpRank.sort()\n\t\tcenters = [itm[1] for itm in pRank if itm[0] > dc and len(wordParams[itm[1]][1]) >= 3]\n\t\treturn centers\n\n\tdef plotClusterRes(self, wordCluster, dataVecs, centres, finalRes):\n\t\tcolors = cm.rainbow(np.linspace(0, 1, len(finalRes) + 1))\n\t\tdpcolorMap2 = [colors[wordCluster[itm]] for itm in range(len(dataVecs))]\n\t\t'''\n\t\tfor itm in centres:\n\t\t\tdpcolorMap2[itm] = 'k'\n\t\t'''\n\t\tdx = [dataVecs[i][0] for i in range(len(dataVecs))]\n\t\tdy = [dataVecs[i][1] for i in range(len(dataVecs))]\n\t\tplt.scatter(dx, dy, c=dpcolorMap2, marker='.', s=100)\n\t\t#plt.xlabel('X')\n\t\t#plt.ylabel('Y')\n\t\tplt.xticks([])\n\t\tplt.yticks([])\n\t\tplt.show()\n\t\treturn\n\n\tdef plotCenter(self, rawCluster, wordCluster, dx, dy, id, num, centers, boarders, dc):\n\t\tcolors = cm.rainbow(np.linspace(0, 1, 20))\n\t\tS = [100 for i in range(len(dx))]\n\t\tm = ['.' for i in range(len(dx))]\n\t\tplt.figure(1)\n\t\tdpcolorMap1 = ['k' for i in range(len(dx))]\n\t\tcx = []\n\t\tcy = []\n\t\tdpcolorMap2 = []\n\t\tfor itm in centers:\n\t\t\t#dpcolorMap1[itm] = 'r'\n\t\t\t#S[itm] = 300\n\t\t\tm[itm] = '+'\n\t\t\tcx.append(dx[itm])\n\t\t\tcy.append(dy[itm])\n\t\t\tdpcolorMap2.append('r')\n\t\tplt.scatter(dx, dy, c=dpcolorMap1, s=S, marker='.', alpha=0.3)\n\t\tplt.scatter(cx, cy, c=dpcolorMap2, s=100, marker='*')\n\t\tplt.xticks([])\n\t\tplt.yticks([])\n\t\t#plt.xlabel('X',fontsize=15)\n\t\t#plt.ylabel('Y',fontsize=15)\n\t\t#plt.title('Histogram of IQ')\n\t\tplt.text(0, 6.15, r'$dc=$'+str(dc), fontsize=15, color='green')\n\t\tplt.show()\n\t\treturn\n\n\tdef plotCluster(self, rawCluster, wordCluster, dx, dy, id, num, centers, boarders):\n\t\tcolors = cm.rainbow(np.linspace(0, 1, 20))\n\t\tplt.figure(1)\n\t\tdpcolorMap1 = ['k' for i in range(len(dx))]\n\t\tfor itm in centers:\n\t\t\tdpcolorMap1[itm] = 'r'\n\t\tfor itm in boarders:\n\t\t\tif itm in centers: dpcolorMap1[itm] = 'g'\n\t\t\telse: dpcolorMap1[itm] = 'b'\n\t\tplt.scatter(dx, dy, c=dpcolorMap1)\n\t\tfor i in range(len(dx)):\n\t\t\tplt.annotate(rawCluster[i], (dx[i],dy[i]))\n\t\t#dpcolorMap = [colors[wordCluster[i]] for i in range(len(dx))]\n\t\tplt.figure(2)\n\t\tdpcolorMap2 = ['k' for i in range(len(dx))]\n\t\tfor itm in centers:\n\t\t\tdpcolorMap2[itm] = 'r'\n\t\tfor itm in boarders:\n\t\t\tif itm in centers: dpcolorMap2[itm] = 'g'\n\t\t\telse: dpcolorMap2[itm] = 'b'\n\t\tplt.scatter(dx, dy, c=dpcolorMap2)\n\t\tfor i in range(len(dx)):\n\t\t\tplt.annotate(wordCluster[i], (dx[i],dy[i]))\n\t\t'''\n\t\tplt.figure(3)\n\t\tdpcolorMap3 = ['k' for i in range(len(dx))]\n\t\tfor itm in centers:\n\t\t\tdpcolorMap3[itm] = 'r'\n\t\tfor itm in boarders:\n\t\t\tif itm in centers: dpcolorMap3[itm] = 'g'\n\t\t\telse: dpcolorMap3[itm] = 'b'\n\t\tplt.scatter(dx, dy, c=dpcolorMap2)\n\t\tfor idx, i in enumerate(range(len(dx))):\n\t\t\tplt.annotate(idx, (dx[i],dy[i]))\n\t\t'''\n\t\tplt.xticks([])\n\t\tplt.yticks([])\n\t\tplt.show()\n\t\treturn\n\n\n\n\t#density peak clustering flow\n\tdef run(self, dir, dc, lasso, kl):\n\t\tprint(dir)\n\t\tdx, dy, id, num = self.read(dir)\n\t\tdataVecs = [[dx[i], dy[i]] for i in 
range(len(dx))]\n\t\tdists = self.calcMaxDist(dataVecs, dc)\n\t\t#realDc = dists[round(dc*len(dists))]\n\t\trealDc = dists[-1]*dc\n\t\tprint('dc = ' + str(dc)+' realDc = '+str(realDc))\n\t\twordParams = self.calcParams(dataVecs, realDc, kl)\n\t\t#self.plotDG(wordParams, dx, dy)\n\t\tcenters = self.getCenters(wordParams, realDc)\n\t\trawCluster, wordCluster, boarders, finalRes, valid = self.assignCluster(wordParams, centers, dataVecs, realDc, lasso)\n\t\t#self.plotCenter(rawCluster, wordCluster, dx, dy, id, num, centers, boarders, dc)\n\t\t#self.plotClusterRes(wordCluster, dataVecs, centers, finalRes)\n\t\tlabel = [wordCluster[i] for i in range(len(dataVecs))]\n\t\tarsTmp = metrics.adjusted_rand_score(id, label)\n\t\tamiTmp = metrics.adjusted_mutual_info_score(id, label)\n\t\tprint(arsTmp)\n\t\tprint(amiTmp)\n\t\treturn wordCluster, label\n\n\t#for evaluation\n\tdef eval(self, dc, lasso, kl, dataVecs):\n\t\tdists = self.calcMaxDist(dataVecs, dc)\n\t\trealDc = dists[-1]*dc\n\t\twordParams = self.calcParams(dataVecs, realDc, kl)\n\t\tcenters = self.getCenters(wordParams, realDc)\n\t\trawCluster, wordCluster, boarders, finalRes, valid = self.assignCluster(wordParams, centers, dataVecs, realDc, lasso)\n\t\t#self.plotClusterRes(wordCluster, dataVecs, centers, finalRes)\n\t\tlabel = [wordCluster[i] for i in range(len(wordCluster))]\n\t\treturn label, valid, centers\n\n\n\tdef plotParams(self, params):\n\t\tp = []\n\t\tdelta = []\n\t\t#x = p, y = delta\n\t\tfor itm in params:\n\t\t\tp.append(params[itm][0])\n\t\t\tdelta.append(params[itm][1])\n\t\tplt.plot(p, delta, 'ro')\n\t\tplt.show()\n\t\treturn\n\nif __name__ == \"__main__\":\n\tinst = dpg()\n\t#clusters = inst.run('Compound.txt', 0.05, 5, 'nl')\n\t#inst.run('flame.txt', 0.1, 0.65, 'nl')\n\t#inst.run('horn.txt', 0.05, 0.15, 'nl')\n\t#inst.run('tmp.txt', 0.015, 0, 'nl')\n\t#clusters = inst.run('Aggregation.txt', 0.1, 1.5, 'nl')\n\t#clusters = inst.run('D31.txt', 0.01, 0.2, 'gs')\n\t#clusters = inst.run('jain.txt', 0.1, 0.5, 'nl')\n\n\tclusters = inst.run('pathbased.txt', 0.08, 0.6, 'nl')\n\t'''\n\t#done normal distance\n\tclusters = inst.run('jain.txt', 0.10, 0.1, 'nl')\n\tclusters = inst.run('Compound.txt', 0.06, 0.65, 'nl')\n\tclusters = inst.run('pathbased.txt', 0.085, 0.8, 'nl')\n\tclusters = inst.run('Aggregation.txt', 0.036, 1, 'nl')\n\tclusters = inst.run('spiral.txt', 0.12, 1, 'nl')\n\tinst.run('flame.txt', 0.1, 1, 'nl')\n\tinst.run('a3.txt', 0.020, 2, 'nl')\n\n\t#done Gaussian distance\n\tclusters = inst.run('R15.txt', 0.040, 1, 'gs')\n\tinst.run('s1.txt', 0.028, 1, 'gs')\n\tclusters = inst.run('D31.txt', 0.021, 1, 'gs')\n\tclusters = inst.run('s4.txt', 0.06, 7, 'gs')\n\t'''\n\t'''\n\t#done normal distance\n\t#clusters = inst.run('jain.txt', 0.10, 0.1, 'nl')\n\t#clusters = inst.run('Compound.txt', 0.1, 1, 'nl')\n\t#clusters = inst.run('pathbased.txt', 0.085, 0.2, 'nl')\n\t#clusters = inst.run('Aggregation.txt', 0.09, 2, 'nl')\n\t#clusters = inst.run('spiral.txt', 0.12, 1, 'nl')\n\t#inst.run('flame.txt', 0.2, 1.3, 'nl')\n\t#inst.run('a3.txt', 0.020, 1.5, 'nl')\n\n\t#done Gaussian distance\n\t#clusters = inst.run('R15.txt', 0.040, 1, 'gs')\n\t#inst.run('s1.txt', 0.035, 0.7, 'gs')\n\t#clusters = inst.run('D31.txt', 0.021, 1, 'gs')\n\t#clusters = inst.run('s4.txt', 0.06, 7, 'gs')\n\t'''\n\tinst.run('flame.txt', 0.1, 0.65, 'nl')\n\tclusters = inst.run('pathbased.txt', 0.085, 0.6, 'nl')\n\tclusters = inst.run('jain.txt', 0.10, 0.5, 'nl')\n\tclusters = inst.run('Aggregation.txt', 0.05, 1.5, 'nl')\n\tclusters = inst.run('Compound.txt', 0.05, ­
1, 'nl')\n\tclusters = inst.run('spiral.txt', 0.06, 0.5, 'nl')\n\t#inst.run('a3.txt', 0.020, 1.5, 'nl')\n\n\t#done Gaussian distance\n\tclusters = inst.run('R15.txt', 0.040, 1, 'gs')\n\t#inst.run('s1.txt', 0.035, 0.7, 'gs')\n\tclusters = inst.run('D31.txt', 0.021, 1, 'gs')\n\t#clusters = inst.run('s4.txt', 0.06, 7, 'gs')\n\n\tprint('The end...')","sub_path":"mdpc/mdpc_extend/mdpc_extend/dphmgrey.py","file_name":"dphmgrey.py","file_ext":"py","file_size_in_byte":15699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"362180952","text":"class Solution:\n def numDecodings(self, s: str) -> int:\n if not s:\n return 0\n\n dp = [0 for _ in range(len(s) + 1)]\n dp[0] = 1\n dp[1] = 0 if s[0] == '0' else 1\n\n for i in range(2, len(s) + 1):\n if 0 < int(s[i - 1:i]) <= 9: # >0 and not >= 0 ; see note2\n dp[i] += dp[i - 1]\n if 10 <= int(s[i - 2:i]) <= 26:\n dp[i] += dp[i - 2]\n\n return dp[-1]\n\n\n\"\"\"\nNote 1:\nThis is my first post. Please let me know if this is helpful and if there's anything I can do to improve.\n\nProblem Reduction: variation of n-th staircase with n = [1, 2] steps.\n\nApproach: We generate a bottom up DP table.\n\nThe tricky part is handling the corner cases (e.g. s = \"30\").\n\nMost elegant way to deal with those error/corner cases, is to allocate an extra space, dp[0].\n\nLet dp[ i ] = the number of ways to parse the string s[1: i + 1]\n\nFor example:\ns = \"231\"\nindex 0: extra base offset. dp[0] = 1\nindex 1: # of ways to parse \"2\" => dp[1] = 1\nindex 2: # of ways to parse \"23\" => \"2\" and \"23\", dp[2] = 2\nindex 3: # of ways to parse \"231\" => \"2 3 1\" and \"23 1\" => dp[3] = 2\n\n\nNote2:\nWill fail for something like \"10\" where it will add an extra 1 for 0 and result in extra way\n\"\"\"\n","sub_path":"LeetCode/0091_DecodeWays/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"494885316","text":"def main():\n argument_spec = ovirt_facts_full_argument_spec(name=dict(default=None), host=dict(default=None), vm=dict(default=None))\n module = AnsibleModule(argument_spec)\n if (module._name == 'ovirt_affinity_labels_facts'):\n module.deprecate(\"The 'ovirt_affinity_labels_facts' module is being renamed 'ovirt_affinity_label_facts'\", version=2.8)\n check_sdk(module)\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n affinity_labels_service = connection.system_service().affinity_labels_service()\n labels = []\n all_labels = affinity_labels_service.list()\n if module.params['name']:\n labels.extend([l for l in all_labels if fnmatch.fnmatch(l.name, module.params['name'])])\n if module.params['host']:\n hosts_service = connection.system_service().hosts_service()\n if (search_by_name(hosts_service, module.params['host']) is None):\n raise Exception((\"Host '%s' was not found.\" % module.params['host']))\n labels.extend([label for label in all_labels for host in connection.follow_link(label.hosts) if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])])\n if module.params['vm']:\n vms_service = connection.system_service().vms_service()\n if (search_by_name(vms_service, module.params['vm']) is None):\n raise Exception((\"Vm '%s' was not found.\" % module.params['vm']))\n labels.extend([label for label in all_labels for vm in connection.follow_link(label.vms) if fnmatch.fnmatch(vms_service.service(vm.id).get().name, 
module.params['vm'])])\n if (not (module.params['vm'] or module.params['host'] or module.params['name'])):\n labels = all_labels\n module.exit_json(changed=False, ansible_facts=dict(ovirt_affinity_labels=[get_dict_of_struct(struct=l, connection=connection, fetch_nested=module.params.get('fetch_nested'), attributes=module.params.get('nested_attributes')) for l in labels]))\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=(auth.get('token') is None))","sub_path":"Data Set/bug-fixing-5/0a6ab23f38f4afb8ecf11d37b34657e4f3bd1aba-
-fix.py","file_name":"0a6ab23f38f4afb8ecf11d37b34657e4f3bd1aba-
-fix.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"34715905","text":"#!/usr/bin/python -t\n\n\n#Time: O(nlogn), space O(1)\n\nclass Solution(object):\n def containsDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n n = len(nums)\n nums.sort()\n \n index = 0\n \n for i in range(1, n):\n if nums[i] == nums[index]:\n return True\n else:\n index = index + 1\n \n return False\n\n \n\n#Time: O(n), space O(n)\n\nclass Solution(object):\n def containsDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n n = len(nums)\n d = set()\n \n for i in range(n):\n if nums[i] in d:\n return True\n else:\n d.add(nums[i])\n \n return False\n\n","sub_path":"lc/python/217_contains_duplicate.py","file_name":"217_contains_duplicate.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"617363679","text":"\"\"\"\nReporting functions\n\"\"\"\nimport yaml\nimport glob\nfrom datetime import datetime\nimport pytz\nimport os\n\nimport click\n\ntz = pytz.timezone('Europe/London')\n\n\nimport otter\nimport otter.bootstrap as bt\n\nfrom asimov.cli import connect_gitlab, known_pipelines\nfrom asimov import gitlab\nfrom asimov import config\n\n@click.group()\ndef report():\n pass\n\n@click.option(\"--location\", \"webdir\", default=None, help=\"The place to save the report to\")\n@click.argument(\"event\", default=None, required=False)\n@report.command()\ndef html(event, webdir):\n \"\"\"\n Return the ledger for a given event.\n If no event is specified then the entire production ledger is returned.\n \"\"\"\n server, repository = connect_gitlab()\n if not webdir:\n webdir = config.get(\"report\", \"report_root\")\n click.echo(\"Getting events...\")\n events = gitlab.find_events(repository,\n milestone=config.get(\"olivaw\", \"milestone\"),\n subset=[event],\n repo=False,\n update=False)\n click.echo(\"Got events\")\n if len(glob.glob(\"asimov.conf\"))>0:\n config_file = \"asimov.conf\"\n else:\n config_file = None\n\n report = otter.Otter(f\"{webdir}/index.html\", \n author=\"Olivaw\", \n title=\"Olivaw PE Report\", \n author_email=config.get(\"report\", \"report_email\"),\n config_file=config_file)\n\n with report:\n navbar = bt.Navbar(\"Asimov\", background=\"navbar-dark bg-primary\")\n report + navbar\n\n with report:\n time = bt.Container()\n\n time + f\"Report generated at {str(datetime.now(tz))}\"\n report + time\n\n cards = []\n container = bt.Container()\n container + \"# All PE Productions\"\n for event in events:\n click.secho(event.title, bold=True)\n\n event_report = otter.Otter(f\"{webdir}/{event.title}.html\", \n author=\"Olivaw\", \n title=f\"Olivaw PE Report | {event.title}\", \n author_email=\"daniel.williams@ligo.org\", \n config_file=config_file)\n\n with event_report:\n navbar = bt.Navbar(\"Asimov\", background=\"navbar-dark bg-primary\")\n event_report + navbar\n\n card = bt.Card(title=f\"{event.title}\")\n\n toc = bt.Container()\n\n for production in event.productions:\n toc + f\"* [{production.name}](#{production.name}) | {production.pipeline} |\"# + bt.Badge({production.pipeline}, \"info\")\n\n with event_report:\n title_c = bt.Container()\n title_c + f\"#{event.title}\"\n event_report + title_c\n event_report + toc\n\n production_list = bt.ListGroup()\n for production in event.productions:\n click.echo(f\"{event.title}\\t{production.name}\")\n if 
production.pipeline.lower() in known_pipelines:\n pipe = known_pipelines[production.pipeline.lower()](production, \"C01_offline\")\n\n event_log = otter.Otter(f\"{webdir}/{event.title}-{production.name}.html\", \n author=\"Olivaw\", \n title=f\"Olivaw PE Report | {event.title} | {production.name}\", \n author_email=\"daniel.williams@ligo.org\", \n config_file=config_file)\n\n \n\n \n \n status_map = {\"cancelled\": \"light\",\n \"finished\": \"success\",\n \"uploaded\": \"success\",\n \"processing\": \"primary\",\n \"running\": \"primary\",\n \"stuck\": \"warning\",\n \"restart\": \"secondary\",\n \"ready\": \"secondary\",\n \"wait\": \"light\",\n \"stop\": \"danger\",\n \"manual\": \"light\",\n \"stopped\": \"light\"}\n with event_report:\n container = bt.Container()\n container + f\"## {production.name}\"\n container + f\"\"\n container + \"### Ledger\"\n container + production.meta\n\n if production.pipeline.lower() == \"bilby\":\n container +f\"### Progress\"\n progress_line = []\n procs = pipe.check_progress()\n for proc, val in procs.items():\n container + f\"- {proc.split('_')[-1]}\\t{val[0]}\\t{val[1]}\"\n progress_line.append(f\"{val[1]}\")\n else:\n progress_line = []\n if production.status.lower() == \"running\":\n progress = str(bt.Badge(\"|\".join(progress_line)))\n else:\n progress = \"\"\n\n if production.status.lower() == \"uploaded\":\n link = os.path.join(\"https://ldas-jobs.ligo.caltech.edu\", config.get('general', 'webroot').replace(\"/home/\", \"~\").replace(\"public_html/\", \"\"), production.event.name, production.name, \"results\", \"home.html\")\n item_text = f\"{production.name}\" \n else:\n item_text = f\"{production.name}\" \n production_list.add_item(item_text\n + str(bt.Badge(f\"{production.pipeline}\", \"info\")) \n + progress\n + str(bt.Badge(f\"{production.status}\")), \n context=status_map[production.status])\n\n # logs = pipe.collect_logs()\n # container + f\"### Log files\"\n # container + f\"Log file page\"\n # with event_log:\n\n # for log, message in logs.items():\n # log_card = bt.Card(title=f\"{log}\")\n # log_card.add_content(\"
\"+message+\"
\")\n # event_log + log_card\n\n with event_report:\n event_report + container\n\n card.add_content(production_list)\n cards.append(card)\n\n\n\n with report:\n if len(cards) == 1:\n report + card\n else:\n for i, card in enumerate(cards):\n if i%2==0:\n deck = bt.CardDeck()\n deck + card\n if i%2==1:\n report + deck\n\n@click.argument(\"event\", default=None, required=False)\n@report.command()\ndef status(event):\n \"\"\"\n Provide a simple summary of the status of a given event.\n\n Arguments\n ---------\n name : str, optional\n The name of the event.\n\n \"\"\"\n server, repository = connect_gitlab()\n\n events = gitlab.find_events(repository,\n milestone=config.get(\"olivaw\", \"milestone\"),\n subset=[event],\n update=False,\n repo=False)\n for event in events:\n click.secho(f\"{event.title:30}\", bold=True)\n if len(event.event_object.meta['productions'])>0:\n click.secho(\"\\tProductions\", bold=True)\n for production in event.event_object.meta['productions']:\n click.echo(f\"\\t\\t{list(production.keys())[0]}\")\n if len(event.event_object.get_all_latest())>0:\n click.secho(\"\\tJobs waiting\", bold=True)\n waiting = event.event_object.get_all_latest()\n for awaiting in waiting:\n click.echo(f\"\\t\\t{awaiting.name}\\t{awaiting.status}\")\n\n@click.option(\"--yaml\", \"yaml_f\", default=None, help=\"A YAML file to save the ledger to.\")\n@click.argument(\"event\", default=None, required=False)\n@report.command()\ndef ledger(event, yaml_f):\n \"\"\"\n Return the ledger for a given event.\n If no event is specified then the entire production ledger is returned.\n \"\"\"\n\n server, repository = connect_gitlab()\n\n events = gitlab.find_events(repository,\n milestone=config.get(\"olivaw\", \"milestone\"),\n subset=[event],\n update=False,\n repo=False)\n\n total = []\n for event in events:\n total.append(yaml.safe_load(event.event_object.to_yaml()))\n\n click.echo(yaml.dump(total))\n\n if yaml_f:\n with open(yaml_f, \"w\") as f:\n f.write(yaml.dump(total))\n","sub_path":"asimov/cli/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":8677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"537944190","text":"# -*- coding: utf-8 -*-\n' py info '\n__author__ = 'xuyanjun@extracme.com'\n\nimport unittest, time\nfrom test_programe.driver.HTMLTestRunner import HTMLTestRunner\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nimport smtplib, os\nimport traceback\n\n\n# 发送测试报告,需要配置你的邮箱账号\ndef send_mail(file_new):\n f = open(file_new, 'rb')\n mail_body = f.read()\n f.close()\n msg = MIMEText(mail_body, 'html', 'utf-8')\n msg['Subject'] = Header(\"自动化测试报告\", 'utf-8')\n msg['From'] = '2237993760@163.com'\n msg['To'] = 'twolun@qq.com'\n\n fromaddr = \"2237993760@qq.com\"\n smtpaddr = \"smtp.qq.com\"\n toaddrs = [\"2237003760@qq.com\", \"twolun@qq.com\"]\n password = \"jrqduzszwmqheahi\"\n\n try:\n s = smtplib.SMTP_SSL(smtpaddr)\n s.set_debuglevel(1)\n s.login(fromaddr, password)\n s.sendmail(fromaddr, toaddrs, msg.as_string())\n s.quit()\n except Exception as ex:\n print('Error: unable to send mail')\n print(traceback.format_exc())\n\n print('email has send out!')\n\n\n# 查找测试报告目录,找到最新生成的测试报告文件\ndef new_report(testreport):\n lists = os.listdir(testreport)\n lists.sort(key=lambda fn: os.path.getmtime(testreport + '\\\\' + fn))\n file_new = os.path.join(testreport, lists[-1])\n return file_new\n\n\n# 指定测试用例为当前文件夹下的test_case目录\ntest_dir = './mail/test_case'\ntest_report = 
'D:\\\\python-learning\\\\test_programe\\\\mail\\\\report'\ndiscover = unittest.defaultTestLoader.discover(test_dir, pattern='*_case.py')\n\nif __name__ == \"__main__\":\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\")\n filename = test_report + '/' + now + 'result.html'\n fp = open(filename, 'wb')\n # runner = unittest.TextTestRunner()\n runner = HTMLTestRunner(stream=fp,\n title='测试报告',\n description=\"运行环境:windows 7, Chrome\")\n runner.run(discover)\n fp.close()\n\n new_report = new_report(test_report)\n send_mail(new_report)\n\n","sub_path":"test_programe/run_all_test.py","file_name":"run_all_test.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"72283067","text":"\"\"\"\n191. Maximum Product Subarray\nFind the contiguous subarray within an array\n(containing at least one number) which has the largest product.\n\nExample\nFor example, given the array [2,3,-2,4],\nthe contiguous subarray [2,3] has the largest product = 6.\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param nums: An array of integers\n @return: An integer\n \"\"\"\n\n def maxProduct(self, nums):\n\n n = len(nums)\n maxs = [0 for _ in range(n)]\n mins = [0 for _ in range(n)]\n\n # initialization\n maxs[0] = nums[0]\n mins[0] = nums[0]\n\n res = nums[0]\n\n for i in range(1, n):\n mins[i] = nums[i]\n maxs[i] = nums[i]\n if nums[i] > 0:\n maxs[i] = max(maxs[i], maxs[i - 1] * nums[i])\n mins[i] = min(mins[i], mins[i - 1] * nums[i])\n elif nums[i] < 0:\n maxs[i] = max(maxs[i], mins[i - 1] * nums[i])\n mins[i] = min(mins[i], maxs[i - 1] * nums[i])\n\n res = max(res, maxs[i])\n return res\n\n def maxProduct2(self, nums):\n if not nums or nums is None:\n return 0\n\n maxProduct = 1\n prod = 1\n for i in range(1, len(nums)):\n if nums[i] * nums[i - 1] > prod:\n prod = nums[i] * nums[i - 1]\n else:\n prod = 1\n\n maxProduct = max(maxProduct, prod)\n return maxProduct\n\n\ns = Solution()\n\nprint(s.maxProduct([2, 3, -2, 4]))\n","sub_path":"DynamicProgramming/MaximumProductSubarray.py","file_name":"MaximumProductSubarray.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"336847775","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nKaty Barnhart\n\"\"\"\nimport os\nimport pandas as pd\nimport numpy as np\n\nfrom landlab.io import read_esri_ascii\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n###############\n# Define filepaths. 
Here these are given as lists, for cross platform\n# compatibility\noutlet_id = 178576\ninput_template_folderpath = ['..', '..', 'templates']\ndriver_folderpath = ['..', '..', 'drivers', 'models']\n\nparameter_dict_folderpath = ['..', '..', 'auxillary_inputs']\ninitial_dem_filepath = ['..', '..', 'auxillary_inputs', 'dems', 'sew', 'modern', 'dem24fil_ext.txt']\nchi_mask_filepath = ['..', '..', 'auxillary_inputs', 'chi_mask', 'sew' , 'chi_mask.txt']\nrock_till_filepath = ['..', '..', 'auxillary_inputs', 'rock_till', 'sew' , 'bdrx_24.txt']\n\nmetric_folder_path = ['..', '..', 'auxillary_inputs', 'modern_metric_files']\n\nmodern_dem_name = os.path.join(*initial_dem_filepath)\n\n# get model driver template\ndriver_template_filepath = os.path.abspath(os.path.join(*(input_template_folderpath+['sew_prediction_breach_uncert_model_driver_template.txt'])))\nwith open(driver_template_filepath, 'r') as mdfp:\n    model_driver_lines = mdfp.readlines()\n\n# Get model space information\nmodel_parameter_input_file = os.path.abspath(os.path.join(*(parameter_dict_folderpath+['model_parameter_match_calibration_sew.csv'])))\nmodel_param_df = pd.read_csv(model_parameter_input_file)\n\n# construct model time and model dictionary:\n# first get all model ID numbers and save them to a dict with the equivalent\n# length three padded strings\nmids = {i: str(i).rjust(3, '0') for i in model_param_df['ID'].values}\nmname = {i: model_param_df.loc[model_param_df['ID']==i]['Model Name'].values[0] for i in mids.keys()}\nmused = {i: model_param_df.loc[model_param_df['ID']==i]['Model Used'].values[0] for i in mids.keys()}\n\n# initialize data structures\nmodel_dictionary = {}\nnum_variable_dictionary = {}\n\n#%%\n\ndt_ind = np.where(model_param_df.columns == 'dt')[0][0]\nparameter_names = list(model_param_df.columns[dt_ind:].values)\n\nmid_names = {}\nmid_used = {}\nfor mid in mids.keys():\n\n # make the model key\n key = 'model_'+mids[mid]\n mid_names[key] = mname[mid]\n mid_used[key] = mused[mid]\n if type(mid_used[key]) == str:\n # construct model_dictionary\n param_info = {}\n num_var = 0\n for param in parameter_names:\n if model_param_df.loc[model_param_df['ID']==mid][param].values[0]!='na':\n\n if model_param_df.loc[model_param_df['ID']==mid][param].values[0]=='variable':\n param_info[param] = '{'+param+'}'\n num_var += 1\n else:\n param_info[param] = model_param_df.loc[model_param_df['ID']==mid][param].values[0]\n num_variable_dictionary[key] = num_var\n model_dictionary[key] = param_info\n\n\n# modern DEM filepath, chi mask filepath, and rock-till filepath\nmodern_dem = os.path.abspath(os.path.join(*initial_dem_filepath))\nchi_mask = os.path.abspath(os.path.join(*chi_mask_filepath))\nrock_till = os.path.abspath(os.path.join(*rock_till_filepath))\n\n# read modern grid to get modern outlet elevation\n(temp_grid, temp_z) = read_esri_ascii(modern_dem,\n name='topographic__elevation',\n halo=1)\n\nmodern_outlet_elevation = temp_z[outlet_id]\n\n# identify a few special models that need extra arguments in the input file.\nrt_models = ['model_'+ mid for mid in ['800', '802', '804', '808', '810', '840', '842', 'A00', 'C00']]\nst_models = ['model_'+ mid for mid in ['100', '102', '104', '108', '110', '180', '300', '380']]\nhy_models = ['model_'+ mid for mid in ['010', '012', '014', '018', '030', '110', '210', '410', '810']]\n\n# for model in models\nfor model_name in model_dictionary.keys():\n\n # only write template and driver if model exists.\n if type(mid_used[model_name]) == str:\n # First, make the input template\n lines = ­
['# Inputs for Model: ' + model_name,\n '# ' + mid_names[model_name],\n '# use ' + mid_used[model_name],\n 'run_duration: 10000.0',\n 'output_interval: 100.0',\n 'save_first_timestep: True',\n #'opt_var_precip: True',\n 'meters_to_feet: True',\n 'DEM_filename: {inital_DEM_file}',\n 'outlet_id: ' + str(outlet_id),\n 'outlet_lowering_file_path: {lowering_history_file}',\n 'output_filename: {output_filename}_']\n\n if model_name in rt_models:\n lines.append('rock_till_file__name: '+ rock_till)\n if model_name in st_models:\n lines.append('opt_stochastic_duration: False')\n if model_name in hy_models:\n lines.append('solver: adaptive')\n model_params = model_dictionary[model_name]\n\n for param in model_params.keys():\n lines.append(param+': '+str(model_params[param]))\n\n # Second, make the model driver\n input_template_filepath = os.path.abspath(os.path.join(*(input_template_folderpath+['sew_prediction_inputs_template_'+model_name+'.txt'])))\n with open(input_template_filepath,'w') as f:\n f.write(\"\\n\".join(lines))\n\n # Modify model driver template\n model_lines = []\n for line in model_driver_lines:\n line = line.replace('{ModelID}', model_name.split('_')[1])\n line = line.replace('{ModelName}', mid_names[model_name])\n line = line.replace('{ModelUsed}', mid_used[model_name])\n line = line.strip('\\n\\r')\n model_lines.append(line+'\\n')\n\n # CREATE FOLDER IF IT DOESN'T EXIST:\n if os.path.exists(os.path.join(*driver_folderpath)):\n pass\n else:\n os.makedirs(os.path.join(*driver_folderpath))\n\n # Write model driver\n model_driver_filepath = os.path.abspath(os.path.join(*(driver_folderpath+['sew_prediction_breach_uncert_'+model_name+'_driver.py'])))\n\n with open(model_driver_filepath, 'w') as mdfp:\n mdfp.writelines(model_lines)\n","sub_path":"prediction/sew/make_input_templates_and_drivers_breaching.py","file_name":"make_input_templates_and_drivers_breaching.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"568262741","text":"import argparse\nfrom .profiling import profile_ctx\n\n\ndef argument_parser():\n parser = argparse.ArgumentParser(\n description='Retrieve stdin and analyze it for vulnerabilities'\n )\n\n parser.add_argument(\n '-r', '--recreate', help='Wipe current db and recreate the schema',\n action='store_true',\n default=False\n )\n\n parser.add_argument(\n '-p', '--profile', help='Dump profile execution to the given PROFILE path',\n type=str,\n default='',\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = argument_parser()\n\n with profile_ctx(args.profile):\n print(args)\n\n if args.recreate:\n from .dal.database import recreate\n recreate()\n\n # INFO: this needs to be after recreating the DB in order to avoid circular import errors\n from .dal.loader import cpe_loader, cve_loader\n cpe_loader()\n cve_loader()\n\n else:\n from .app import app\n\n app.run(\n host=app.config.HTTP_HOST,\n port=app.config.HTTP_PORT,\n workers=app.config.HTTP_WORKERS,\n debug=app.config.HTTP_DEBUG,\n )\n","sub_path":"patton/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"364124445","text":"# find kth biggest element from a unordered list in linear time\nimport random\nimport math\ndef select(A,k):\n if len(A)<=5:\n return sorted(A)[k]\n subs=[A[x:x+5] for x in range(0,len(A),5)] # slice A every 5 elements\n 
meds=[select(sub,math.floor((len(sub)-1)/2)) for sub in subs] # find meds in every subset\n med=select(meds,math.floor((len(meds)-1)/2)) # find med of meds\n small=[x for x in A if x med]\n if k>> Tensor Image input size: {}'.format(dimensions))\n\n wav_signal = None\n rows = -1\n cols = -1\n if row_index is not None and col_index is not None:\n rows = dimensions[row_index]\n cols = dimensions[col_index]\n wav_signal = wav\n else:\n # Remove 1-sized dimensions\n wav_signal = wav.squeeze()\n rows = wav_signal.shape[0]\n cols = wav_signal.shape[1]\n\n print('>>> Generating image (rows: {} cols: {})'.format(rows, cols))\n image = Image.new('RGB', (rows, cols))\n pixels = image.load()\n\n minimum = wav_signal[0,0]\n maximum = wav_signal[0,0]\n\n for x in range(0, rows):\n for y in range(0, cols):\n if wav_signal[x,y] > maximum:\n maximum = wav_signal[x,y]\n if wav_signal[x,y] < minimum:\n minimum = wav_signal[x,y]\n\n print('Minimum: ' + str(minimum))\n print('Maximum: ' + str(maximum))\n\n if maximum == minimum:\n maximum = 1 # nb: to prevent division by zero below\n\n for x in range(0, rows):\n for y in range(0, cols):\n v = wav_signal[x,y]\n scaled = int((v - minimum) / (maximum - minimum) * 255)\n pixels[x, y] = (scaled, scaled, scaled)\n\n # Image Show only works on local X server:\n #image.show()\n image.save(filename)\n\n","sub_path":"visualize/wav_images.py","file_name":"wav_images.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"364491465","text":"import argparse\nimport fnmatch\nimport os\nimport torch\nimport PIL.Image\nimport models\nimport torchvision.transforms as tr\nfrom Datasetter import HackatonDataset\nimport train\nimport numpy as np\n\ndef pred(path, dest, trainer):\n dirs = os.listdir(path)\n preprocess = tr.Compose([tr.Resize((450, 450)), tr.ToTensor()])\n for file in dirs:\n if fnmatch.fnmatch(file, '*.jpg'):\n img = PIL.Image.open(path + file)\n x = preprocess(img.convert('RGB'))\n x = torch.stack([x], 0).type(torch.FloatTensor)\n x.requires_grad=False\n res = trainer.eval(x)\n np.savetxt(dest + file.replace('jpg', 'csv'), res.numpy(), delimiter='\\n')\n \n \n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Texture Editing by PRN')\n\n parser.add_argument('--dest_path_pred', default='./datasets/dest/', type=str,\n help='path to prediction results folder')\n parser.add_argument('--img_path_pred', default='./datasets/test/', type=str,\n help='path to prediction data folder')\n parser.add_argument('--names_path_train', default='./datasets/train.txt', type=str,\n help='path to train files with names in folder')\n parser.add_argument('--img_path_train', default='./datasets/train/', type=str,\n help='path to train data folder')\n parser.add_argument('--names_path_test', default='./datasets/test.txt', type=str,\n help='path to test files with names in folder')\n parser.add_argument('--img_path_test', default='./datasets/train/', type=str,\n help='path to test data folder')\n parser.add_argument('--checkpoint_path', default='./data.ckpt', type=str,\n help='path to checkpoint data')\n parser.add_argument('--mode', default=1, type=int,\n help='1 - train, 0 - eval')\n parser.add_argument('--gpu', default=True, type=bool,\n help='set is gpu used')\n\n parser.add_argument('--batch_size', default='10', type=int,\n help='batches')\n\n parser.add_argument('--path_to_normalizer', default='./scaler.obj', type=str,\n help='loss which need to save')\n\n 
parser.add_argument('--global_error', default='10000', type=float,\n help='loss which need to save')\n\n FLAGS = parser.parse_args()\n\n dset_train = HackatonDataset(FLAGS.names_path_train, FLAGS.img_path_train, '.jpg')\n dset_test = HackatonDataset(FLAGS.names_path_test, FLAGS.img_path_test, '.jpg')\n\n train_data_size = dset_train.get_size()\n test_data_size = dset_test.get_size()\n\n use_cuda = FLAGS.gpu\n print(FLAGS)\n trainer = train.Trainer(use_cuda,dset_train.get_train_batch, dset_test.get_ordered_batch, checkpoint_path=FLAGS.checkpoint_path,global_loss=FLAGS.global_error)\n if FLAGS.mode == 1:\n trainer.model.train()\n trainer.train(1000, FLAGS.batch_size, train_data_size, test_data_size)\n elif FLAGS.mode == 0:\n pred(FLAGS.img_path_pred, FLAGS.dest_path_pred, trainer)\n","sub_path":"examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"230935576","text":"from flask import Flask, request, redirect, render_template, session, flash\nfrom mysqlconnection import MySQLConnector\napp = Flask(__name__)\nmysql = MySQLConnector(app,'email')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\ndef val(validation):\n if len(request.form['email']) < 1:\n flash('email not right you fool!')\n\n else:\n flash('{}.format(request.form[email])) logged!')\n return redirect ('/success')\n\n@app.route('/success', methods=['POST'])\ndef submit():\n query = \"SELECT * FROM email\"\n \n email = mysql.query_db(query)\n\n query = \"insert into email (email, created_at, updated_at)values ( :email, NOW(), NOW())\"\n\n data = {\n 'email':request.form['email'],\n }\n \n mysql.query_db(query, data)\n\n\n return render_template('success.html', email=email)\n\n\n\n\napp.run(debug=True)","sub_path":"keith_sanders/email/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"387345696","text":"import os\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.db import transaction, connection\nfrom django.utils import timezone\nfrom .models import *\nimport json\n\n\n@xframe_options_exempt\ndef load_base(request, project_name):\n project_settings = json.load(open(\"/root/DynamicCrowd/settings/projects/{}.json\".format(project_name)))\n context = {\n \"base_title\": project_settings[\"DynamicCrowd\"][\"Title\"],\n \"base_instruction\": loader.get_template(\"./{}/instruction.html\".format(project_name)).render({}, request),\n \"nanotasks_per_hit\": project_settings[\"DynamicCrowd\"][\"NanotasksPerHIT\"],\n \"min_height\": project_settings[\"AMT\"][\"FrameHeight\"]\n }\n ret = render(request, \"base.html\", context=context)\n return ret\n\n\n@csrf_exempt\ndef load_static_template(request, project_name, template_name):\n template_path = \"./{}/{}.html\".format(project_name,template_name)\n return render(request, template_path)\n\n@csrf_exempt\ndef load_nanotask(request, project_name):\n if \"preview\" in request.GET:\n template_path = \"./{}/preview.html\".format(project_name)\n template = loader.get_template(template_path)\n response = {\n \"info\": {\"id\": None, \"project_name\": project_name, \"template_name\": \"__preview__\" },\n 
\"html\": template.render({}, request)\n } \n return JsonResponse(response)\n\n else:\n request_json = json.loads(request.body)\n mturk_worker_id = request_json[\"mturk_worker_id\"]\n session_tab_id = request_json[\"session_tab_id\"]\n user_agent = request_json[\"user_agent\"]\n nanotask = Nanotask.objects.using(project_name).filter(answer__mturk_worker_id=mturk_worker_id, answer__session_tab_id=session_tab_id, answer__value=None, project_name=project_name).order_by('id').first();\n if not nanotask:\n sql = \"update {4}.nanotask_answer set mturk_worker_id='{0}', session_tab_id='{2}', user_agent='{3}', time_assigned='{5}' where mturk_worker_id is null and nanotask_id not in ( select nanotask_id from ( select distinct nanotask_id from {4}.nanotask_answer as a inner join {4}.nanotask_nanotask as n on a.nanotask_id=n.id where (a.mturk_worker_id='{0}' and n.project_name='{1}') or n.project_name<>'{1}') as tmp) order by nanotask_id asc, mturk_worker_id desc limit 1;\".format(mturk_worker_id,project_name, session_tab_id, user_agent, project_name, timezone.now())\n with connection.cursor() as cursor:\n cursor.execute(sql)\n connection.close()\n nanotask = Nanotask.objects.using(project_name).filter(answer__mturk_worker_id=mturk_worker_id, answer__session_tab_id=session_tab_id, answer__value=None, project_name=project_name).order_by('id').first();\n \n if nanotask:\n media_data = json.loads(nanotask.media_data)\n template_path = \"./{}/{}.html\".format(project_name, nanotask.template_name)\n template = loader.get_template(template_path)\n response = {\n \"info\": {\"id\": nanotask.id, \"project_name\": nanotask.project_name, \"template_name\": nanotask.template_name },\n \"html\": template.render(media_data, request)\n } \n ret = JsonResponse(response)\n else:\n ret = JsonResponse({\"info\": None, \"html\": None}) \n return ret\n\n\n@csrf_exempt\ndef create_nanotasks(request):\n # TODO:: replace the current script by this restful api\n pass\n\n@csrf_exempt\ndef save_answer(request):\n request_json = json.loads(request.body)\n id = request_json[\"id\"]\n sec = request_json[\"sec\"]\n ans = request_json[\"answer\"]\n mturk_worker_id = request_json[\"mturk_worker_id\"]\n project_name = request_json[\"project_name\"]\n\n with transaction.atomic():\n answer = Answer.objects.using(project_name).filter(nanotask_id=id, mturk_worker_id=mturk_worker_id).first()\n answer.value = json.dumps(ans)\n answer.time_submitted = timezone.now()\n answer.secs_elapsed = sec\n answer.save(using=project_name)\n\n return JsonResponse({})\n\n@csrf_exempt\ndef save_assignment(request):\n request_json = json.loads(request.body)\n ids = request_json[\"ids\"]\n mturk_assignment_id = request_json[\"mturk_assignment_id\"]\n mturk_hit_id = request_json[\"mturk_hit_id\"]\n mturk_worker_id = request_json[\"mturk_worker_id\"]\n project_name = request_json[\"project_name\"]\n amt_assignment = AMTAssignment(mturk_assignment_id = mturk_assignment_id,\n mturk_hit_id = mturk_hit_id,\n mturk_worker_id = mturk_worker_id)\n amt_assignment.save(using=project_name)\n sql = \"UPDATE {0}.nanotask_answer SET amt_assignment_id='{1}' WHERE nanotask_id IN ({2}) AND mturk_worker_id='{3}';\".format(project_name, amt_assignment.id, \",\".join(map(str,ids)), mturk_worker_id)\n with connection.cursor() as cursor:\n cursor.execute(sql)\n connection.close()\n return 
JsonResponse({})\n","sub_path":"nanotask/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"651151255","text":"\"\"\"Module with training\"\"\"\n\nfrom typing import Dict, List\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom modules.data.datasets import CustomDataset\nfrom modules.data.utils import (generate_period_values,\n generate_fibonacci_values,\n generate_exp_values,\n generate_factorial_values)\n\nfrom modules.network.jordan_network import Jordan\nfrom modules.train import train_model, train_model_min_error\nfrom modules.evaluate import eval_model\nfrom config import Config\n\n\ndef plot_errors(errors_list: List[float], sequence: str):\n fig, ax = plt.subplots(figsize=(12, 8))\n\n ax.set_title(f'Learning rate vs number of epochs to achieve {Config.min_error} MSE error. Sequence: {sequence}')\n\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Error')\n\n sns.lineplot(x=range(len(errors_list)), y=errors_list, ax=ax)\n\n plt.show()\n\n\nif __name__ == '__main__':\n config = Config()\n\n data_mapping: Dict = {\n 'factorial': generate_factorial_values(number_of_precalculated_values=config.num_of_precalculated_values),\n 'fibonacci': generate_fibonacci_values(number_of_precalculated_values=config.num_of_precalculated_values),\n 'period': generate_period_values(number_of_precalculated_values=config.num_of_precalculated_values),\n 'exponent': generate_exp_values(number_of_precalculated_values=config.num_of_precalculated_values),\n 'custom': config.data\n }\n\n dataset = CustomDataset(data=data_mapping[config.data_type],\n number_of_input_elements=config.num_of_input_elements,\n number_of_output_elements=config.num_of_output_elements)\n\n in_features = dataset.number_of_input_elements\n out_features = dataset.number_of_output_elements\n\n network = Jordan(lr=config.learning_rate,\n momentum=config.momentum,\n make_zero_context=config.make_zero_context,\n shape=[in_features, config.num_of_hidden_neurons, out_features])\n\n errors_list = train_model_min_error(network=network,\n dataset=dataset,\n n_epochs=config.num_epochs,\n min_error=config.min_error)\n\n accuracy = eval_model(network=network,\n dataset=dataset)\n\n plot_errors(errors_list=errors_list, sequence=config.data_type)\n\n print(f'Accuracy: {accuracy}')\n","sub_path":"start_training.py","file_name":"start_training.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"635689050","text":"#\n# Copyright (c) 2018 Red Hat\n# Licensed under The MIT License (MIT)\n# https://opensource.org/licenses/MIT\n#\nimport logging\nimport re\nimport six\nimport json\n\nfrom django.db.models.fields import NOT_PROVIDED\nfrom django.core.exceptions import FieldDoesNotExist\nfrom rest_framework.utils import formatting\nfrom rest_framework.reverse import reverse\nfrom rest_framework import serializers, relations, fields\n\nfrom cgi import escape\n\n_SERIALIZER_DATA_INDENTATION = ' '\n_MODEL_FIELD_REFERENCE_RE = re.compile(r'(^[A-Z][a-zA-Z]+)\\.([a-z_]+)$')\n_JSON_CODE_BLOCK = '
<pre>%s</pre>
'\n_SERIALIZER_DEFS = {\n 'BooleanField': 'boolean',\n 'NullBooleanField': 'boolean',\n 'CharField': 'string',\n 'IntegerField': 'int',\n 'HyperlinkedIdentityField': 'url',\n 'HyperlinkedRelatedField': 'url',\n 'DateTimeField': 'datetime',\n 'DateField': 'date',\n 'StringRelatedField': 'string',\n 'ReadOnlyField': 'data',\n 'EmailField': 'email address',\n 'SlugField': 'string',\n 'URLField': 'url',\n}\n\n\nclass SerializerFieldData(object):\n def __init__(self, values):\n self.values = values\n\n\ndef get_writable_serializer(view, method):\n serializers = get_serializer(view, include_read_only=False)\n\n if not serializers and method.startswith('bulk_'):\n nonbulk_method = method[5:].upper()\n return 'Same data as for %s.' % nonbulk_method\n\n return serializers\n\n\ndef get_serializer(view, include_read_only):\n \"\"\"\n For given view, return a Markdown code block with JSON description of the\n serializer. If `include_read_only` is `False`, only writable fields will be\n included.\n \"\"\"\n if hasattr(view, 'get_serializer'):\n try:\n serializer = view.get_serializer()\n serializer_json = describe_serializer(serializer, include_read_only)\n return _JSON_CODE_BLOCK % _serializer_data_to_string(serializer_json)\n except AssertionError:\n # Even when `get_serializer` is present, it may raise exception.\n pass\n\n return None\n\n\ndef describe_serializer(serializer, include_read_only):\n \"\"\"\n Try to get description of a serializer. It tries to inspect all fields\n separately, if the serializer does not have fields, it falls back to\n `doc_format` class attribute (if present). If all fails, an error is\n logged.\n \"\"\"\n if hasattr(serializer, 'get_fields'):\n data = {}\n for field_name, field in serializer.get_fields().iteritems():\n if not field.read_only or include_read_only:\n data[field_name] = _serializer_field_data(serializer, field_name, field, include_read_only)\n\n return data\n\n if hasattr(serializer.__class__, 'doc_format'):\n return serializer.doc_format\n\n logger = logging.getLogger(__name__)\n logger.error('Failed to get details for serializer %s' % serializer)\n return 'data'\n\n\ndef _serializer_field_attribute_to_string(css_class, field_data):\n return '%s' % (css_class, field_data)\n\n\ndef _serializer_field_link(text, field_name, base_name):\n return '%s' % (reverse(base_name + '-list'), field_name, text)\n\n\ndef _serializer_field_data_to_string(field_data, parent_fields):\n value = field_data.values.pop('value')\n value = _serializer_data_to_string(value, parent_fields)\n value = _serializer_field_attribute_to_string('value', value)\n\n detail_items = []\n for key in ['tags', 'help']:\n if key in field_data.values:\n detail_items.append(_serializer_field_attribute_to_string(key, field_data.values[key]))\n\n if detail_items:\n value += ' ' + _serializer_field_attribute_to_string('details', ' '.join(detail_items))\n\n return value\n\n\ndef _indented_text(text):\n return text.replace('\\n', '\\n' + _SERIALIZER_DATA_INDENTATION)\n\n\ndef _serializer_field_to_string(items, container_format):\n result = ','.join(['\\n' + _SERIALIZER_DATA_INDENTATION + item for item in items])\n return container_format % (result + '\\n')\n\n\ndef _serializer_field_dict_to_string(field, parent_fields):\n items = []\n for field_name, field_data in sorted(field.iteritems()):\n key = _serializer_field_attribute_to_string('name', '\"%s\"' % field_name)\n parent_fields.append(field_name)\n value = _serializer_data_to_string(field_data, parent_fields)\n field_id = 'field-' + 
'__'.join(parent_fields)\n parent_fields.pop()\n html = '%s: %s' % (field_id, key, _indented_text(value))\n items.append(html)\n\n return _serializer_field_to_string(items, '{%s}')\n\n\ndef _serializer_field_list_to_string(field, parent_fields):\n items = []\n for field_data in field:\n value = _serializer_data_to_string(field_data, parent_fields)\n items.append(_indented_text(value))\n\n if len(items) == 1:\n return '[ %s, … ]' % items[0]\n\n return _serializer_field_to_string(items, '[%s]')\n\n\ndef _model_field_reference_to_string(data):\n match = _MODEL_FIELD_REFERENCE_RE.match(data)\n if match is not None:\n model_name = match.group(1)\n field_name = match.group(2)\n\n for model_class, base_name in _models_and_base_names():\n if model_class.__name__ == model_name:\n return _serializer_field_link(data, field_name, base_name)\n\n return data\n\n\ndef _serializer_data_to_string(data, parent_fields=None):\n if parent_fields is None:\n parent_fields = []\n\n if isinstance(data, SerializerFieldData):\n return _serializer_field_data_to_string(data, parent_fields)\n\n if isinstance(data, dict):\n return _serializer_field_dict_to_string(data, parent_fields)\n\n if isinstance(data, list):\n return _serializer_field_list_to_string(data, parent_fields)\n\n if isinstance(data, six.string_types):\n return _model_field_reference_to_string(data)\n\n return escape(json.dumps(data))\n\n\ndef _get_type_from_docstring(value, default=None):\n \"\"\"\n Convert docstring into object suitable for inclusion as documentation. It\n tries to parse the docstring as JSON, falling back on provided default\n value.\n \"\"\"\n if value:\n try:\n return json.loads(value)\n except ValueError:\n return formatting.dedent(str(value))\n\n if default is not None:\n return default\n\n return None\n\n\ndef _models_and_base_names():\n from pdc.apps.utils.SortedRouter import router\n for _, viewset, base_name in router.registry:\n serializer_class = getattr(viewset, 'serializer_class', None)\n meta = getattr(serializer_class, 'Meta', None)\n serializer_model_class = getattr(meta, 'model', None)\n if serializer_model_class:\n yield serializer_model_class, base_name\n\n\ndef _get_details_for_slug(serializer, field_name, field):\n \"\"\"\n For slug field, we ideally want to get Model.field format. 
However, in some\n cases getting the model name is not possible, and only field name is\n displayed.\n\n Tries to guess the model from \"source\" or \"queryset\" attributes.\n \"\"\"\n if getattr(field, 'source', None) is not None and field.source.endswith('_set'):\n model_name = field.source[:-4].lower()\n for model_class, base_name in _models_and_base_names():\n if model_class.__name__.lower() == model_name:\n return '%s.%s' % (model_class.__name__, field.slug_field)\n\n if getattr(field, 'queryset', None) is not None:\n model = field.queryset.model\n if model:\n return '%s.%s' % (model.__name__, field.slug_field)\n\n return field.slug_field\n\n\ndef _get_field_type(serializer, field_name, field, include_read_only):\n \"\"\"\n Try to describe a field type.\n \"\"\"\n if not include_read_only and hasattr(field, 'writable_doc_format'):\n return _get_type_from_docstring(field.writable_doc_format)\n\n if hasattr(field, 'doc_format'):\n return _get_type_from_docstring(field.doc_format)\n\n if isinstance(field, (relations.ManyRelatedField, serializers.ListSerializer)):\n # Many field, recurse on child and make it a list\n if isinstance(field, relations.ManyRelatedField):\n field = field.child_relation\n else:\n field = field.child\n return [_get_field_type(serializer, field_name, field, include_read_only)]\n\n if field.__class__.__name__ in _SERIALIZER_DEFS:\n return _SERIALIZER_DEFS[field.__class__.__name__]\n\n if isinstance(field, serializers.SlugRelatedField):\n return _get_details_for_slug(serializer, field_name, field)\n\n if isinstance(field, serializers.SerializerMethodField):\n # For method fields try to use docstring of the method.\n method_name = field.method_name or 'get_{field_name}'.format(field_name=field_name)\n method = getattr(serializer, method_name, None)\n if method:\n docstring = getattr(method, '__doc__')\n return _get_type_from_docstring(docstring, docstring or 'method')\n\n if isinstance(field, serializers.BaseSerializer):\n return describe_serializer(field, include_read_only)\n\n logger = logging.getLogger(__name__)\n logger.error('Undocumented field %s' % field)\n return 'UNKNOWN'\n\n\ndef _get_default_value(serializer, field_name, field):\n \"\"\"\n Try to get default value for a field and format it nicely.\n \"\"\"\n value = field.default\n if hasattr(value, 'doc_format'):\n return _get_type_from_docstring(value.doc_format)\n if value == fields.empty:\n # Try to get default from model field.\n try:\n default = serializer.Meta.model._meta.get_field(field_name).default\n return default if default != NOT_PROVIDED else None\n except (FieldDoesNotExist, AttributeError):\n return None\n return value\n\n\ndef _serializer_field_data(serializer, field_name, field, include_read_only):\n \"\"\"\n Returns key for serializer JSON data description.\n \"\"\"\n key = {}\n key['value'] = _serializer_field_value(serializer, field_name, field, include_read_only)\n\n if not include_read_only:\n tags = _serializer_field_tags(serializer, field_name, field)\n if tags:\n key['tags'] = ', '.join(tags)\n\n if include_read_only and field.allow_null:\n key['tags'] = 'nullable'\n\n description = _serializer_field_help_text(serializer, field_name, field)\n if description:\n key['help'] = description\n\n return SerializerFieldData(key)\n\n\ndef _serializer_field_value(serializer, field_name, field, include_read_only):\n \"\"\"\n Returns value for serializer JSON data description (recursive).\n \"\"\"\n return _get_field_type(serializer, field_name, field, include_read_only)\n\n\ndef 
_serializer_field_tags(serializer, field_name, field):\n \"\"\"\n Returns list of tags for serializer field.\n\n A tag can be one of: optional, nullable, default=VALUE\n \"\"\"\n tags = []\n\n if not field.required:\n tags.append('optional')\n\n try:\n default = json.dumps(_get_default_value(serializer, field_name, field))\n if not (default is None and field.allow_null):\n tags.append('default=%s' % escape(default))\n except TypeError:\n pass\n\n if field.allow_null:\n tags.append('nullable')\n\n try:\n model_field = serializer.Meta.model._meta.get_field(field_name)\n if model_field.unique:\n tags.append('unique')\n except (AttributeError, FieldDoesNotExist):\n pass\n\n return tags\n\n\ndef _serializer_field_help_text(serializer, field_name, field):\n description = _field_help_text(field)\n if description:\n return description\n\n try:\n model_field = serializer.Meta.model._meta.get_field(field_name)\n return _field_help_text(model_field)\n except (AttributeError, FieldDoesNotExist):\n return ''\n\n\ndef _field_help_text(field):\n return getattr(field, 'help_text', None) or ''\n","sub_path":"pdc/apps/common/renderers_serializers.py","file_name":"renderers_serializers.py","file_ext":"py","file_size_in_byte":12024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"429349474","text":"from lxml import etree\nfrom os import path\n\n\ndef parseXML(xmlFile):\n \"\"\"\n Парсинг XML\n \"\"\"\n with open(xmlFile) as fobj:\n xml = fobj.read()\n\n root = etree.fromstring(xml)\n\n for appt in root.getchildren():\n for elem in appt.getchildren():\n if not elem.text:\n text = \"None\"\n else:\n text = elem.text\n\n print(elem.tag + \" => \" + text)\n\ndef createContent():\n path2xml = path.abspath(path.split(path.abspath(path.dirname(__file__)))[0]) + '\\\\bases\\\\webav\\\\webav.xml'\n print(path2xml)\n test = parseXML(path2xml)\n print(type(test))\n\n\ncreateContent()","sub_path":"webav/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"585786626","text":"from conans import ConanFile, CMake, tools\nimport os\n\n\nclass CgnsConan(ConanFile):\n name = \"cgns\"\n description = \"CGNS provides a standard for recording and recovering CFD data\"\n topics = \"conan\", \"cfd\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://cgns.github.io\"\n license = \"cgns\"\n generators = \"cmake\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_cgns_tools\": [True, False],\n \"build_testing\": [True, False],\n \"enable_64bit\": [True, False],\n \"enable_base_scope\": [True, False],\n \"enable_fortran\": [True, False],\n \"enable_hdf5\": [True, False],\n \"enable_legacy\": [True, False],\n \"enable_mem_debug\": [True, False],\n \"enable_scoping\": [True, False],\n \"enable_parallel\": [True, False],\n \"enable_tests\": [True, False],\n \"use_shared\": [True, False],\n \"hdf5_build_shared\": [True, False],\n \"hdf5_need_mpi\": [True, False],\n \"hdf5_need_zlib\": [True, False],\n \"hdf5_need_szip\": [True, False]\n }\n default_options = {\"shared\": False,\n \"fPIC\": True,\n \"build_cgns_tools\": False,\n \"build_testing\": False,\n \"enable_64bit\": True,\n \"enable_base_scope\": False,\n \"enable_fortran\": False,\n \"enable_hdf5\": True,\n \"enable_legacy\": True,\n \"enable_mem_debug\": False,\n \"enable_scoping\": 
False,\n                       \"enable_parallel\": False,\n                       \"enable_tests\": True,\n                       \"use_shared\": False,\n                       \"hdf5_build_shared\": True,\n                       \"hdf5_need_mpi\": False,\n                       \"hdf5_need_zlib\": False,\n                       \"hdf5_need_szip\": False\n                       }\n\n    @property\n    def _source_subfolder(self):\n        return \"source_subfolder\"\n\n    @property\n    def _build_subfolder(self):\n        return \"build_subfolder\"\n\n    def config_options(self):\n        if self.settings.os == \"Windows\":\n            del self.options.fPIC\n\n    def requirements(self):\n        if self.options.enable_hdf5:\n            self.requires(\"hdf5/1.10.6\")\n\n    def source(self):\n        tools.get(**self.conan_data[\"sources\"][self.version])\n        extracted_dir = self.name + \"-\" + self.version.replace('.', '_')\n        os.rename(extracted_dir, self._source_subfolder)\n\n    def _configure_cmake(self):\n        cmake = CMake(self)\n        cmake.definitions[\"CGNS_BUILD_CGNSTOOLS\"] = self.options.build_cgns_tools\n        cmake.definitions[\"CGNS_BUILD_SHARED\"] = self.options.shared\n        cmake.definitions[\"CGNS_BUILD_TESTING\"] = self.options.build_testing\n        cmake.definitions[\"CGNS_ENABLE_64BIT\"] = self.options.enable_64bit\n        cmake.definitions[\"CGNS_ENABLE_BASE_SCOPE\"] = self.options.enable_base_scope\n        cmake.definitions[\"CGNS_ENABLE_FORTRAN\"] = self.options.enable_fortran\n        cmake.definitions[\"CGNS_ENABLE_HDF5\"] = self.options.enable_hdf5\n        cmake.definitions[\"CGNS_ENABLE_LEGACY\"] = self.options.enable_legacy\n        cmake.definitions[\"CGNS_ENABLE_MEM_DEBUG\"] = self.options.enable_mem_debug\n        cmake.definitions[\"CGNS_ENABLE_PARALLEL\"] = self.options.enable_parallel\n        cmake.definitions[\"CGNS_ENABLE_SCOPING\"] = self.options.enable_scoping\n        cmake.definitions[\"CGNS_ENABLE_TESTS\"] = self.options.enable_tests\n        cmake.definitions[\"CGNS_USE_SHARED\"] = self.options.use_shared\n        cmake.definitions[\"HDF5_BUILD_SHARED_LIBS\"] = self.options.hdf5_build_shared\n        cmake.definitions[\"HDF5_NEED_MPI\"] = self.options.hdf5_need_mpi\n        cmake.definitions[\"HDF5_NEED_ZLIB\"] = self.options.hdf5_need_zlib\n        cmake.definitions[\"HDF5_NEED_SZIP\"] = self.options.hdf5_need_szip\n        cmake.configure(build_folder=self._build_subfolder)\n        return cmake\n\n    def build(self):\n        cmake = self._configure_cmake()\n        cmake.build()\n\n    def package(self):\n        cmake = self._configure_cmake()\n        cmake.install()\n\n    def package_info(self):\n        self.cpp_info.libs = [\"cgns\"]\n","sub_path":"recipes/cgns/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}{"seq_id":"277276665","text":"import numpy as np\nimport cv2\nimport math\n\ndef nothing(*arg):\n    pass\n\ndef make_odd(val):\n    if val % 2 == 0:\n        val += 1\n\n    return val\n\ndef findSignificantContours(img, edge_image):\n    contours, heirarchy = cv2.findContours(edge_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    if heirarchy is None:\n        return None\n\n    # Find level 1 contours\n    level1 = []\n    for i, tupl in enumerate(heirarchy[0]):\n        # Each array is in format (Next, Prev, First child, Parent)\n        # Filter the ones without parent\n        if tupl[3] == -1:\n            tupl = np.insert(tupl, 0, [i])\n            level1.append(tupl)\n\n    # From among them, find the contours with large surface area.\n    significant = []\n    # tooSmall = edge_image.size * 0.00005 # If contour isn't covering 5% of total area of image then it probably is too small\n    # tooSmall = edge_image.size * 1 / 100 # If contour isn't covering 5% of total area of image then it probably is too small\n    tooSmall = 0.005 * edge_image.size\n    # tooSmall = 0.0\n    for tupl in 
level1:\n contour = contours[tupl[0]];\n (x,y),radius = cv2.minEnclosingCircle(contour)\n area = math.pi*radius*radius\n # area = cv2.contourArea(contour)\n if area > tooSmall:\n significant.append([contour, area])\n\n # Draw the contour on the original image\n cv2.drawContours(img, [contour], 0, (0,255,0),2, cv2.cv.CV_AA, maxLevel=1)\n cv2.circle(img,(int(x),int(y)),int(radius),(0,255,0),2)\n\n significant.sort(key=lambda x: x[1])\n #print ([x[1] for x in significant]);\n return [x[0] for x in significant];\n\nwindow_nm = 'img_cntrls'\ncv2.namedWindow(window_nm)\ncv2.createTrackbar('blur_size', window_nm, 13 , 21, nothing)\n# cv2.createTrackbar('blur_size', window_nm, 13 , 101, nothing)\ncv2.createTrackbar('canny_high', window_nm, 200 , 250, nothing)\ncv2.createTrackbar('canny_low', window_nm, 50 , 250, nothing)\n\n# file_name = 'WIN_20161025_16_46_54_Pro'\nfile_name = 'WIN_20161025_16_54_51_Pro'\n# file_name = 'WIN_20161025_16_56_51_Pro'\n# file_name = 'WIN_20161025_17_01_08_Pro'\n# file_name = 'WIN_20161025_17_02_21_Pro'\n# file_name = 'WIN_20161025_17_05_50_Pro'\n# file_name = 'WIN_20161025_17_06_56_Pro'\n# img = cv2.imread('0008-0407-2011-1313_littering_pop_can_on_grass_pics_pictures_photos.jpg')\nvideo_capture = cv2.VideoCapture(file_name+'.mp4')\n# video_writer = cv2.VideoWriter('WIN_20161025_16_54_51_Pro_processed.mp4', cv2.cv.CV_FOURCC('H','2','6','4'), 29, (1280,720), 1)\n# video_writer = cv2.VideoWriter('WIN_20161025_16_54_51_Pro_processed.mp4', -1, 29, (1280,720), 1)\n# video_writer = cv2.VideoWriter(file_name+'_processed.avi', cv2.cv.CV_FOURCC('M','J','P','G'), 29, (1280,720), 1)\n\nframe_skip_factor = 1\nframe_count = 0\nwhile True:\n frame_count += 1\n ret, img = video_capture.read()\n if (ret == True):\n if frame_count%frame_skip_factor == 0:\n\n canny_high = cv2.getTrackbarPos('canny_high',window_nm)\n canny_low = cv2.getTrackbarPos('canny_low',window_nm)\n\n blur_size = cv2.getTrackbarPos('blur_size',window_nm)\n blur_size = make_odd(blur_size)\n\n # blurred = cv2.GaussianBlur(img, (blur_size, blur_size), 0) # Remove noise\n # blurred = cv2.GaussianBlur(img, (blur_size, blur_size), 0) # Remove noise\n # blurred = cv2.medianBlur(np.uint8(img), (blur_size, blur_size)) # Remove noise\n blurred = cv2.medianBlur(np.uint8(img), blur_size) # Remove noise\n # blurred = cv2.blur(img, (blur_size, blur_size))\n\n # img_grey = imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edgeImg = cv2.Canny(blurred,canny_low,canny_high)\n\n findSignificantContours(img, edgeImg)\n\n # cv2.imshow('Orig',img)\n # cv2.imshow('Blurred',blurred)\n cv2.imshow('edgeImg',edgeImg)\n # video_writer.write(img)\n\n else:\n # video_capture.set(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO,0)\n # video_writer.release()\n video_capture.release()\n cv2.destroyAllWindows()\n exit()\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything is done, release the capture\n# video_writer.release()\nvideo_capture.release()\ncv2.destroyAllWindows()","sub_path":"backgroundremoval1_video.py","file_name":"backgroundremoval1_video.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"367968465","text":"# coding=utf-8\ndef isnull(x,y):\n if (x is ''):\n return y\n else: \n return x+' or '\n \ndef ConvertVehicleType(Value,VehicleType):\n result=''\n if((Value&0x0001)==0x0001):\n result=isnull(result,'')+'('+VehicleType+'&0x20080000)=0x20080000'\n if((Value&0x1000)==0x1000):\n 
result=isnull(result,'')+'('+VehicleType+'&0x08080000)=0x08080000'\n    if((Value&0x0004)==0x0004):\n        result=isnull(result,'')+'('+VehicleType+'&0x10080000)=0x10080000' + ' or (' + VehicleType + '&0x02080000)=0x02080000'\n    if((Value&0x0002)==0x0002):\n        result=isnull(result,'')+'('+VehicleType+'&0x01080000)=0x01080000'\n    if((Value&0x0080)==0x0080):\n        result=isnull(result,'')+'('+VehicleType+'&0x00100000)=0x00100000'\n    if((Value&0x0010)==0x0010):\n        result=isnull(result,'')+'('+VehicleType+'&0x00040001)=0x00040001'\n    if((Value&0x0020)==0x0020):\n        result=isnull(result,'')+'('+VehicleType+'&0x00040020)=0x00040020'\n    # compare with ==, not identity, when testing for an empty string\n    if(result == ''):\n        result='1=0'\n\n    return result\n\ndef ConvertFuncVType(FuncVtype,Func):\n    array = FuncVtype.split(\"||\")\n    VtypeStr = array[0]\n    Configstr = array[1]\n    array1 = VtypeStr.split(\",\")\n    FirstVtypeStr = array1[0]\n    SecondVtypeStr = array1[1]\n#    ThirdVtypeStr=array1[2]\n#    ForthVtypeStr=array1[3]\n\n    array2 = Configstr.split(\",\")\n    FirstConfigstr = array2[0]\n    SecondConfigstr = array2[1]\n#    ThirdConfigstr=array2[2]\n#    ForthConfigstr=array2[3]\n\n    # split() yields strings, so convert to int before comparing with numbers\n    if (int(FirstVtypeStr) == 0):\n        where1 = '1=1'\n    else:\n        where1 = ' (' + ConvertVehicleType(int(FirstVtypeStr), 'N.VehicleType') + ')'\n\n    if (int(FirstConfigstr) != -1):\n        where1 = where1 + ' AND configstatus = ' + FirstConfigstr\n\n    if (int(SecondVtypeStr) == 0):\n        where2 = '1=1'\n    else:\n        where2 = ' (' + ConvertVehicleType(int(SecondVtypeStr), 'N.VehicleType') + ')'\n\n    if (int(SecondConfigstr) != -1):\n        where2 = where2 + ' AND configstatus = ' + SecondConfigstr\n\n    if (Func == 1):\n        return where1\n    elif (Func == 2):\n        return where2\n    else:\n        return 0\n","sub_path":"SixBanCheck/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}{"seq_id":"2509764","text":"#!/usr/bin/env python3\nimport numpy as np\n#from matplotlib.pyplot import *\nimport deltasigma as ds\nimport matplotlib.pyplot as plt\nimport sys\n\n\n# === GLOBALS ============================================================================\nfile_name = sys.argv[1] if len(sys.argv) > 1 else \"bit_stream.txt\"\nVmin = 0.0           # Minimum absolute possible input\nVmax = 3.0           # Maximum absolute possible input\nVref = 1.5           # Relative Vref\nVgnda = 1.5          # Absolute analog ground\nVin_min = 0.         # Minimum absolute used input\nVin_max = 3. 
# Maximum absolute used input\nfs = 256.0e3 # Sampling frequency (SD-modulator)\nTs = 1/fs # Sampling period\nT_SaH = Ts*512 # Input signal sampling period (S/H-block)\nNbit = 12 # Bit accuracy (for LSB/2 plot)\nNORM_FACT = Vmax # Signals are always plottet normalized to one times this factor\ncic = {\n \"length\": 16, # Decimation (CIC) filter length\n \"lengths\": [16, 32, 64, 128, 256], # all possible filter lengths\n \"order\": 3 # Decimation filter order\n}\n\n# ========================================================================================\n# === LOAD DATA ==== Bit-stream and input signal =========================================\n#t_sdm, s_sdm_orig = np.loadtxt(open(\"bit_stream.txt\", \"rb\"), skiprows=0, unpack=True)\nt_sdm, s_sdm_orig = np.loadtxt(open(file_name, \"rb\"), skiprows=0, unpack=True)\ns_sdm = np.round(s_sdm_orig/np.max(s_sdm_orig)) # normalize to one\n\n# ========================================================================================\n# === FILTER DATA ==== Use decimation filter (CIC) =======================================\ns_sdm_cic = ds.sinc_decimate(s_sdm, cic[\"order\"], cic[\"length\"])\nt_sdm_cic = t_sdm[cic[\"length\"]:len(t_sdm):cic[\"length\"]]\ns_sdm_cic = s_sdm_cic[2:]\nt_sdm_cic = t_sdm_cic[2:]\n\nfig_out = plt.figure(figsize=(20, 8))\nax_out = fig_out.add_subplot(111)\nax_out.step(t_sdm_cic, s_sdm_cic*NORM_FACT, where='post', label=r\"$V_{out,cic}$\", zorder=4)\n#ax_out.scatter(t_sdm_cic, s_sdm_cic*NORM_FACT, label=r\"$V_{out,cic}$\")\n#xlim(0.2,1.6)\n\n# set title and axis\nax_out.set_title(\"Filtered Bit-Stream, Filter Length {}\".format(cic[\"length\"]))\nax_out.set_xlabel(\"Time (s)\")\nax_out.set_ylabel(\"Voltage (V)\")\nax_out.legend(loc='upper left')\n#ax_out.set_ylim([0.4,0.6])\nfig_out.tight_layout()\nplt.show()\n#fig_out.savefig(file_name.replace('txt','svg'))\n","sub_path":"measurements/measurements-dc/sigdel/bitstream.py","file_name":"bitstream.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"651495058","text":"#!/usr/bin/env python\n\nfrom subprocess import getoutput\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nfrom ble2mqtt.__version__ import VERSION\n\n\nclass PostInstall(install):\n pkgs = ' http://github.com/hbldh/bleak/tarball/dbus-next-2#egg=bleak-0.11.0a1'\n\n def run(self):\n install.run(self)\n print(getoutput('python3 -m pip install' + self.pkgs))\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='ble2mqtt',\n version=VERSION,\n description='BLE to MQTT bridge',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author='Ivan Belokobylskiy',\n author_email='belokobylskij@gmail.com',\n url='https://github.com/devbis/ble2mqtt/',\n entry_points={\n 'console_scripts': ['ble2mqtt=ble2mqtt.__main__:main']\n },\n packages=find_packages(include=['ble2mqtt', 'ble2mqtt.*']),\n cmdclass={'install': PostInstall},\n install_requires=[\n 'aio-mqtt>=0.2.0',\n # 'bleak @ http://github.com/hbldh/bleak/tarball/dbus-next-2#egg=bleak-0.11.0a1',\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Utilities',\n 
],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"403005398","text":"import logging\nimport itertools\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\n\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nfrom collections import Counter\nfrom utils import preprocess_fn, setup_logger, try_except\nfrom networkx.algorithms.community.modularity_max import greedy_modularity_communities\n\n\nlogger = logging.getLogger(__name__)\nlogger = setup_logger(logger)\n\n\nclass AuthorNetwork(object):\n \"\"\"\n A class to represent network of authors\n \"\"\"\n\n def __init__(self):\n \"\"\"\n initialize and build the network\n \"\"\"\n self.network = None\n self.communities = None\n self.authors = 0\n\n self.read_dataset()\n self.get_author_clusters()\n self.get_authors()\n self.get_author_article_counts()\n self.build_network()\n\n def read_dataset(self):\n \"\"\"\n load dataset to be used in the network\n \"\"\"\n self.data = pd.read_pickle(\"./data/processed_dataset.pkl\")\n\n def get_author_clusters(self):\n \"\"\"\n get author clusters. The list of authors of an article constitutes a cluster. \n \"\"\"\n author_clusters = []\n for _, row in self.data.iterrows():\n these_authors = row.authors\n author_clusters.append(these_authors)\n citations = row.cited_by\n if len(citations) > 0:\n for _, v in citations.items():\n cited_authors = [preprocess_fn(auth) for auth in v[\"authors\"]]\n author_clusters.append(cited_authors)\n self.author_clusters = author_clusters\n\n def get_author_article_counts(self):\n \"\"\"\n create a dict which contains number of articles published by each author in the dataset\n \"\"\"\n flattened_authors = list(itertools.chain(*self.author_clusters))\n author_counts = Counter(flattened_authors) # .most_common()\n self.author_to_num_articles = author_counts\n\n def get_authors(self):\n \"\"\"\n get the list of authors in the dataset\n \"\"\"\n flattened_authors = list(itertools.chain(*self.author_clusters))\n self.authors = list(set(flattened_authors))\n\n def build_network(self):\n \"\"\"\n build the author network using networkx\n \"\"\"\n G = nx.Graph()\n G.add_nodes_from(self.authors)\n\n # add edges\n list_edges = []\n for this_cluster in self.author_clusters:\n if len(this_cluster) < 10:\n combs = list(itertools.combinations(this_cluster, 2))\n for this_comb in combs:\n list_edges.append((this_comb[0], this_comb[1]))\n G.add_edges_from(list_edges)\n self.network = G\n self.num_nodes, self.num_edges = G.number_of_nodes(), G.number_of_edges()\n logger.info(\n \"Author Network is built with {} nodes and {} edges\".format(\n self.num_nodes, self.num_edges\n )\n )\n\n def find_neighbor(self, author, order=1):\n \"\"\"\n find neighbor of a author. 
If order is 1, this would be all co-authors of the author.\n If order=2, the neighbor will include all co-authors of co-authors of the author.\n\n :param author: author under consideration\n :type author: str\n :param order: number of hops to consider, defaults to 1\n :type order: int, optional\n :return: a list of neighboring author names\n :rtype: List[str]\n \"\"\"\n if author not in self.authors:\n logger.info(\"Error: Author not found!\")\n return None\n group = set([author])\n for proximity in range(order):\n group = set((nbr for member in group for nbr in self.network[member]))\n return list(group)\n\n @try_except(logger)\n def find_shortest_paths(self, source, target):\n \"\"\"\n all all possible paths from source to target author with minimum number of hops.\n\n :param source: source author\n :type source: str\n :param target: target author\n :type target: str\n :return: a list containing linking authors from source to target\n :rtype: List[str]\n \"\"\"\n paths = nx.all_shortest_paths(self.network, source=source, target=target)\n return list(paths)\n\n def plot_subnetwork(self, subset_nodes):\n \"\"\"\n visualize a network with a subset of authors\n\n :param subset_nodes: a list of authors under consideration\n :type subset_nodes: List[str]\n \"\"\"\n SG = self.network.subgraph(subset_nodes)\n pos = nx.circular_layout(SG)\n nx.draw(\n SG,\n with_labels=True,\n node_color=\"skyblue\",\n pos=pos,\n node_size=500,\n font_size=18,\n )\n plt.savefig(\"./data/sub_graph.png\", dpi=200)\n plt.clf()\n\n def get_network_stats(self):\n \"\"\"\n get stats of the author network\n \"\"\"\n # density\n self.density = round(nx.density(self.network), 5)\n # degree\n degree = self.network.degree()\n degree_list = []\n for (n, d) in degree:\n degree_list.append(d)\n self.ave_degree = sum(degree_list) / len(degree_list)\n\n # clustering coefficient\n local_coeffs = nx.algorithms.cluster.clustering(self.network)\n self.ave_clustering_coeff = round(\n sum(local_coeffs.values()) / len(local_coeffs), 5\n )\n\n # output\n logger.info(\"Author Network Density = {}\".format(self.density))\n logger.info(\"Author Network Average Degree = {}\".format(self.ave_degree))\n logger.info(\n \"Author Network Average Clustering Coefficient = {}\".format(\n self.ave_clustering_coeff\n )\n )\n\n def create_communities(self):\n \"\"\"\n create communities within the network. 
All members of a given community are closely related.\n \"\"\"\n author_com = greedy_modularity_communities(self.network)\n self.communities = list(author_com)\n self.num_communities = len(self.communities)\n logger.info(\"{} communities created.\".format(self.num_communities))\n\n def get_community_members(self, community_idx):\n \"\"\"\n Find members of a given community\n\n :param community_idx: community index\n :type community_idx: int\n :return: members of the community\n :rtype: List[str]\n \"\"\"\n if not self.communities:\n self.create_communities()\n if community_idx >= self.num_communities:\n logger.error(\"Community index is out of bound...\")\n return None\n members = list(self.communities[community_idx])\n return members\n\n def get_community_topics(self, community_idx):\n \"\"\"\n get community research interests\n\n :param community_idx: community index\n :type community_idx: int\n :return: a list of key words of research topics attributed to the community\n :rtype: List[str]\n \"\"\"\n members = self.get_community_members(community_idx)\n flags = (\n self.data[\"authors\"]\n .apply(lambda x: True if set(x).intersection(set(members)) else False)\n .values\n )\n tmp_df = self.data[flags].copy()\n kws = Counter(list(itertools.chain(*tmp_df.author_kws.values))).most_common(10)\n topics = []\n for word, _ in kws:\n topics.append(word)\n return topics\n\n def __repr__(self):\n return \"A Network of {} Authors\".format(len(self.authors))\n\n\nif __name__ == \"__main__\":\n an = AuthorNetwork()\n print(an.find_neighbor(\"biswas r\"))\n paths = an.find_shortest_paths(\"biswas r\", \"leclerc j\")\n for path in paths:\n print(path)\n\n an.get_network_stats()\n members = an.get_community_members(community_idx=35)\n print(members)\n an.plot_subnetwork(members)\n\n # topics = an.get_community_topics(community_idx=25)\n # print(topics)\n\n topics = an.get_community_topics(community_idx=35)\n print(topics)\n","sub_path":"word_embeddings/author_network.py","file_name":"author_network.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"585279293","text":"#!/usr/bin/python\n\nimport sys\n\nclass LogWriter(object):\n eol = '\\n'\n method = sys.stdout.write\n \n def write(_, category, message):\n _.method(\"<%s> %s%s\" % (category, message, _.eol))\n \n if category == 'raise':\n #raise message\n sys.exit(1)\n\nlog = LogWriter()\n\n","sub_path":"lib/LogWriter.py","file_name":"LogWriter.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"644607311","text":"import sys\n\n# bookshelf\nfrom reporter import Reporter\nfrom poster import Poster\nimport time\nfrom dscauth import collect_dsc_data\n\nfrom app_reorganizedata import go as formatCraigslistData\n\ndef fixString(str, val):\n return str[:val].ljust(val)\n\ndef printTitle(titleString, maxLength = 79):\n if len(titleString) > maxLength:\n return titleString[:maxLength]\n remaining = maxLength - len(titleString)\n titleString += \" \"\n while remaining > 0:\n titleString += \"=\"\n remaining = remaining - 1\n print(titleString)\n\n# ===== RUN, FOREST, RUN! 
=====\ndef main(argv):\n datetime = time.localtime()\n months = { 1 : \"January\", 2 : \"February\",\n 3 : \"March\", 4 : \"April\",\n 5 : \"May\", 6 : \"June\",\n 7 : \"July\", 8 : \"August\",\n 9 : \"September\", 10 : \"October\",\n 11 : \"November\", 12 : \"December\"}\n printTitle(\"Online Inventory Data Log | \" + str(months[datetime.tm_mon]) + \" \"\\\n + str(datetime.tm_mday) + \", \" + str(datetime.tm_year))\n r = Reporter()\n printTitle(\"Craigslist Data\")\n clreport = \"\"\n clinventoryreport = r.report_data(listPages=[1,2], reportString = clreport)\n printTitle(\"Organized Craigslist Data\")\n formatCraigslistData(clreport.strip())\n\n printTitle(\"DSC Data\")\n dscinventoryreport = collect_dsc_data()\n #sort by Make and Model\n dscMakeSorted = {}\n for vin in dscinventoryreport.keys():\n currMake = dscinventoryreport[vin][\"make\"]\n currModel = dscinventoryreport[vin][\"model\"]\n # check if make is in dscMakesorted. If not, create dict entry for make\n if currMake not in dscMakeSorted.keys():\n dscMakeSorted[dscinventoryreport[vin][\"make\"]] = {}\n # check if model exists in dscMakeSorted[make] dictionary\n if currModel not in dscMakeSorted[dscinventoryreport[vin][\"make\"]].keys():\n dscMakeSorted[dscinventoryreport[vin][\"make\"]][dscinventoryreport[vin][\"model\"]] = []\n\n #insert if previous conditions are met. dscMakesorted[make][model] exists\n if vin not in dscMakeSorted[currMake][currModel]:\n dscMakeSorted[currMake][currModel].append(vin)\n\n print(fixString(\"Stock No.\", 11) + \"\\t\" + \\\n fixString(\"VIN\", 17) + \"\\t\" + \\\n fixString(\"Year\", 4) + \"\\t\" + \\\n fixString(\"Make\", 12) + \"\\t\" + \\\n fixString(\"Model\", 30) + \"\\t\" + \\\n fixString(\"Trim\", 20) + \"\\t\")\n\n for makeKey in sorted(dscMakeSorted.keys()):\n for modelKey in sorted(dscMakeSorted[makeKey].keys()):\n for vin in dscMakeSorted[makeKey][modelKey]:\n currStock = fixString(dscinventoryreport[vin][\"stock_number\"],11) + \"\\t\" +\\\n fixString(dscinventoryreport[vin][\"vin\"],17) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"year\"],4) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"make\"],12) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"model\"],20) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"trim\"],25)\n print(currStock)\n\n\n if False:\n print(fixString(\"Stock No.\", 11) + \"\\t\" + \\\n fixString(\"VIN\", 17) + \"\\t\" + \\\n fixString(\"Year\", 4) + \"\\t\" + \\\n fixString(\"Make\", 12) + \"\\t\" + \\\n fixString(\"Model\", 30) + \"\\t\" + \\\n fixString(\"Trim\", 20) + \"\\t\")\n for vin in dscinventoryreport.keys():\n currStock = fixString(dscinventoryreport[vin][\"stock_number\"],11) + \"\\t\" +\\\n fixString(dscinventoryreport[vin][\"vin\"],17) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"year\"],4) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"make\"],12) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"model\"],20) + \"\\t\" + \\\n fixString(dscinventoryreport[vin][\"trim\"],25)\n print(currStock)\n\n printTitle(\"Inventory Check\")\n clinventorykeys = clinventoryreport.keys()\n dscinventorykeys = dscinventoryreport.keys()\n\n printTitle(\" [+] Checking for missing craigslist posts\")\n for vin in dscinventorykeys:\n #print(\"checking vin: \" + vin)\n if not vin in clinventorykeys:\n print(\"Missing \" + dscinventoryreport[vin][\"stock_number\"] + \" | \" \\\n + vin + \" | \" \\\n + dscinventoryreport[vin][\"year\"] + \" \" \\\n + dscinventoryreport[vin][\"make\"] + \" \" \\\n + dscinventoryreport[vin][\"model\"] 
+ \" \" \\\n + dscinventoryreport[vin][\"trim\"] + \" \" \\\n + dscinventoryreport[vin][\"color\"] + \" |\")\n\n printTitle(\" [+] Checking for craigslist posts that need to be removed\")\n for vin in clinventorykeys:\n if not vin in dscinventorykeys:\n print(\"Craigslist post \" + clinventoryreport[vin][\"id\"] + \" | \" \\\n + vin + \" | \" \\\n + str(clinventoryreport[vin][\"title\"]) + \" | has been removed from DSC\")\n\n\n\n return 0\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"src/app_inventoryreport.py","file_name":"app_inventoryreport.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"457049429","text":"from Aula52.model.endereco_model import EnderecoModel\nfrom Aula52.dao.base_dao import BaseDao\n\nclass EnderecoDao(BaseDao):\n def __init__(self):\n super().__init__(\"01_MDG_ENDERECO\")\n\n def listar_todos(self):\n tuplas = super().listar_todos()\n lista = []\n for e in tuplas:\n model = EnderecoModel(e[1], e[2], e[3], e[4], e[5], e[6], e[0])\n lista.append(model.__dict__)\n return lista\n\n def buscar_por_id(self, id):\n tupla = super().buscar_por_id(id)\n model = EnderecoModel(tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], tupla[6], tupla[0])\n return model.__dict__\n\n def inserir(self, model: EnderecoModel):\n comando_sql = \"\"\"INSERT INTO {} \n (\n LOGRADOURO,\n NUMERO,\n COMPLEMENTO,\n BAIRRO,\n CIDADE,\n CEP\n )VALUES\n (\n '{}',\n '{}',\n '{}',\n '{}',\n '{}',\n '{}'\n )\n \"\"\".format(self.tabela, model.logradouro, model.numero, model.complemento, model.bairro, model.cidade, model.cep )\n model.id = super().inserir(comando_sql)\n return model.__dict__\n\n def alterar(self, model: EnderecoModel):\n comando_sql = \"\"\" UPDATE {}\n SET \n LOGRADOURO = '{}',\n NUMERO = '{}',\n COMPLEMENTO = '{}',\n BAIRRO = '{}',\n CIDADE = '{}',\n CEP = '{}'\n WHERE ID = {}\n \"\"\".format(self.tabela, model.logradouro, model.numero, model.complemento, model.bairro, model.cidade, model.cep, model.id )\n super().alterar(comando_sql)\n return model.__dict__\n\n def deletar(self, id):\n return super().deletar(id)\n\n","sub_path":"Aula52/dao/endereco_dao.py","file_name":"endereco_dao.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"619224756","text":"import pytest\n\nfrom forms.models import Field, FieldType, Section\nfrom forms.utils import clone_object\n\n\n@pytest.mark.django_db\ndef test_form_cloning(basic_template_form):\n section_count = Section.objects.all().count()\n field_count = Field.objects.all().count()\n fieldtype_count = FieldType.objects.all().count()\n\n new_form = clone_object(basic_template_form)\n\n new_section_count = Section.objects.all().count()\n new_field_count = Field.objects.all().count()\n new_fieldtype_count = FieldType.objects.all().count()\n\n assert new_form.id != basic_template_form\n assert new_section_count == section_count * 2\n assert new_field_count == field_count * 2\n assert new_fieldtype_count == fieldtype_count\n","sub_path":"forms/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"383422987","text":"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport time\r\nimport logging\r\n\r\nimport numpy as np\r\nfrom 
six.moves import xrange # pylint: disable=redefined-builtin\r\nimport tensorflow as tf\r\nfrom tensorflow.python.ops import variable_scope as vs\r\n\r\nfrom util import Progbar, minibatches\r\n\r\nfrom evaluate import exact_match_score, f1_score\r\n\r\nfrom qa_model import pad_sequences, my_f1_score, my_em_score\r\n\r\nfrom qa_data import PAD_ID\r\n\r\nimport datetime\r\n\r\nlogger = logging.getLogger(\"hw4\")\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nmax_q_words = 25 # longest sequence to parse\r\nmax_c_words = 250\r\nembed_size = 100 #specified in qa_data.py. 50 is default. get_started.sh uses 100\r\nhidden_size = 75 #about halfway between the embedding size and the eventual output size (2)\r\nbatch_size = 64 #32 was default but apparantly azure has high memery so going higher is good\r\nnum_epochs = 1 #they say we should be running for 5-7 epochs. Keep it at 5 till you know it's your best model. IT should improve most after 1 anyway\r\n\r\n\r\ndef run(load_train_dir, save_train_dir):\r\n\r\n\tstart_time = datetime.datetime.now().replace(microsecond=0)\r\n\r\n\tprint(load_train_dir)\r\n\tprint(save_train_dir)\r\n\r\n\tprint(\"creating session\")\r\n\tsess = tf.Session()\r\n\r\n\tprint(\"running\")\r\n\tprint(\"reading in data\")\r\n\ttext_file = open(\"./data/squad/train.span\", \"r\")\r\n\tlabels = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\r\n\tnum_entries = int(len(labels)/1)-1 #take out the /1000 when you run for real. ?? ? ? ?marks are just to remind me to do that\r\n\t#labels = tf.convert_to_tensor(labels)\r\n\t\r\n\r\n\tprint(\"num entries = \", num_entries)\r\n\tprint(\"num epochs = \", num_epochs)\r\n\tprint(\"batch size = \", batch_size)\r\n\r\n\t\r\n\r\n\tprint(\"creating model\")\r\n\tinput_q_placeholder = tf.placeholder(tf.float32, shape = (None, max_q_words, embed_size), name='input_q_placeholder') \r\n\tinput_c_placeholder = tf.placeholder(tf.float32, shape = (None, max_c_words, embed_size), name='input_c_placeholder')\r\n\tstart_answer_placeholder = tf.placeholder(tf.int32, shape = (None, max_c_words), name='start_answer_placeholder')\r\n\tend_answer_placeholder = tf.placeholder(tf.int32, shape = (None, max_c_words), name='end_answer_placeholder')\r\n\tmask_q_placeholder = tf.placeholder(tf.bool, shape = (None, max_q_words), name='mask_q_placeholder')\r\n\tmask_c_placeholder = tf.placeholder(tf.bool, shape = (None, max_c_words), name='mask_q_placeholder')\r\n\r\n\tprint(\"reading labels\")\r\n\r\n\t#start_answer = []\r\n\t#end_answer = []\r\n\tstart_true = []\r\n\tend_true = []\r\n\t#start_index = []\r\n\t#end_index =[]\r\n\r\n\tfor i in range(num_entries):\t#batch_size*batch_num, batch_size*(batch_num+1)\r\n\t\t#if (i%1 == 0):\r\n\t\t#\tprint(\"Label # \", i ,\" of \", num_entries)\r\n\t\tnums = labels[i].split()\r\n\t\tstart_true.append(int(nums[0]))\r\n\t\tend_true.append(int(nums[1]))\r\n\t\t#start_true.append(start_index) \r\n\t\t#end_true.append(end_index) \r\n\tstart_answer = (tf.one_hot(indices = start_true, depth = max_c_words, dtype = tf.int32).eval(session = sess))\r\n\tend_answer = (tf.one_hot(indices = end_true, depth = max_c_words, dtype = tf.int32).eval(session = sess))\r\n\t\r\n\r\n\ttext_file = open(\"./data/squad/train.ids.question\", \"r\")\r\n\tinputs_q = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\r\n\tprint(\"reading questions\")\r\n\tmyMatrix_q = []\r\n\tfor i in range(num_entries):\r\n\t\t#if (i%1000 == 0):\r\n\t\t#\tprint(\"Question # \", i ,\" of \", num_entries)\r\n\t\tnums = 
inputs_q[i].split()\r\n\t\tmyArray = []\r\n\t\tfor j in range(len(nums)):\r\n\t\t\tmyArray.append(int(nums[j]))\r\n\t\tmyMatrix_q.append(myArray)\r\n\r\n\ttext_file = open(\"./data/squad/train.ids.context\", \"r\")\r\n\tinputs_c = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\t\r\n\tprint(\"reading contexts\")\r\n\tmyMatrix_c = []\r\n\tfor i in range(num_entries):\r\n\t\t#if (i%1000 == 0):\r\n\t\t#\tprint(\"Context index # \", i ,\" of \", num_entries)\r\n\t\tnums = inputs_c[i].split()\r\n\t\tmyArray = []\r\n\t\tfor j in range(len(nums)):\r\n\t\t\tmyArray.append(int(nums[j]))\r\n\t\tmyMatrix_c.append(myArray)\r\n\r\n\t\t\r\n\ttext_file = open(\"./data/squad/train.context\", \"r\")\r\n\tinputs_c_text = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\r\n\t#print(inputs_c_text[1])\r\n\r\n\tc_text = []\r\n\tfor i in range(num_entries):\r\n\t\twords = inputs_c_text[i].split(\" \")\r\n\t\tmyArray = []\r\n\t\tfor j in range(len(words)):\r\n\t\t\tmyArray.append(words[j])\r\n\t\tc_text.append(myArray)\r\n\r\n\t\t\r\n\r\n\tpadded_q_inputs, masks_q = zip(*pad_sequences(data = myMatrix_q, max_length = max_q_words))\r\n\tpadded_c_inputs, masks_c = zip(*pad_sequences(data = myMatrix_c, max_length = max_c_words))\r\n\r\n\r\n\r\n\tprint(\"loading embeddings\")\r\n\tembed_path = \"./data/squad/glove.trimmed.100.npz\"\r\n\tpretrained_embeddings = np.load(embed_path)\r\n\tlogger.info(\"Keys\")\r\n\tlogger.info(pretrained_embeddings.keys())\r\n\tlogger.info(\"Initialized embeddings.\")\r\n\tpretrained_embeddings = tf.constant(pretrained_embeddings.f.glove) \r\n\r\n\tembedded_q = (tf.nn.embedding_lookup(pretrained_embeddings, padded_q_inputs).eval(session = sess))\r\n\tembedded_c = (tf.nn.embedding_lookup(pretrained_embeddings, padded_c_inputs).eval(session = sess))\r\n\tprint(\"Done Embedding\")\r\n\r\n\t\r\n\r\n\tprint(\"encoding\")\r\n\r\n\tmask_int_q = tf.cast(mask_q_placeholder, tf.int32)\r\n\tsrclen_q = tf.reduce_sum(mask_int_q, 1)\r\n\tmask_int_c = tf.cast(mask_c_placeholder, tf.int32)\r\n\tsrclen_c = tf.reduce_sum(mask_int_c, 1)\r\n\t\r\n\tscope_q = \"scope_q\" \r\n\tscope_c = \"scope_c\" \r\n\tscope_decode = \"scope_decode\"\r\n\r\n\r\n\t\r\n\tLSTM_cell_q = tf.nn.rnn_cell.BasicLSTMCell(num_units = hidden_size)\t\r\n\tLSTM_cell_c = tf.nn.rnn_cell.BasicLSTMCell(num_units = hidden_size)\r\n\tLSTM_cell_decode = tf.nn.rnn_cell.BasicLSTMCell(num_units = hidden_size)\r\n\r\n\tprint(\"filtering\")\r\n\t\r\n\tnormed_q = tf.nn.l2_normalize(input_q_placeholder, dim=2)\r\n\tnormed_c = tf.nn.l2_normalize(input_c_placeholder, dim=2)\r\n\r\n\tmatrix = tf.matmul(normed_q, normed_c, transpose_b = True)\r\n\tattention = tf.reduce_max(matrix, axis = 1)\r\n\tattention = tf.expand_dims(attention, axis = 2)\r\n\tfiltered_c = input_c_placeholder*attention\r\n\r\n\toutputs_q, state_q = tf.nn.bidirectional_dynamic_rnn(LSTM_cell_q, LSTM_cell_q, input_q_placeholder, srclen_q, scope = scope_q, time_major = False, dtype = tf.float32)\r\n\toutputs_c, state_c = tf.nn.bidirectional_dynamic_rnn(LSTM_cell_c, LSTM_cell_c, filtered_c, srclen_c, scope = scope_c, time_major = False, dtype = tf.float32)\r\n\r\n\thidden_q = (state_q[0][1], state_q[1][1])\r\n\thidden_q = tf.concat(1, hidden_q)\r\n\r\n\tq_piazza_int = hidden_q\r\n\tq_piazza = tf.expand_dims(q_piazza_int, axis = 2)\r\n\r\n\tX_piazza = tf.concat(2, outputs_c)\r\n\tX_piazza = tf.transpose(X_piazza, [0,2,1])\r\n\r\n\tintermediate = q_piazza*X_piazza\r\n\r\n\tp_s_true = tf.reduce_sum(intermediate, axis = 1) #starting place probabilities\r\n\tp_s_true = 
tf.nn.softmax(p_s_true)\r\n\r\n\r\n\tp_s_false = 1-p_s_true\r\n\tp_s = tf.pack([p_s_false, p_s_true], axis = 2)\r\n\r\n\tp_e_true = p_s_true\r\n\tp_e_true = tf.nn.softmax(p_e_true)\r\n\tp_e_false = 1-p_e_true\r\n\tp_e = tf.pack([p_e_false, p_e_true], axis = 2)\r\n\r\n\ta_s = tf.argmax(p_s_true, axis=1)\r\n\ta_e = tf.argmax(p_e_true, axis=1)\r\n\ta_e = tf.maximum(a_s, a_e)\r\n\r\n\tl1 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = p_s, labels = start_answer_placeholder)\r\n\tl2 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = p_e, labels = end_answer_placeholder)\r\n\tloss = l1+l2\r\n\t\r\n\tloss_by_question = tf.reduce_sum(loss, axis = 1)\r\n\r\n\tavgLoss = tf.reduce_mean(loss_by_question)\r\n\r\n\ttrain_step = tf.train.AdamOptimizer().minimize(avgLoss)\r\n\ttf.global_variables_initializer().run(session = sess)\r\n\r\n\tprint(\"training\")\r\n\tnum_batches = int(np.floor(num_entries/batch_size))\r\n\tprint(\"num batches: \", num_batches)\r\n\r\n\tprint(\"Creating saver\")\r\n\tsaver = tf.train.Saver()\r\n\r\n\t\t\r\n\tfor j in range (num_epochs): #epochs\r\n\t\tbatch_error_total = 0\r\n\t\tprint(\"epoch %d of %d\" % (j+1, num_epochs))\r\n\t\tprint(\"not shuffling yet\")\r\n\r\n\t\tfor i in range(num_batches): \r\n\t\t\tif (i%100 == 0):\r\n\t\t\t\tprint(\"Batch # \", i ,\" of \", num_batches)\r\n\t\t\tbatch_q = embedded_q[i*batch_size:(i+1)*batch_size] \r\n\t\t\tbatch_c = embedded_c[i*batch_size:(i+1)*batch_size]\r\n\t\t\tbatch_mask_q = masks_q[i*batch_size:(i+1)*batch_size]\r\n\t\t\tbatch_mask_c = masks_c[i*batch_size:(i+1)*batch_size]\r\n\t\t\tstart_batch = []\r\n\t\t\tend_batch = []\r\n\t\t\tfor k in range(batch_size):\r\n\t\t\t\tstart_batch.append(start_answer[i*batch_size+k])\r\n\t\t\t\tend_batch.append(end_answer[i*batch_size+k])\t\t\t\r\n\t\t\t_, batch_error, p_s_true_ = sess.run([train_step, avgLoss, p_s_true], feed_dict={input_q_placeholder: batch_q, input_c_placeholder: batch_c, start_answer_placeholder: start_batch, end_answer_placeholder: end_batch, mask_q_placeholder: batch_mask_q, mask_c_placeholder: batch_mask_c})\t\t\t\r\n\t\t\tbatch_error_total += float(batch_error)/num_batches\r\n\t\tavg_train_error = batch_error_total\r\n\t\tprint(\"epoch %d has average training error of %f\" % (j+1, avg_train_error))\r\n\t\tprint(\"saving\")\r\n\t\ttf.add_to_collection('vars2', a_e)\r\n\t\ttf.add_to_collection('vars2', a_s)\r\n\t\ttf.add_to_collection('vars2', input_q_placeholder)\r\n\t\ttf.add_to_collection('vars2', input_c_placeholder)\r\n\t\ttf.add_to_collection('vars2', mask_q_placeholder)\r\n\t\ttf.add_to_collection('vars2', mask_c_placeholder)\r\n\r\n\t\tsaver.save(sess, 'my-model')\r\n\r\n\t\tprint(\"Evaluating EM and f1 scores\")\r\n\r\n\t\ttest_samples = np.minimum(num_entries, 100)\r\n\r\n\t\ttest_q = embedded_q[0:test_samples]\r\n\t\ttest_c = embedded_c[0:test_samples]\r\n\t\tbatch_mask_q = masks_q[0:test_samples]\r\n\t\tbatch_mask_c = masks_c[0:test_samples]\r\n\r\n\t\tstart_pred, end_pred = sess.run([a_s, a_e], feed_dict={input_q_placeholder: test_q, input_c_placeholder: test_c, mask_q_placeholder: batch_mask_q, mask_c_placeholder: batch_mask_c})\r\n\r\n\t\ttheir_f1 = []\r\n\t\ttheir_em = []\r\n\t\tmy_f1 = []\r\n\t\tmy_em = []\r\n\r\n\r\n\t\tfor i in range(test_samples):\r\n\t\t\tp = c_text[i]\r\n\t\t\tanswer = p[start_pred[i]: end_pred[i]+1]\r\n\t\t\tanswer = ' '.join(answer)\r\n\t\t\ttrue_answer = p[start_true[i]: end_true[i]+1]\r\n\t\t\ttrue_answer = ' '.join(true_answer)\r\n\t\t\ttheir_f1.append(f1_score(answer, 
true_answer))\r\n\t\t\ttheir_em.append(exact_match_score(answer, true_answer))\r\n\t\t\tmy_f1.append(my_f1_score(start_pred[i], end_pred[i], start_true[i], end_true[i]))\r\n\t\t\tmy_em.append(my_em_score(start_pred[i], end_pred[i], start_true[i], end_true[i]))\r\n\t\ttheir_f1_score = np.average(their_f1)\r\n\t\ttheir_em_score = np.average(their_em)\r\n\t\tf1 = np.average(my_f1)\r\n\t\tem = np.average(my_em)\r\n\r\n\t\tprint(\"Their f1 train score: \", their_f1_score, \" em score: \", their_em_score, \" on \", test_samples, \" samples\")\r\n\t\tprint(\"My f1 train score: \", f1, \" em score: \", em, \" on \", test_samples, \" samples\")\r\n\r\n\r\n\tprint(\"Evaluating EM and f1 scores on Validation set\")\r\n\r\n\ttest_samples = np.minimum(num_entries, 100)\r\n\r\n\tprint(\"reading labels\")\r\n\ttext_file = open(\"./data/squad/val.span\", \"r\")\r\n\tlabels = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\r\n\tstart_true_val = []\r\n\tend_true_val = []\r\n\r\n\tfor i in range(test_samples):\t#batch_size*batch_num, batch_size*(batch_num+1)\r\n\t\tnums = labels[i].split()\r\n\t\tstart_true_val.append(int(nums[0]))\r\n\t\tend_true_val.append(int(nums[1]))\r\n\r\n\ttext_file = open(\"./data/squad/val.ids.question\", \"r\")\r\n\tinputs_q = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\r\n\tprint(\"reading questions\")\r\n\tmyMatrix_q = []\r\n\tfor i in range(test_samples):\r\n\t\tnums = inputs_q[i].split()\r\n\t\tmyArray = []\r\n\t\tfor j in range(len(nums)):\r\n\t\t\tmyArray.append(int(nums[j]))\r\n\t\tmyMatrix_q.append(myArray)\r\n\r\n\ttext_file = open(\"./data/squad/val.ids.context\", \"r\")\r\n\tinputs_c = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\t\r\n\tprint(\"reading contexts\")\r\n\tmyMatrix_c = []\r\n\tfor i in range(test_samples):\r\n\t\tnums = inputs_c[i].split()\r\n\t\tmyArray = []\r\n\t\tfor j in range(len(nums)):\r\n\t\t\tmyArray.append(int(nums[j]))\r\n\t\tmyMatrix_c.append(myArray)\r\n\r\n\t\t\r\n\ttext_file = open(\"./data/squad/val.context\", \"r\")\r\n\tinputs_c_text = text_file.read().split(\"\\n\")\r\n\ttext_file.close()\r\n\r\n\tc_text_val = []\r\n\tfor i in range(test_samples):\r\n\t\twords = inputs_c_text[i].split(\" \")\r\n\t\tmyArray = []\r\n\t\tfor j in range(len(words)):\r\n\t\t\tmyArray.append(words[j])\r\n\t\tc_text_val.append(myArray)\r\n\r\n\r\n\tpadded_q_inputs_val, masks_q_val = zip(*pad_sequences(data = myMatrix_q, max_length = max_q_words))\r\n\tpadded_c_inputs_val, masks_c_val = zip(*pad_sequences(data = myMatrix_c, max_length = max_c_words))\r\n\r\n\tembedded_q_val = (tf.nn.embedding_lookup(pretrained_embeddings, padded_q_inputs_val).eval(session = sess))\r\n\tembedded_c_val = (tf.nn.embedding_lookup(pretrained_embeddings, padded_c_inputs_val).eval(session = sess))\r\n\r\n\ttest_q = embedded_q_val\r\n\ttest_c = embedded_c_val\r\n\tbatch_mask_q = masks_q_val\r\n\tbatch_mask_c = masks_c_val\r\n\r\n\tstart_pred_val, end_pred_val = sess.run([a_s, a_e], feed_dict={input_q_placeholder: test_q, input_c_placeholder: test_c, mask_q_placeholder: batch_mask_q, mask_c_placeholder: batch_mask_c})\r\n\r\n\ttheir_f1 = []\r\n\ttheir_em = []\r\n\tmy_f1 = []\r\n\tmy_em = []\r\n\r\n\tprint(test_samples)\r\n\tprint(len(end_true_val))\r\n\tfor i in range(test_samples):\r\n\t\tp = c_text_val[i]\r\n\t\tanswer = p[start_pred_val[i]: end_pred_val[i]+1]\r\n\t\tanswer = ' '.join(answer)\r\n\t\ttrue_answer = p[start_true_val[i]: end_true_val[i]+1]\r\n\t\ttrue_answer = ' '.join(true_answer)\r\n\t\ttheir_f1.append(f1_score(answer, 
true_answer))\r\n\t\ttheir_em.append(exact_match_score(answer, true_answer))\r\n\t\tmy_f1.append(my_f1_score(start_pred_val[i], end_pred_val[i], start_true_val[i], end_true_val[i]))\r\n\t\tmy_em.append(my_em_score(start_pred_val[i], end_pred_val[i], start_true_val[i], end_true_val[i]))\r\n\ttheir_f1_score = np.average(their_f1)\r\n\ttheir_em_score = np.average(their_em)\r\n\tf1 = np.average(my_f1)\r\n\tem = np.average(my_em)\r\n\r\n\tprint(\"Their f1 valscore: \", their_f1_score, \" em score: \", their_em_score, \" on \", test_samples, \" samples\")\r\n\tprint(\"My f1 val score: \", f1, \" em score: \", em, \" on \", test_samples, \" samples\")\r\n\tend_time = datetime.datetime.now().replace(microsecond=0)\r\n\tprint(\"Elapsed Time: \", end_time-start_time)\r\n\r\n\t\t\r\nload_train_dir = \"./train\"\r\nsave_train_dir = \"./train\"\r\nrun(load_train_dir, save_train_dir)","sub_path":"code/procedural_model_attention.py","file_name":"procedural_model_attention.py","file_ext":"py","file_size_in_byte":13794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"99628415","text":"#\n# NAME:\n# run_proflike.py\n#\n# PURPOSE:\n# To use J-factor catalog maps (e.g. 2MASS) to obtain DM ID limits from\n# Fermi data or simulated data\n# Here using DarkSky\n#\n# HISTORY:\n# Written by Nick Rodd, MIT, 25 November 2016\n\nimport numpy as np\nimport healpy as hp\nfrom global_variables import *\nimport sys\nsys.path.insert(0, work_dir + 'PerformScan/code/')\nimport calc_llflux_tmp as clf\nimport argparse\n\n### Command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--ebin\",\n action=\"store\", dest=\"ebin\", default=0, type=int)\nparser.add_argument(\"--tag\",\n action=\"store\",dest='tag', default=\"ds_boost_run\",type=str)\nparser.add_argument(\"--mcfile\",\n action=\"store\",dest='mcfile', default=\"mcfile\",type=str)\nparser.add_argument(\"--jmap\",\n action=\"store\",dest='jmap', default=\"DarkSky_J_true\",type=str)\n\nresults=parser.parse_args()\nebin=results.ebin\ntag=results.tag\nmcfile=results.mcfile\njmap=results.jmap\n\n### Basic variables\nband_mask_val = 30 # At what distance to mask the plane\nnside=128\n\n# Load J-factor map\nJ_map_arr = np.load('/tigress/smsharma/public/DarkSkyMaps/CutMaps/' + jmap + '.npy')\n\n# Load fake data\nfake_data = np.load(work_dir + 'FakeMaps/' + mcfile)\n\n# Now run\ncli = clf.calc_llflux(J_map_arr=J_map_arr,tag=tag,band_mask=band_mask_val,external_data=fake_data,calc_flux_array=True,flux_array_ebin=ebin,bin_min=-8,bin_max=1,nbins=50)\n","sub_path":"Scan-FullSky/PerformScan/run_proflike_tmp.py","file_name":"run_proflike_tmp.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"564759276","text":"\nfrom torchvision import models\nimport torch.nn as nn\nfrom torch.nn.modules.batchnorm import BatchNorm1d\n\nfrom modules.layer.grl import GradientReverseLayer\n\n\nclass SingleLayerFeatureExtractor(nn.Module):\n def __init__(self, feature_dim=2048, output_dim=256, final_bn=True):\n super(SingleLayerFeatureExtractor, self).__init__()\n self.classifier_layer_1 = nn.Linear(feature_dim, output_dim)\n self.classifier_layer_1.weight.data.normal_(0, 0.005)\n self.classifier_layer_1.bias.data.fill_(0.1)\n self.bn = BatchNorm1d(output_dim)\n\n def forward(self, inputs):\n outputs = inputs\n outputs = self.classifier_layer_1(outputs)\n high_features = self.bn(outputs) \n return high_features\n\n\nclass 
SimpleFeatureExtractor(nn.Module):\n def __init__(self, feature_dim=2048, output_dim=256, final_bn=True):\n super(SimpleFeatureExtractor, self).__init__()\n self.classifier_layer_1 = nn.Linear(feature_dim, feature_dim//2)\n self.classifier_layer_2 = nn.Linear(feature_dim//2, output_dim)\n self.classifier_layer_1.weight.data.normal_(0, 0.005)\n self.classifier_layer_1.bias.data.fill_(0.1)\n self.classifier_layer_2.weight.data.normal_(0, 0.005)\n self.classifier_layer_2.bias.data.fill_(0.1)\n self.bn = BatchNorm1d(feature_dim//2)\n\n def forward(self, inputs):\n outputs = inputs\n outputs = self.classifier_layer_1(outputs)\n outputs = self.bn(outputs) \n outputs = self.classifier_layer_2(outputs)\n return outputs\n\n\n\nclass VGGFeatureExtractor(nn.Module):\n def __init__(self, final_bn=False):\n super(VGGFeatureExtractor, self).__init__()\n ## set base network\n model_vgg16 = models.vgg16(pretrained=True)\n self.features = model_vgg16.features\n self.classifier = model_vgg16.classifier[:-1]\n self.high_dim = model_vgg16.classifier[-1].in_features\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n high_features = self.classifier(x)\n return high_features\n\n def output_dim(self):\n return self.high_dim\n\n\nclass FeatureClassifier(nn.Module):\n def __init__(self, feature_dim=256, class_num=31):\n super(FeatureClassifier, self).__init__()\n self.classifier_layer_1 = nn.Linear(feature_dim, feature_dim // 2)\n self.classifier_layer_2 = nn.Linear(feature_dim // 2, class_num)\n self.relu1 = nn.ReLU()\n self.dropout1 = nn.Dropout(0.5)\n self.softmax = nn.Softmax(dim=1)\n\n ## initialization\n self.classifier_layer_1.weight.data.normal_(0, 0.005)\n self.classifier_layer_1.bias.data.fill_(0.1)\n self.classifier_layer_2.weight.data.normal_(0, 0.01)\n self.classifier_layer_2.bias.data.fill_(0.0)\n\n def forward(self, inputs):\n outputs = inputs\n outputs = self.dropout1(self.relu1(self.classifier_layer_1(outputs)))\n outputs = self.classifier_layer_2(outputs)\n softmax_outputs = self.softmax(outputs)\n return outputs, softmax_outputs\n\n\nclass HSFeatureClassifier(nn.Module):\n def __init__(self, in_dim, out_dim, bottle_neck_dim=256):\n super(HSFeatureClassifier, self).__init__()\n self.bottleneck = nn.Linear(in_dim, bottle_neck_dim)\n self.fc = nn.Linear(bottle_neck_dim, out_dim)\n self.bn = nn.BatchNorm1d(bottle_neck_dim)\n self.relu = nn.LeakyReLU(0.2, inplace=True)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x):\n x = self.bottleneck(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.fc(x)\n out = self.softmax(x)\n return x, out\n\n\nclass SimpleFeatureClassifier(nn.Module):\n def __init__(self, feature_dim=256, class_num=31):\n super(SimpleFeatureClassifier, self).__init__()\n self.classifier_layer_1 = nn.Linear(feature_dim, class_num)\n self.softmax = nn.Softmax()\n\n ## initialization\n self.classifier_layer_1.weight.data.normal_(0, 0.005)\n self.classifier_layer_1.bias.data.fill_(0.1)\n\n def forward(self, inputs):\n outputs = inputs\n outputs = self.classifier_layer_1(outputs)\n softmax_outputs = self.softmax(outputs)\n return outputs, softmax_outputs\n\nclass SimpleDiscriminator(nn.Module):\n def __init__(self, feature_dim, hidden_dim, grl=True):\n super(SimpleDiscriminator, self).__init__()\n\n self.ad_layer1 = nn.Linear(feature_dim, hidden_dim)\n self.ad_layer2 = nn.Linear(hidden_dim, 1)\n\n self.relu = nn.ReLU()\n self.grl = grl\n if self.grl:\n self.grl_layer = GradientReverseLayer()\n self.sigmoid = nn.Sigmoid()\n self.drop_layer1 = 
nn.Dropout(0.5)\n\n        self.ad_layer1.weight.data.normal_(0, 0.01)\n        self.ad_layer2.weight.data.normal_(0, 0.3)\n        self.ad_layer1.bias.data.fill_(0.0)\n        self.ad_layer2.bias.data.fill_(0.0)\n        \n    def forward(self, inputs):\n        outputs = inputs\n        if self.grl:\n            outputs = self.grl_layer(outputs)\n        outputs = self.drop_layer1(self.relu(self.ad_layer1(outputs)))\n        outputs = self.sigmoid(self.ad_layer2(outputs))\n        return outputs\n\n\nclass StrongDiscriminator(nn.Module):\n    def __init__(self, feature_dim, hidden_dim, grl=True):\n        super(StrongDiscriminator, self).__init__()\n\n        self.ad_layer1 = nn.Linear(feature_dim, hidden_dim)\n        self.ad_layer2 = nn.Linear(hidden_dim,hidden_dim)\n        self.ad_layer3 = nn.Linear(hidden_dim, 1)\n\n        self.relu = nn.ReLU()\n        self.grl = grl\n        if self.grl:\n            self.grl_layer = GradientReverseLayer()\n        self.sigmoid = nn.Sigmoid()\n        self.drop_layer1 = nn.Dropout(0.5)\n        self.drop_layer2 = nn.Dropout(0.5)\n\n        self.ad_layer1.weight.data.normal_(0, 0.01)\n        self.ad_layer2.weight.data.normal_(0, 0.01)\n        self.ad_layer3.weight.data.normal_(0, 0.3)\n        self.ad_layer1.bias.data.fill_(0.0)\n        self.ad_layer2.bias.data.fill_(0.0)\n        self.ad_layer3.bias.data.fill_(0.0)\n        \n    def forward(self, inputs):\n        outputs = inputs\n        if self.grl:\n            outputs = self.grl_layer(outputs)\n        outputs = self.drop_layer1(self.relu(self.ad_layer1(outputs)))\n        outputs = self.drop_layer2(self.relu(self.ad_layer2(outputs)))\n        outputs = self.sigmoid(self.ad_layer3(outputs))\n        return outputs\n\n\n\n","sub_path":"modules/net/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"520194097","text":"# -*- coding: utf-8 -*-\n\"\"\"\napi module\n----------\n\nContains the general network interactions for net.\n\"\"\"\n\n# std imports\nimport re\nimport math\nimport threading\nimport subprocess\n\n# package imports\nimport net\n\n# local imports\nfrom net.imports import ConnectionRefusedError, PermissionError\n\n__all__ = [\n    'peers',\n    'peer_group'\n]\n\n\n# threading\nLOCK = threading.Lock()\nIP_REGEX = re.compile(r'\\d+\\.\\d+\\.\\d+\\.\\d+')\n\n\n# cache\nPEERS = None\n\n\ndef peer_group(name=None, hubs_only=False, on_host=False):\n    \"\"\"\n    Get a list of peers that have been detected by net. You can get all\n    peers in the same group as the requesting peer just by calling this function\n    with no filter name. You can also request to only get the hubs for a\n    specific group.\n\n    .. code-block:: python\n\n        # Will get all peers in the same group that net.Peer() is in.\n        peers = net.peer_group()\n\n        # Peers in group1\n        peers = net.peer_group(\"group1\")\n\n        # Get the hubs only for the group net.Peer() is in.\n        hubs = net.peer_group(hubs_only=True)\n\n        # Get only the hubs in group1\n        hubs = net.peer_group(\"group1\", hubs_only=True)\n\n    :param name: name of the group\n    :param hubs_only: bool\n    :param on_host: bool\n    :return: list of ``net.Peer``\n    \"\"\"\n    group_peers = []\n    if not name:\n        name = net.Peer().group\n\n    found_peers = peers(groups=[name], hubs_only=hubs_only, on_host=on_host)\n\n    group_data = found_peers.get(name)\n    if group_data:\n        return group_data\n    return group_peers\n\n\ndef peers(refresh=False, groups=None, on_host=False, hubs_only=False):\n    \"\"\"\n    Get a list of all peers on your network. This is a cached value since the\n    call to graph the network can be long. 
You can also limit this search to\n only look for operating peers on the localhost which does not require the\n long network scan, just set the ``on_host`` kwarg to True.\n\n Hubs act as the centers for certain application events or processes. In some\n cases, you may only want to subscribe or communicate with hubs. You can\n specify this through the ``hubs_only`` kwarg.\n\n The initial call to this will hang for a few seconds. Under the hood, it is\n making a shell call to ``arp -a`` which will walk your network and find all\n hosts.\n\n .. code-block:: python\n\n # Standard call to get the peers on your network.\n peers = net.peers()\n\n # Only search for peers on local host and not on the network.\n peers = net.peers(on_host=True)\n\n # Refresh all peers in the cache\n peers = net.peers(refresh=True)\n\n # Refresh the cache with peers in group1\n peers = net.peers(\"group1\", refresh=True)\n\n # Refresh the cache with peers in group1 and 2\n peers = net.peers([\"group1\", \"group2\"], refresh=True)\n\n # Refresh the cache with all of the hubs on the network regardless of group.\n peers = net.peers(hubs_only=True, refresh=True)\n\n # Refresh the cache with only hubs in group1 and 2\n peers = net.peers([\"group1\", \"group2\"], hubs_only=True, refresh=True)\n\n :param refresh: Bool\n :param groups: str\n :param on_host: Bool\n :param hubs_only: Bool\n :return: {\n\n # Peers\n 'peers': {\n '': ``net.Peer``,\n },\n\n # Groups\n None: [\n ''\n ],\n \"SomeOtherGroup\": [\n ''\n ]\n }\n \"\"\"\n if PEERS is None or refresh:\n get_peers(groups, on_host, hubs_only)\n\n return PEERS\n\n\ndef local_network():\n \"\"\"\n Runs ``arp -a`` to get all hosts.\n\n :return: list of ip address on the local network\n \"\"\"\n raw_output = bytes(subprocess.check_output('arp -a', shell=True)).decode('ascii')\n return IP_REGEX.findall(raw_output)\n\n\ndef find_peers_in_block(ips, groups=None, hubs_only=False):\n \"\"\"\n Sniffs out peers in the defined group based on the list of ip's\n\n :param ips: list of ip addresses\n :param groups: the list of groups you'd like to filter with. Defaults to the\n same as the current peer.\n :return: List of peer addresses\n \"\"\"\n global PEERS\n\n # pull in the local peer\n peer = net.Peer()\n server = peer.server\n\n if not groups:\n groups = [peer.group]\n\n # loop over all the addresses\n for address in ips:\n\n # loop over ports\n for port in server.ports():\n\n # skip self\n if port == server.port and address == server.host:\n continue\n\n try:\n # ping the peer and if it responds with the proper info,\n # register it. 
Shut off the logger for this so we dont spam\n # the console.\n net.LOGGER.disabled = True\n info = net.info(peer=(address, port), time_out=0.1)\n net.LOGGER.disabled = False\n\n # skip registering this if the info is already in the\n # registry.\n if info['tag'] in PEERS['peers'].values():\n continue\n\n # filter out peers that aren't in the groups requested.\n if info['group'] not in groups:\n continue\n\n # filter out non-hubs if that is what was requested\n if hubs_only and not info['hub']:\n continue\n\n # construct the peer representation\n new_peer = net.Peer(\n host=info['host'],\n port=info['port'],\n group=info['group'],\n hub=info['hub'],\n subscriptions=info['subscriptions'],\n connections=info['connections'],\n flags=info['flags'],\n )\n\n # acquire the lock and register\n LOCK.acquire()\n\n # register with the general information per peer\n PEERS['peers'][info['tag']] = new_peer\n\n # register with the group registry\n group_registry = PEERS.setdefault(info['group'], [])\n group_registry.append(new_peer)\n\n # release the shared resource\n LOCK.release()\n\n except (PermissionError, ConnectionRefusedError, OSError) as err:\n net.LOGGER.disabled = False\n\n\ndef get_peers(groups, on_host, hubs_only):\n \"\"\"\n Get a list of all valid remote peers.\n\n :param groups: List of groups\n :param on_host: Search only localhost\n :param hubs_only: Get Hubs only\n :return: List of peer addresses\n \"\"\"\n global PEERS\n PEERS = {\n 'peers': {}\n }\n\n # get this peer for pinging\n peer = net.Peer()\n server = peer.server\n\n # create subnet\n network = [net.HOST_IP]\n if not on_host:\n network = local_network()\n\n # logging help\n total_hosts = len(network)\n total_ports = len(server.ports())\n net.LOGGER.debug(\n \"Calculated network sweep: {0} hosts X {1} ports = {2} pings\".format(\n total_hosts, total_ports, total_hosts * total_ports\n )\n )\n\n # skip the threading integration if the environment does not call for it.\n if net.THREAD_LIMIT <= 0:\n return find_peers_in_block(network, groups, hubs_only)\n\n # calculate thread chunk. There should always be at least one thread chunk\n thread_chunks = max(int(math.ceil(total_hosts/net.THREAD_LIMIT)), 1)\n\n # loop over and spawn threads\n start = 0\n threads = []\n\n for chunk in range(0, net.THREAD_LIMIT):\n end = start + thread_chunks\n\n thread = threading.Thread(\n target=find_peers_in_block,\n args=(network[start:end], groups, hubs_only)\n )\n thread.setName(\"Network_Scanner_\" + str(chunk))\n thread.daemon = True\n threads.append(thread)\n thread.start()\n\n start = end\n\n # wait for all worker threads to finish\n for thread in threads:\n thread.join()\n\n return PEERS\n\n\ndef set_config(\n THREAD_LIMIT=None,\n PORT=None,\n PORT_RANGE=None,\n GROUP=None,\n IS_HUB=None,\n):\n \"\"\"\n Set a configuration value. 
These are configuration values that can be set at\n runtime to modify your net configuration.\n\n\n :return:\n \"\"\"\n","sub_path":"net/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"408287093","text":"import tensorflow as tf\r\nfrom tensorflow.python.ops import rnn, rnn_cell\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import model_selection\r\n\r\nsequence_length = 14\r\nsequence_depth = 4\r\nn_classes = 2\r\n\r\nepochs = 40\r\nbatch_size = 512\r\nrnn_size = 40\r\n\r\ndata = pd.read_csv('data.csv', header = None, skiprows=1)\r\n\r\ninput_features = []\r\ninput_labels = []\r\nfor i in range(len(data[0])):\r\n sequence = []\r\n for j in range(len(data[0][i])):\r\n if data[0][i][j] == 'A':\r\n sequence.append([1,0,0,0])\r\n if data[0][i][j] == 'T':\r\n sequence.append([0,1,0,0])\r\n if data[0][i][j] == 'G':\r\n sequence.append([0,0,1,0])\r\n if data[0][i][j] == 'C':\r\n sequence.append([0,0,0,1])\r\n input_features.append(sequence)\r\nfor i in range(len(data[1])):\r\n if data[1][i] == 0:\r\n input_labels.append([1,0])\r\n if data[1][i] == 1:\r\n input_labels.append([0,1])\r\n\r\ninput_features = np.array(input_features)\r\ninput_labels = np.array(input_labels)\r\n\r\ntraining_features, testing_features, training_labels, testing_labels = model_selection.train_test_split(input_features, input_labels, test_size=0.15)\r\n\r\nx = tf.placeholder(tf.float32, [None, sequence_length, sequence_depth])\r\ny = tf.placeholder(tf.float32, [None, n_classes])\r\n\r\ndef recurrent_neural_network(x):\r\n x = tf.transpose(x, [1,0,2])\r\n x = tf.reshape(x, [-1, sequence_depth])\r\n x = tf.split(0, sequence_length, x)\r\n\r\n weights = tf.Variable(tf.random_normal([rnn_size, n_classes]))\r\n biases = tf.Variable(tf.random_normal([n_classes]))\r\n\r\n lstm_cell = tf.nn.rnn_cell.LSTMCell(rnn_size, state_is_tuple = True)\r\n outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\r\n\r\n prediction = tf.matmul(outputs[-1], weights) + biases\r\n\r\n return prediction\r\n\r\nprediction = recurrent_neural_network(x)\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction,y))\r\noptimizer = tf.train.AdamOptimizer().minimize(cost)\r\n\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n feed_network = {x:training_features, y:training_labels}\r\n\r\n for epoch in range(epochs):\r\n epoch_loss = 0\r\n start = 0\r\n for _ in range(int(len(training_features)/batch_size)):\r\n epoch_x = training_features[start:start+batch_size]\r\n epoch_y = training_labels[start:start+batch_size]\r\n\r\n _,c = sess.run([optimizer, cost], feed_dict=feed_network)\r\n epoch_loss += c\r\n start += batch_size\r\n\r\n print(\"Epoch:\", str(epoch + 1) + \"/\" + str(epochs), \"Loss:\", epoch_loss)\r\n\r\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n print('Accuracy:',accuracy.eval({x:testing_features, y:testing_labels}))\r\n","sub_path":"LSTMClassification.py","file_name":"LSTMClassification.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"395864412","text":"# Instantiate the vectors\nsentences = []\nnext_chars = []\n# Loop for every sentence\nfor sentence in sheldon.split('\\n'):\n # Get 20 previous chars and next char; then shift by step\n for i in range(0, len(sentence) - 
chars_window, step):\n sentences.append(sentence[i:i + chars_window])\n next_chars.append(sentence[i + chars_window])\n\n# Define a Data Frame with the vectors\ndf = pd.DataFrame({'sentence': sentences, 'next_char': next_chars})\n\n# Print the initial rows\nprint(df.head())\n","sub_path":"Datacamp/Deep Learning for NLP in Python/Recurrent Neural Networks for Language Modeling in Python/chapter-4 ex-5.py","file_name":"chapter-4 ex-5.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"487324217","text":"# -*- coding: latin-1 -*-\n\nimport sqlite3\nfrom settings import GlacierParams\nimport datetime\n\n\ndef convert_dict_keys_to_alphanum(source_dict):\n return dict(map(\n lambda x: (x[0].lower().replace('-', '_'), x[1]),\n source_dict.items())\n )\n\n\nclass DBLogger():\n def __init__(self, database_path):\n self.database_path = database_path\n self.conn = None\n cursor = self.get_cursor()\n # cursor.execute('''drop table if exists requests''')\n # cursor.execute('''drop table if exists responses''')\n cursor.execute(\n '''create table if not exists requests (\n id integer primary key,\n content_length text,\n date text,\n host text,\n authorization text,\n x_amz_glacier_version text,\n x_amzn_requestid text,\n x_amz_date text,\n x_amz_content_sha256 text,\n x_amz_sha256_tree_hash text,\n headers text\n )'''\n )\n cursor.execute(\n '''create table if not exists responses (\n id integer primary key,\n content_length text,\n content_type text,\n date text,\n x_amzn_requestid text,\n x_amz_sha256_tree_hash text,\n headers text\n )'''\n )\n # Inventory table\n cursor.execute(\n '''create table if not exists inventory (\n id integer primary key,\n vault text,\n file_name text,\n file_size text,\n upload_date text,\n inventory_date text\n )'''\n )\n\n self.commit_and_close(cursor)\n\n def get_cursor(self):\n conn = sqlite3.connect(self.database_path)\n return conn.cursor()\n\n def commit_and_close(self, cursor):\n cursor.connection.commit()\n cursor.connection.close()\n\n def get_columns(self, table_name):\n cursor = self.get_cursor()\n stmt = cursor.execute(\"PRAGMA table_info(%s)\" % (table_name,))\n return [item[1] for item in stmt.fetchall()]\n\n def insert(self, table, headers, body, param):\n cursor = self.get_cursor()\n sql_friendly_headers = convert_dict_keys_to_alphanum(headers)\n # add the original headers dictionary to the table\n sql_friendly_headers['headers'] = str(headers)\n # add current timestamp\n sql_friendly_headers['record_date'] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')\n # add target url\n sql_friendly_headers['url'] = param.get(GlacierParams.URI)\n # add request method\n sql_friendly_headers['method'] = param.get(GlacierParams.METHOD)\n # add response body\n sql_friendly_headers['body'] = str(body)\n columns = self.get_columns(table)\n for header in sql_friendly_headers:\n # Add a column for each header\n if header not in columns:\n cursor.execute('ALTER TABLE %s ADD COLUMN %s text' % (table, header))\n # c.execute(\"PRAGMA table_info(requests)\")\n sql_columns = ', '.join(sorted(sql_friendly_headers.keys()))\n placeholders = ':' + ', :'.join(sorted(sql_friendly_headers.keys()))\n query = \"INSERT INTO %s (%s) VALUES (%s)\" % (table, sql_columns, placeholders)\n # print(query)\n # query = r\"\"\"INSERT INTO %s (request_id, date) VALUES (:request-id, :date)\"\"\" % (table,)\n # print(query)\n # print(sql_friendly_headers)\n result = cursor.execute(query, 
sql_friendly_headers)\n # print(result)\n self.commit_and_close(cursor)\n return result\n\n def insert_request(self, headers, param):\n return self.insert('requests', headers, None, param)\n\n def insert_response(self, headers, body, param):\n return self.insert('responses', headers, body, param)\n\n\nif __name__ == '__main__':\n logger = DBLogger('database.db')\n headers = {'authorization': datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')}\n logger.insert('requests', headers)\n # print(logger.get_columns('requests'))","sub_path":"dblogger.py","file_name":"dblogger.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"488125378","text":"'''\nfile: fifo_reward.py\nauthors: Kento Perera, Timothy Sah, and Dean Stratakos\ndate: December 13, 2019\n----------\nThis file contains our implementations of calculating total reward via our baseline FIFO algorithm\n'''\n\nimport util, math, random, csv, timeit\nfrom collections import defaultdict\nfrom util import ValueIteration\nfrom itertools import combinations\n\n\n'''\nfunction: main\n----------\nCalculates the reward using FIFO algorithm.\n'''\ndef main():\n start = timeit.default_timer()\n \n f = open(\"data/training_data_TEST2.csv\", 'r') # to read the file\n\n fileReader = csv.reader(f)\n data = []\n day = []\n currDate = 0\n bound = 7\n capacity = 13\n for lineNum, row in enumerate(fileReader):\n if lineNum == 0:\n continue\n \n time = row[4]\n if int(row[4]) < 10: timeStamp = row[3] + '00' + row[4]\n elif int(row[4]) < 100: timeStamp = row[3] + '0' + row[4]\n else: timeStamp = row[3] + row[4]\n \n daysUntilDue = (1 * (row[2] == 'Exp')) + (3 * (row[2] == 'Std'))\n reqType = row[2] # to build request string\n if row[1] == 'True': reqType += 'SMT' # to build request string\n else: reqType += 'Reg' # to build request string\n \n if lineNum == 1:\n day.append([reqType, daysUntilDue, int(timeStamp)])\n currDate = row[3]\n howManyRacquetsSeenInDay = 1\n else: # if not on first racquet\n if row[3] == currDate: # if current racquet is still on same day\n if howManyRacquetsSeenInDay < (capacity + bound):\n howManyRacquetsSeenInDay += 1\n day.append([reqType, daysUntilDue, int(timeStamp)])\n else: # if current racquet is on a new day\n data.append(sorted(day, key=lambda x: x[2]))\n day = []\n day.append([reqType, daysUntilDue, int(timeStamp)])\n currDate = row[3]\n howManyRacquetsSeenInDay = 1\n data.append(sorted(day, key=lambda x: x[2]))\n reward = 0\n strung = []\n unstrung = []\n for dayNumber in range(len(data)): # iterates for number of days in time frame\n unstrung.extend(data[dayNumber])\n index = 0\n # calculates revenue for completed racquets\n while index < capacity and len(unstrung) > 0: # while you can still string the racquet and there are racquets left to accept\n racquet = unstrung.pop(0)\n index += 1\n strung.append(racquet)\n \n # calculate reward\n if racquet[0] == 'SpdReg':\n reward += 40\n elif racquet[0] == 'ExpReg':\n reward += (30 + (1 - racquet[1]) * .01)\n elif racquet[0] == 'StdReg':\n reward += (20 + (3 - racquet[1]) * .01)\n elif racquet[0] == 'SpdSMT':\n reward += 18\n elif racquet[0] == 'ExpSMT':\n reward += (18 + (1 - racquet[1]) * .01)\n elif racquet[0] == 'StdSMT':\n reward += (18 + (3 - racquet[1]) * .01)\n \n for racquet in unstrung:\n if (racquet[1] < 0): reward += (20 * racquet[1])\n if (racquet[1] - 1 < 0): reward += (10 * (racquet[1] - 1))\n if dayNumber != len(data) - 1:\n racquet[1] -= 1\n data[dayNumber + 
1].append(racquet)\n        \n        print('=' * 30, 'DAY', dayNumber, '=' * 30)\n        for racquet in strung:\n            print(racquet)\n    \n    print('Reward: ', reward)\n\n    stop = timeit.default_timer()\n    print('\\nTime:', stop - start, 'sec')\n    \nif __name__ == '__main__':\n    main()\n\n","sub_path":"baseline_fifo_reward.py","file_name":"baseline_fifo_reward.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"223485039","text":"from django.shortcuts import render\nimport requests\nimport random\n\nURL = requests.get('https://newsapi.org/v2/everything?q=bitcoin&from=2019-11-13&sortBy=publishedAt&apiKey=3f589812a9e1449dbef588464c4d90f4')\nnews =URL.json()\nmy_list = news[\"articles\"]\nrand_number = random.randrange(len(my_list))\n\nprint(my_list[rand_number]['author'])\n\n# Create your views here.\ndef home_function(request):\n    URL = requests.get('https://newsapi.org/v2/everything?q=bitcoin&from=2019-11-13&sortBy=publishedAt&apiKey=3f589812a9e1449dbef588464c4d90f4')\n    news =URL.json()\n    my_list = news[\"articles\"]\n    rand_number = random.randrange(len(my_list))\n    context = {\n        'image': my_list[rand_number]['urlToImage'],\n        'title': my_list[rand_number]['title'],\n        'description': my_list[rand_number]['description'],\n        'date': my_list[rand_number]['publishedAt'],\n        'author': my_list[rand_number]['author'],\n    }\n    \n    return render(request, 'news.html', context)\n\n","sub_path":"Random news/Random_News/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"88955100","text":"import pytesseract\nimport cv2\nimport os\nimport glob\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nos.chdir('..')\nos.chdir('Images')\nf = open('./list.txt', 'r')\ncategories = f.readlines()\ncategories.sort()\n\nfor i, category in enumerate(categories):\n    print(str(i) + ' : ' + category, end='')\n\nimgList = glob.glob('./' + categories[int(input())][:-1] + '/*')\ncv2.namedWindow('img', cv2.WINDOW_NORMAL)\nfor name in imgList:\n    img = cv2.imread(name)\n    print(pytesseract.image_to_string(img, lang='Hangul'))\n    cv2.resize(img , (img.shape[0] // 2, img.shape[1] // 2))\n    cv2.imshow('img', img)\n    cv2.waitKey(0)","sub_path":"code/.ipynb_checkpoints/ocr-checkpoint.py","file_name":"ocr-checkpoint.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"142751574","text":"from TestingData import TestingData\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom collections import namedtuple\nclass TestInterface(TestingData):\n\tmessagesPath={}\n\tdef __init__(self): # Parsing the data into file 
\n\n\t\tself.__NPRDtnCreated=[]\n\t\tself.__NPRDtnCreated.append(TestingData.getPRTotalCreatedT(self))\n\t\tself.__NPRDtnCreated.append(TestingData.getPRTotalCreatedI(self))\n\t\tself.__NPRDtnCreated.append(TestingData.getPRTotalCreatedV(self))\n\n\t\tself.__NPRAdbDelivered=[0,0,0]\n\t\tself.__NPRCDDelivered=[0,0,0]\n\t\tself.__NPRGCDelivered=[0,0,0]\n\t\tself.__NPRWIFIRecieved=[0,0,0]\n\t\tself.__NPRMCSWIFIDelivered=[0,0,0]\n\t\tself.__NPRMCSADBDelivered=[0,0,0]\n\n\t\tself.__PRDtnCreated=[]\n\t\tself.__PRDtnCreated.append(TestingData.getPRTotalCreatedT(self))\n\t\tself.__PRDtnCreated.append(TestingData.getPRTotalCreatedI(self))\n\t\tself.__PRDtnCreated.append(TestingData.getPRTotalCreatedV(self))\n\t\tself.__PRAdbDelivered=[0,0,0]\n\t\tself.__PRCDDelivered=[0,0,0]\n\t\tself.__PRGCDelivered=[0,0,0]\n\t\tself.__PRWIFIRecieved=[0,0,0]\n\t\tself.__PRMCSWIFIDelivered=[0,0,0]\n\t\tself.__PRMCSADBDelivered=[0,0,0]\n\n\tdef get_DtnCreated(self):\n\t\treturn self.__NPRDtnCreated;\n\tdef get_AdbDelivered(self):\n\t\treturn self.__NPRAdbDelivered;\n\tdef get_CDDelivered(self):\n\t\treturn self.__NPRCDDelivered;\n\tdef get_GCDelivered(self):\n\t\treturn self.__NPRGCDelivered;\n\tdef get_WIFIRecieved(self):\n\t\treturn self.__NPRWIFIRecieved;\n\tdef get_MCSWIFIDelevered(self):\n\t\treturn self.__NPRMCSWIFIDelivered;\n\tdef get_MCSADBDelivered(self):\n\t\treturn self.__NPRMCSADBDelivered;\n\n\tdef AddMessage(self,line,Array):\n\t\t#print(type(Array))\n\t\tif(line[4][1]=='T'):\n\t\t\tArray[0]+=1\n\t\telif(line[4][1]=='I'):\n\t\t\tArray[1]+=1\n\t\telif(line[4][1]=='V'):\n\t\t\tArray[2]+=1\n\t\treturn Array\n\tdef CountDeliveredSent(self):\n\t\tfor line in TestingData.getNonPriorityDE(self):\n\t\t\tif(line[2][0]=='d'):\n\t\t\t\tif(line[3][0]=='A'):\n\t\t\t\t\t#print(\"Done 1\")\n\t\t\t\t\tself.__NPRAdbDelivered=self.AddMessage(line,self.__NPRAdbDelivered)\n\t\t\t\t\t\n\t\t\tif(line[2][0]=='C'):\n\t\t\t\t#print(\"Done 2\")\n\t\t\t\tself.__NPRGCDelivered=self.AddMessage(line,self.__NPRGCDelivered)\n\t\t\tif(line[2][0]=='A'):\n\t\t\t\tif(line[3][0]=='C'):\n\t\t\t\t\t#print(\"Done 3\")\n\t\t\t\t\tself.__NPRCDDelivered=self.AddMessage(line,self.__NPRCDDelivered)\n\t\t\t\tif(line[3][0]=='W'):\n\t\t\t\t\t#print(\"Done 4\")\n\t\t\t\t\tself.__NPRWIFIRecieved=self.AddMessage(line,self.__NPRWIFIRecieved)\n\t\t\tif(line[2][0]=='W'):\n\t\t\t\tif(line[3][0]=='W'):\n\t\t\t\t\t#print(\"Done 5\")\n\t\t\t\t\tself.__NPRMCSWIFIDelivered=self.AddMessage(line,self.__NPRMCSWIFIDelivered)\n\t\t\t\tif(line[3][0] == 'A'):\n\t\t\t\t\t#print(\"Done 6\")\n\t\t\t\t\tself.__NPRMCSADBDelivered=self.AddMessage(line,self.__NPRMCSADBDelivered)\n\t\t#self.__NPRDtnCreated=len(TestingData.getNonPriorityC(self))\n\n\t\tfor line in TestingData.getPriorityDE(self):\n\t\t\tif(line[2][0]=='d'):\n\t\t\t\tif(line[3][0]=='A'):\n\t\t\t\t\tself.__PRAdbDelivered=self.AddMessage(line,self.__PRAdbDelivered)\n\t\t\tif(line[2][0]=='C'):\n\t\t\t\tself.__PRGCDelivered=self.AddMessage(line,self.__PRGCDelivered)\n\t\t\tif(line[2][0]=='A'):\n\t\t\t\tif(line[3][0]=='C'):\n\t\t\t\t\tself.__PRCDDelivered=self.AddMessage(line,self.__PRCDDelivered)\n\t\t\t\tif(line[3][0]=='W'):\n\t\t\t\t\tself.__PRWIFIRecieved=self.AddMessage(line,self.__PRWIFIRecieved)\n\t\t\tif(line[2][0]=='W'):\n\t\t\t\tif(line[3][0]=='W'):\n\t\t\t\t\tself.__PRMCSWIFIDelivered=self.AddMessage(line,self.__PRMCSWIFIDelivered)\n\t\t\t\tif(line[3][0] == 
'A'):\n\t\t\t\t\tself.__PRMCSADBDelivered=self.AddMessage(line,self.__PRMCSADBDelivered)\n\t\t#self.__PRDtnCreated=len(TestingData.getPriorityC(self))\n\n\tdef PrintDelivery(self,i):\n\t\tprint(\"Non Priority InterfaceWise\\n\")\n\t\tprint(\"Interface            Created        Delivered\\n\")\n\t\tprint(\"DTN                  \"+str(self.__NPRDtnCreated[i]),end=\"                \")\n\t\tprint(self.__NPRAdbDelivered[i],end=\"\\n\")\n\t\tprint(\"ADB                  \"+str(self.__NPRAdbDelivered[i]),end=\"                \")\n\t\tprint(self.__NPRCDDelivered[i])\n\t\tprint(\"CD                   \"+str(self.__NPRCDDelivered[i]),end=\"                \")\n\t\tprint(self.__NPRGCDelivered[i])\n\t\tprint(\"GC                   \"+str(self.__NPRGCDelivered[i]),end=\"                \")\n\t\tprint(self.__NPRWIFIRecieved[i])\n\t\tprint(\"GC_WIFI              \"+str(self.__NPRWIFIRecieved[i]),end=\"                \")\n\t\tprint(self.__NPRMCSWIFIDelivered[i])\n\t\tprint(\"MCS                  \"+str(self.__NPRMCSWIFIDelivered[i]),end=\"                \")\n\t\tprint(self.__NPRMCSADBDelivered[i])\n\n\t\tprint(\"\\nPriority InterfaceWise\\n\")\n\t\t\n\t\tprint(\"Interface            Created        Delivered\\n\")\n\t\tprint(\"DTN                  \"+str(self.__PRDtnCreated[i]),end=\"                \")\n\t\tprint(self.__PRAdbDelivered[i],end=\"\\n\")\n\t\tprint(\"ADB                  \"+str(self.__PRAdbDelivered[i]),end=\"                \")\n\t\tprint(self.__PRCDDelivered[i])\n\t\tprint(\"CD                   \"+str(self.__PRCDDelivered[i]),end=\"                \")\n\t\tprint(self.__PRGCDelivered[i])\n\t\tprint(\"GC                   \"+str(self.__PRGCDelivered[i]),end=\"                \")\n\t\tprint(self.__PRWIFIRecieved[i])\n\t\tprint(\"GC_WIFI              \"+str(self.__PRWIFIRecieved[i]),end=\"                \")\n\t\tprint(self.__PRMCSWIFIDelivered[i])\n\t\tprint(\"MCS                  \"+str(self.__PRMCSWIFIDelivered[i]),end=\"                \")\n\t\tprint(self.__PRMCSADBDelivered[i])\n\tdef plotDelivery(self,i):\n\t\tn_groups = 7\n\t\tnon_priorities = (self.__NPRDtnCreated[i],self.__NPRAdbDelivered[i],self.__NPRCDDelivered[i],self.__NPRGCDelivered[i],self.__NPRWIFIRecieved[i],self.__NPRMCSWIFIDelivered[i],self.__NPRMCSADBDelivered[i])\n\t\tpriorities = (self.__PRDtnCreated[i],self.__PRAdbDelivered[i],self.__PRCDDelivered[i],self.__PRGCDelivered[i],self.__PRWIFIRecieved[i],self.__PRMCSWIFIDelivered[i],self.__PRMCSADBDelivered[i])\n\t\tfig, ax = plt.subplots()\n\t\tindex = np.arange(n_groups)\n\t\tbar_width = 0.35\n\t\topacity = 0.4\n\t\trects1 = ax.bar(index, priorities, bar_width,\n\t\t\t\t\t\talpha=opacity, color='b',\n\t\t\t\t\t\tlabel='Priority')\n\n\t\trects2 = ax.bar(index + bar_width, non_priorities, bar_width,\n\t\t\t\t\t\talpha=opacity, color='r',\n\t\t\t\t\t\tlabel='Non Priorities')\n\n\t\tax.set_xlabel('Type of messages')\n\t\tax.set_ylabel('Ratio of Packet created and packets delivered')\n\t\tax.set_title('hihiii')\n\t\tax.set_xticks(index + bar_width / 2)\n\t\tax.set_xticklabels(('DTN','ADB', 'DataMules','GC','GC_WIFI','MCS_WIFI','MCS_ADB'))\n\t\tax.legend()\n\t\tfig.tight_layout()\n\t\tplt.show()","sub_path":"DeliveryAnalysis/TestingInterface.py","file_name":"TestingInterface.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"452542947","text":"from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail, get_connection\nfrom django.template import Context as _Context\nfrom django.template.loader import get_template\n\n\n\n# Context for with statement (will be in Django 1.7)\nclass Context(_Context):\n    def update(self, data):\n        super(Context, self).update(data)\n        return self\n    def __exit__(self, *args):\n        self.pop()\n    def __enter__(self):\n        
pass\n\n\n\nclass TemplateMailer(object):\n\n template = None\n template_name = None\n from_email = settings.SERVER_EMAIL\n recipient_list = None\n auth_user = None\n auth_password = None\n connection = None\n fail_silently = False\n context = None\n context_class = Context\n\n def __init__(self, **kwargs):\n for name in kwargs:\n if hasattr(self, name):\n setattr(self, name, kwargs.pop(name))\n self.kwargs = kwargs\n if not self.template:\n self.template = self.get_template()\n if not self.context:\n self.context = self.context_class(kwargs)\n if not self.connection:\n self.connection = get_connection(\n username=self.auth_user,\n password=self.auth_password,\n fail_silently=self.fail_silently\n )\n\n def get_template(self):\n return get_template(self.template_name)\n\n def get_context(self, dictionary):\n context = self.context_class(**self.kwargs)\n context.update(dictionary)\n return context\n\n def send_mail(self, from_email=None, recipient_list=None, **kwargs):\n with self.context.update(kwargs):\n content = self.template.render(self.context).split('\\n', 1)\n subject = content[0]\n try:\n message = content[1]\n except:\n message = ''\n send_mail(\n subject = subject,\n message = message,\n from_email = from_email or self.from_email,\n recipient_list = recipient_list or self.recipient_list,\n connection = self.connection,\n )\n\n","sub_path":"domecek/templatemailer.py","file_name":"templatemailer.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"234176","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Copyspecial Assignment\"\"\"\n\n# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Google's Python Class\n# http://code.google.com/edu/languages/google-python-class/\n\nimport re\nimport os\nimport shutil\nimport subprocess\nimport argparse\n\n# This is to help coaches and graders identify student assignments\n__author__ = \"knmarvel with madarp\"\n\n\ndef get_special_paths(dir):\n \"\"\"takes a directory and returns a list of the absolute\n paths of the special files in the given directory\"\"\"\n\n spec_paths = []\n for root, dirs, files in os.walk(dir):\n for name in files:\n if re.search(\"__(.+?)__\", name):\n if os.path.join(os.path.abspath(dir), name) not in spec_paths:\n spec_paths.append(os.path.join(os.path.abspath(dir), name))\n else:\n print(\"Error: duplicate special files.\")\n return spec_paths\n\n\ndef copy_to(paths, dir):\n \"\"\"given a list of the paths, copy those files into the\n given directory\"\"\"\n\n os.makedirs(dir)\n for file in paths:\n shutil.copy(file, dir)\n\n\ndef zip_to(paths, dir):\n \"\"\"given a list of paths, zip those files up into the\n given zipfile\"\"\"\n\n cmd = [\"zip\", \"-j\", dir] + paths\n print(\"Command I'm going to do \" + \"\\n\" + \" \".join(cmd))\n subprocess.call(cmd)\n\n\ndef parsing():\n \"\"\"parses arguments given as parameters in calling function\"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('fromdir', default='./')\n parser.add_argument('--todir', help='dest dir for special files')\n parser.add_argument('--tozip', help='dest zipfile for special files')\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Finds all files with names that include the format '__?___' in the fromdir.\n If no other arguments are provided, prints out those filenames. If a\n --todir is provided, copies those files to the todir given. 
If a\n --tozip is provided, zips those files to that location.\"\"\"\n\n args = parsing()\n spec_paths = get_special_paths(args.fromdir)\n\n if args.todir or args.tozip:\n if args.todir:\n copy_to(spec_paths, args.todir)\n\n if args.tozip:\n zip_to(spec_paths, args.tozip)\n else:\n print(\"\\n\".join(spec_paths))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"copyspecial.py","file_name":"copyspecial.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"76910210","text":"from django.shortcuts import render\nimport os\nfrom django.conf import settings\nfrom django.http import HttpResponse,Http404\nfrom django.core.files.storage import FileSystemStorage\nfrom .forms import uploadFileForm\nfrom TextToPPT.orchestrator import TextToPPTOrchestrator\n\ndef uploadtxtfiles(request):\n if request.method == 'POST':\n form = uploadFileForm(request.POST, request.FILES)\n doc = request.FILES['document']\n shape = form.data['shapeType']\n authorName = form.data['authorName']\n\n startDate = form.data['startDate']\n endDate = form.data['endDate']\n\n fontSize = form.data['fontSize']\n Left = form.data['Left']\n Top = form.data['Top']\n Height = form.data['Height']\n Width = form.data['Width']\n filestorage = FileSystemStorage()\n filename = filestorage.save(doc.name,doc)\n name = filename+\".pptx\"\n file_path = os.path.join(settings.MEDIA_ROOT,name)\n in_path = os.path.join(settings.MEDIA_ROOT,filename)\n out_path = os.path.join(settings.MEDIA_ROOT,filename+\".pptx\") #combine the directory with file\n TextToPPTLibrary_class = TextToPPTOrchestrator()\n TextToPPTLibrary_class.SetShapeType(shape)\n TextToPPTLibrary_class.SetMessageAuthor(authorName)\n\n TextToPPTLibrary_class.SetStartDate(startDate)\n TextToPPTLibrary_class.SetEndDate(endDate) \n\n TextToPPTLibrary_class.SetFontSize(fontSize)\n TextToPPTLibrary_class.SetShapeLeft(Left)\n TextToPPTLibrary_class.SetShapeTop(Top)\n TextToPPTLibrary_class.SetShapeHeight(Height)\n TextToPPTLibrary_class.SetShapeWidth(Width)\n TextToPPTLibrary_class.ConvertTextFileToPPT(in_path,out_path)\n if os.path.exists(file_path):\n with open(file_path,'rb') as file:\n response = HttpResponse(file.read(),content_type = \"application/vnd.openxmlformats-officedocument.presentationml.presentation\")\n response['Content-Disposition'] = 'inline;filename=' + os.path.basename(file_path)\n return response\n return render(request,'uploadmethod/upload.html')","sub_path":"uploadmethod/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"49829686","text":"import time \nimport random\nfrom tkinter import *\nroot = Tk()\nroot.geometry(\"400x300\")\nmessages = [\n\"Из всех деревьев мы врезались в то, которое смогло дать нам сдачи.\",\n\"Если не прекратить его попытки спасти вам жизнь, он вас убьет.\",\n\"Нашу сущность намного лучше демонстрируют действия, а не возможности.\",\n\"Я маг, а не размахивающий палкой бабуин.\",\n\"Величие порождает зависть, зависть — злобу, злоба — ложь.\",\n\"В мечтах мы попадаем в наш и только наш мир.\",\n\"Я убежден, что истина, как правило, предпочтительнее лжи.\",\n\"Казалось, что рассвет следует за полночью с неприличной поспешностью.\" \n]\nmessage = random.choice(messages)\ndef entt():\n ent = Entry(root)\n ent.grid()\n start_time = time.time()\n def time_of_writing():\n end_time = time.time()\n delta = end_time-start_time\n 
speed = len(ent.get()) / delta\n lbl_of_speed = Label(root,text=\"You introduced\" +str(len(ent.get()))+\"characters from\"+str(delta)+\"seconds\" )\n lbl_of_speed2 = Label(root,text=\"You introduced\"+str(speed)+\"characters in 1 second\")\n lbl_of_speed3 = Label(root,text=\"You shoulded to introduc string\"+message+\".And you introduced \"+ent.get() )\n lbl_of_speed.grid()\n lbl_of_speed2.grid()\n lbl_of_speed3.grid()\n\n btn = Button(root,text=\"enter\",command=time_of_writing)\n btn.grid()\ndef lbl1():\n lb1 = Label(root,text=\"Проверка скорости набора. Введите следующую фразу. Я засеку время…\")\n lb1.grid()\n root.after(2000,lbl2)\ndef lbl2():\n lb2 = Label(root,text=\"приготовиться\")\n lb2.grid()\n root.after(1000,lbl3)\ndef lbl3():\n lb3 = Label(root,text=\"сосредоточиться...\")\n lb3.grid()\n root.after(1000,lbl4)\ndef lbl4():\n lb4 = Label(root,text=\"начали\")\n lb4.grid()\n root.after(50,lbl5) \ndef lbl5():\n lb5 = Label(root,text=message)\n lb5.grid()\n root.after(50,entt)\n \nlbl1()\n\n","sub_path":"new/src/09.05.2020/time,datetime in Tkinter.py","file_name":"time,datetime in Tkinter.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"528309926","text":"import random\n\n\nclass IntIncrementor(object):\n\n def increment(self, number, column_spec):\n maxStep = column_spec['maxStep']\n positive_weighting = column_spec['positiveWeighting']\n change = random.randint(0, maxStep)\n direction = 1\n\n if (random.random() > positive_weighting):\n direction = -1\n\n change = change * direction\n return number + change\n","sub_path":"data_generator/int_incrementor.py","file_name":"int_incrementor.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"198447626","text":"from openpyxl import load_workbook\nfrom .sheet_managers import (CoursesSheetManager, CandidatesSheetManager,\n DistributionSheetManager)\n\n\nclass ExcelFileManager:\n def __init__(self, path):\n self.path = path\n self.wb = load_workbook(path)\n self._courses = None\n self._candidates = None\n self._distribution = None\n\n @property\n def courses(self):\n if self._courses is None:\n self._courses = CoursesSheetManager(self.wb)\n return self._courses.courses\n\n @property\n def candidates(self):\n if self._candidates is None:\n self._candidates = CandidatesSheetManager(self.wb)\n return self._candidates.candidates\n\n def write_distribution(self, distribution):\n if self._distribution is None:\n self._distribution = DistributionSheetManager(self.wb)\n self._distribution.write_distribution(distribution)\n self.save()\n\n def save(self):\n self.wb.save(self.path)\n","sub_path":"auxiclean/managers/excel_manager.py","file_name":"excel_manager.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"193337415","text":"import time\nimport cv2\nimport pyfiglet \n\n# ASCII Text Art\nprint(pyfiglet.figlet_format(\"Face Detect\", font = \"bulbhead\" ))\nprint(pyfiglet.figlet_format(\"Created By @FaLLenGuY\", font = \"digital\" ))\nprint(\"-\"*70,\"\\n\")\n\npath=input(\"Enter Image path: \")\nimage = cv2.imread(path)\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\nface_cascade = cv2.CascadeClassifier('Haarcascade\\haarcascade_frontalface_default.xml')\n\nfor (x,y,w,h) in face_cascade.detectMultiScale(gray,1.2,6):\n cv2.rectangle(image, (x,y), 
(x+h, y+h), (0, 255, 0), 2)\n \nprint(\"The number of faces found = \", len(face_cascade.detectMultiScale(gray,1.2,6)))\ncv2.imshow(\"Faces found\", image) \n\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n \n","sub_path":"Face Detect.py","file_name":"Face Detect.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"419415003","text":"import cv2\nimport numpy as np\nimport torchvision.transforms as transforms\nimport lmdb\nimport msgpack\nfrom torch.utils.data import Dataset\nfrom PIL import Image\n\n\nclass lmdbDataset(Dataset):\n def __init__(self, location, is_train):\n self.env = lmdb.open(location, subdir=False, max_readers=1, readonly=True, lock=False, readahead=False,\n meminit=False)\n self.txn = self.env.begin(write=False)\n self.length = self.txn.stat()['entries']\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n # train data augment\n if is_train:\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomCrop((224, 224)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n # test data augment\n else:\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n '''\n for key,data in self.txn.cursor():\n now_data = msgpack.loads(data,raw=False)\n data_img = now_data[0]\n label = now_data[1]\n now_arr = np.frombuffer(data_img[b'data'],dtype=np.uint8)\n print(now_arr)\n image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)\n print(image_content.shape)\n\n #print(type(_))\n break\n '''\n def __len__(self):\n return self.length - 1\n\n def __getitem__(self, index):\n new_index = str(index).encode()\n data = self.txn.get(new_index)\n now_data = msgpack.loads(data, raw=False)\n data_img = now_data[0]\n label = now_data[1]\n now_arr = np.frombuffer(data_img[b'data'], dtype=np.uint8)\n image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)\n image_content = cv2.cvtColor(image_content, cv2.COLOR_BGR2RGB)\n image_content = Image.fromarray(image_content)\n image_content = self.transform(image_content)\n return image_content, label\n\n\nif __name__ == '__main__':\n temp_dataset = lmdbDataset('indoor67.lmdb', True)\n print(temp_dataset[0])\n #print(i)\n #assert temp_dataset[i][0] is not None","sub_path":"ResNet50/50/src_code/lmdbdataset.py","file_name":"lmdbdataset.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"335803334","text":"# instead of a program limited to print just the 50 elements,\n# i present a program which asks for the number of terms to be printed.\nn = int(input(\"How many terms? 
\"))\n# we define the first two terms\nn1, n2 = 1, 1\ncount = 0 # set the initial count to 0\n# we first check if the number of terms is valid\nif n <= 0:\n    print(\"Please enter a positive integer\")\nelif n == 1:\n    print(\"Fibonacci sequence up to\",n,\":\")\n    print(n1)\nelse:\n    print(\"Fibonacci sequence:\")\n    while count < n: # count function limits the length of the list to be less than the number of terms entered.\n        print(n1)\n        nth = n1 + n2\n        # now we assign the new updated values to the variable for the recursion to continue.\n        n1 = n2\n        n2 = nth\n        count += 1 # this will keep on increasing the count of the sequence, till the max point of count < n is reached.","sub_path":"pythonEX3/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"189111026","text":"# coding=utf-8\n# Using pretrained vgg for classification\n#\nimport sys, os, glob\nimport numpy as np\nimport tensorflow as tf\nfrom tuned_vgg16 import vgg\nimport matplotlib.pyplot as plt\n#import cv2\n#\nslim = tf.contrib.slim\n\n# Loading tfrecords dataset\nclass data_generator():\n    def __init__(self, FLAGS, name):\n        self.steps = FLAGS.num_steps\n        if name == 'train':\n            self.name = 'training'\n            self.b_size = FLAGS.train_batch\n            self.tf_dir = FLAGS.training_path\n\n        elif name == 'test':\n            self.name = 'testing'\n            self.b_size = FLAGS.test_batch\n            self.tf_dir = FLAGS.testing_path\n\n        elif name == 'validate':\n            self.name = 'validate'\n            self.b_size = FLAGS.valid_batch\n            self.tf_dir = FLAGS.validate_path\n        else:\n            print('invalid name, name should be among; train, test, validate')\n\n    def data_list_generator(self):\n        # creating data list here\n        record_iterator = tf.python_io.tf_record_iterator(self.tf_dir)\n        data_list = []\n        # list of (image, label) tuples\n        for string_record in record_iterator:\n            example = tf.train.Example()\n            example.ParseFromString(string_record)\n            height = int(example.features.feature[self.name + '/' +'height'].int64_list.value[0])\n            width = int(example.features.feature[self.name + '/' + 'width'].int64_list.value[0])\n            img_string = (example.features.feature[self.name + '/' + 'image'].bytes_list.value[0])\n            lbl_string = (example.features.feature[self.name + '/' + 'label'].bytes_list.value[0])\n            img_1d = np.fromstring(img_string, dtype=np.float32)\n            lbl_1d = np.fromstring(lbl_string, dtype=np.float32)\n            reconstructed_img = img_1d.reshape((height, width, -1))\n            data_list.append((reconstructed_img, lbl_1d))\n        return data_list\n\n\n    def numpy_batch_generator(self):\n        #\n        data_list = self.data_list_generator()\n        for j in range(self.steps):\n            indexes = np.random.randint(0,len(data_list), self.b_size)\n            data_img = np.array([data_list[indexes[i]][0] for i in range(len(indexes))])\n            data_lbl = np.array([data_list[indexes[i]][1] for i in range(len(indexes))])\n            yield data_img, data_lbl\n\nclass tuned_vgg16():\n    def __init__(self, FLAGS):\n        self.flag = FLAGS\n        # preparing training data\n        # with graph.as_default():\n    def train_and_validate_vgg(self):\n        graph = tf.Graph()\n        with graph.as_default():\n            #\n            trn_images = tf.placeholder(dtype=tf.float32, shape=(None, 224, 224,3), name='trn_image')\n            trn_labels = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='trn_label')\n            #val_x = tf.placeholder(dtype=tf.float32, shape=(None, 224, 224,3), name='val_image')\n            val_labels = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='val_label')\n\n            # Load the pretrained VGG16 model from slim and extract the 
fully connected layer\n # before the final output layer\n\n with slim.arg_scope(vgg.vgg_arg_scope()):\n logits, end_points = vgg.vgg_16(trn_images, num_classes=1000, is_training=False) # trn_images\n fc_6 = end_points['vgg_16/fc6']\n fc_7 = end_points['vgg_16/fc7']\n\n # Define the only set of weights that we will learn W1 and b1\n\n W1 =tf.Variable(tf.random_normal([4096,1], mean=0.0, stddev=0.02), name='W1')\n b1 = tf.Variable(tf.random_normal([1], mean=0.0, stddev=0.02), name='b1')\n # -training two layers of the prertained vgg\n W2 =tf.Variable(tf.random_normal([4096,self.flag.num_classes], mean=0.0, stddev=0.02), name='W2')\n b2 = tf.Variable(tf.random_normal([self.flag.num_classes], mean=0.0, stddev=0.02), name='b2')\n\n # Reshape the fully connected layer fc_7 and define\n # the logits and probability\n fc_6 = tf.reshape(fc_6, [-1,W1.get_shape().as_list()[0]])\n fc_6 = tf.nn.bias_add(tf.matmul(fc_6,W1),b1)\n #\n fc_7 = tf.reshape(fc_7, [-1, W2.get_shape().as_list()[0]])\n logitx = tf.nn.bias_add(tf.matmul(fc_7,W2),b2)\n softmax_out = tf.nn.softmax(logitx, name='smax_out')\n #\n # Define Cost and Optimizer\n # Only we wish to learn the weights Wn and b and hence included them in var_list\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=softmax_out, labels=trn_labels))\n # regularization ---\n regularizers = (tf.nn.l2_loss(W1) + tf.nn.l2_loss(b1) +\n tf.nn.l2_loss(W2) + tf.nn.l2_loss(b2) )\n loss += 1e-5 * regularizers\n #\n optimizer = tf.train.AdamOptimizer(learning_rate=self.flag.learning_rate).\\\n minimize(loss, var_list=[W1, b1, W2, b2])\n # measuring the accuracy\n with tf.variable_scope('accuracy') as scope:\n predictions = tf.argmax(tf.nn.softmax(softmax_out), 1, name='final_port')\n true_label = tf.argmax(val_labels, 1)\n equality = tf.equal(predictions, true_label)\n # my accuracy\n accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))\n # accuracy with tf metrics\n tfmetrics = tf.metrics.accuracy(true_label, predictions, name=scope)[1]\n #\n # defining saver to keep checkpoints\n saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=2)\n # checking for existing meta files\n pre_chkpnt = tf.train.latest_checkpoint(self.flag.chkpnt_path)\n\n with tf.Session(graph=graph) as sess:\n # writing for tensorboard\n tn_board_writer = tf.summary.FileWriter(self.flag.log_path, sess.graph)\n # preparing summaries\n with tf.name_scope(\"summaries\"):\n tf.summary.scalar(\"loss\", loss)\n tf.summary.histogram(\"histogram_loss\",loss)\n tf.summary.scalar(\"accuracy\", accuracy)\n tf.summary.scalar(\"tf_accuracy\", tfmetrics)\n merge_all = tf.summary.merge_all()\n #\n init_op = [tf.global_variables_initializer(), tf.local_variables_initializer()]\n sess.run(init_op)\n #\n # Loading the pre-trained weights for VGG16\n initialize_weights = slim.assign_from_checkpoint_fn(\n os.path.join(self.flag.vgg_ckpt_path),\n slim.get_model_variables('vgg_16'))\n initialize_weights(sess)\n try:\n meta_file = pre_chkpnt.split('/')[-1]\n restorer = tf.train.import_meta_graph(self.flag.chkpnt_path + '/' + meta_file + '.meta')\n restorer.restore(sess, pre_chkpnt)\n print('saved model is loaded to continue training ...')\n\n except:\n print('meta file was not found, training from pre-trained vgg ...')\n\n for epoch in range(self.flag.epochs):\n # Data generator\n data_gen_train = data_generator(self.flag, 'train')\n data_gen_val = data_generator(self.flag, 'validate')\n try:\n for step in range(self.flag.num_steps):\n trn_x, trn_y = 
next(data_gen_train.numpy_batch_generator())\n val_x, val_y = next(data_gen_val.numpy_batch_generator())\n # sess.run(valid_iterator.initializer)\n val_feed = {trn_images:trn_x, trn_labels: trn_y}\n #\n cost_train, _= sess.run([loss, optimizer], feed_dict=val_feed)\n #measuring the accuracy (using validation data)\n if step % self.flag.showing_step == 0:\n val_feed = {trn_images: val_x, val_labels: val_y, trn_labels: trn_y}\n result_summery, my_acc, tf_acc = sess.run([merge_all, accuracy, tfmetrics], feed_dict=val_feed)\n # saving checkpoints here\n saver.save(sess, self.flag.chkpnt_path + '/vggmt', write_meta_graph=True,\n write_state=True, meta_graph_suffix= 'meta', global_step=step + self.flag.num_steps * epoch)\n #\n print('epoch {}, step {}, train loss: {}'.format(epoch, step, cost_train))\n print('-------- validation accuracy: {}, tf accuracy metic {}'.format(my_acc, tf_acc))\n # val_tensor_x, val_tensor_y = train_iterator.get_next()\n tn_board_writer.add_summary(result_summery, step + self.flag.num_steps * epoch)\n except:\n pass\n # trn_images, trn_labels = train_iterator.get_next()\n # saving at the end of epochs\n saver.save(sess, self.flag.chkpnt_path + '/vggmt', write_meta_graph=True, write_state=True,\n meta_graph_suffix= 'meta', global_step=step + self.flag.num_steps * epoch)\n\n os.system('tensorboard --logdir='+self.flag.log_path)\n\n # representation of test results\n\n def representer(self, img, treu_label, pred_label):\n #\n true_name = 'sushi' if int(np.argmax(treu_label))==0 else 'sandwich'\n # feeding data to trained network\n l_name = 'sushi' if np.argmax(pred_label[0])==0 else 'sandwich' # I just edited this part (silly mistake!) in June 26\n print('Predicted probability {}, Predicted label {},True name: {}'\n .format(np.max(pred_label[0][0]), l_name, true_name))\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n plt.title('Classified Image')\n color = 'g' if l_name==true_name else 'red'\n plt.text(-0, -10, r'Predic: ' + l_name, fontsize=10, color = color)\n plt.text(170, -10, r'True: ' + true_name, fontsize=10, color = 'g')\n plt.imshow(np.squeeze(img, axis=0))\n plt.show()\n\n def test_vgg(self):\n prediction_graph = tf.Graph()\n with prediction_graph.as_default():\n # loading network\n pre_chkpnt = tf.train.latest_checkpoint(self.flag.chkpnt_path)\n #\n with tf.Session(graph=prediction_graph) as sess:\n #\n meta_file = pre_chkpnt.split('/')[-1]\n restorer = tf.train.import_meta_graph(self.flag.chkpnt_path + '/' + meta_file + '.meta')\n restorer.restore(sess, pre_chkpnt)\n img_in = prediction_graph.get_tensor_by_name('trn_image:0')\n y_smax = prediction_graph.get_tensor_by_name('smax_out:0')\n y_out = prediction_graph.get_tensor_by_name('accuracy/final_port:0') # final_port\n uninitialized_vars = []\n for var in tf.all_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError:\n uninitialized_vars.append(var)\n\n init_new_vars_op = tf.initialize_variables(uninitialized_vars)\n sess.run(init_new_vars_op)\n\n for test in range(self.flag.num_tests):\n data_gen = data_generator(self.flag, 'test') # second option is to select dataset\n img, label = next(data_gen.numpy_batch_generator())\n\n output, out_smax = sess.run([y_out, y_smax], feed_dict={img_in : img })\n self.representer(img, label, 
out_smax)\n\n\n\n\n\n\n\n\n\n","sub_path":"Classification_with_transfer_learning_VGG/tuned_vgg16/Finetunning_vgg16.py","file_name":"Finetunning_vgg16.py","file_ext":"py","file_size_in_byte":12165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"530900494","text":"import argparse\nfrom baselines import bench, logger\nfrom baselines.common.misc_util import (\n set_global_seeds,\n boolean_flag,\n)\n\ndef test(env_id, num_timesteps, seed, restore_dir, render_eval):\n from baselines.common import set_global_seeds\n # from baselines.common.vec_env.vec_normalize import VecNormalize\n from baselines.ppo2 import testing\n from baselines.ppo2.policies import MlpPolicy\n import gym\n import tensorflow as tf\n # from baselines.common.vec_env.dummy_vec_env import DummyVecEnv\n ncpu = 1\n config = tf.ConfigProto(allow_soft_placement=True,\n intra_op_parallelism_threads=ncpu,\n inter_op_parallelism_threads=ncpu)\n tf.Session(config=config).__enter__()\n def make_env():\n env = gym.make(env_id)\n # env = bench.Monitor(env, logger.get_dir())\n return env\n\n env = make_env()\n \n set_global_seeds(seed)\n policy = MlpPolicy\n testing.test(policy=policy, env=env, nsteps=2048, nminibatches=32,\n lam=0.95, gamma=0.99, noptepochs=10,\n ent_coef=0.0,\n lr=3e-4,\n cliprange=0.2,\n total_timesteps=num_timesteps,\n restore_dir=restore_dir, render_eval = render_eval)\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--env', help='environment ID', default='Hopper-v1')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(1e6))\n parser.add_argument('--restore-dir', type=str, default='/home/arpit/new_RL3/baseline_results/Hopper-v1/run4')\n boolean_flag(parser, 'render-eval', default=True)\n args = parser.parse_args()\n # logger.configure()\n test(args.env, num_timesteps=args.num_timesteps, seed=args.seed, restore_dir=args.restore_dir, render_eval = args.render_eval)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(\"Exiting!\")\n\n","sub_path":"baselines/ppo2/test_mujoco.py","file_name":"test_mujoco.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"261898624","text":"#!/slowfs/dcopt105/vasquez/cnda/Conda/bin/python\nimport os, re\nimport subprocess\nimport pprint\npp = pprint.PrettyPrinter(indent = 1, depth= 3)\n\ndef get_full_stat(farm ,job_num):\n \n farms = {\n 'gala' : 'source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat',\n 'snps' : 'source /remote/sge/default/snps/common/settings.csh; set qstat_real = qstat',\n }\n\n cmd = '%s; qstat -j %s'%(farms[farm],job_num)\n cmd_obj = subprocess.run(cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n stat = {}\n for line in cmd_ret:\n line = line.strip().split(':',1)\n\n if len(line) == 2:\n key = line[0].split()[0]\n \n stat[key] = str(line[1]).strip()\n \n return stat\n\ndef move_job(new_user, job_num, farm, debug):\n\n farms = {\n 'gala' : 'source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat',\n 'snps' : 'source /remote/sge/default/snps/common/settings.csh; set qstat_real = qstat',\n }\n\n old_stat = get_full_stat('gala',job_num)\n\n if 'job_state' not in old_stat.keys(): old_stat['job_state'] = 
'' \n\n if old_stat['job_state'] == '' or old_stat['job_state'] == 'Rq' or old_stat['job_state'] == 'qw':\n try:\n old_user = old_stat['owner']\n job_dir = old_stat['cwd']\n submit_line = old_stat['submit_cmd']\n\n design = job_dir.split('/')[-1]\n\n kill_old_cmd = 'rsh -l %s localhost \\\"%s; qdel %s; chmod 777 %s; chmod 777 %s/*;rm -rf %s/%s.grd.out;\\\"'%(old_user,farms[farm],job_num, job_dir, job_dir, job_dir,design)\n print(kill_old_cmd)\n if not debug:\n cmd_obj = subprocess.run(kill_old_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n \n #cmd_obj = subprocess.run(kill_old_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n #cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n re_sub_cmd = 'rsh -l %s localhost \\\"%s; cd %s; %s \\\"'%(new_user,farms[farm],job_dir,submit_line)\n print(re_sub_cmd)\n if not debug:\n cmd_obj = subprocess.run(re_sub_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n new_job = ''\n\n for line in cmd_ret:\n line = line.strip().split()\n\n if len(line) >= 3:\n if (line[0]=='Your') and (line[1] == 'job'):\n new_job = line[2]\n\n if new_job != None:\n new_grd_cmd = 'rsh -l %s localhost \\\"echo \\'%s\\'| tee %s/%s.grd.out\\\"'%(new_user,new_job,job_dir,design)\n try:\n os.system(new_grd_cmd)\n except:\n print('%s didnt work'%new_grd_cmd)\n except:\n pass\n \n else:\n print('%s is not queued. jobstate: %s.'%(job_num, old_stat['job_state'])) \n\n # Your job 832003 (\"SRM_ICC2_timing_dcshell%dcp570_b33\") has been submitted\n\ndef kill_job(farm,job_num):\n\n farms = {\n 'gala' : 'source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat',\n 'snps' : 'source /remote/sge/default/snps/common/settings.csh; set qstat_real = qstat',\n }\n\n job_info = get_full_stat(farm, job_num)\n\n if job_info != {}:\n kill_cmd = 'rsh -l %s localhost \\\"%s; qdel %s \\\"'%(job_info['owner'], farms[farm], job_num)\n print(kill_cmd)\n cmd_obj = subprocess.run(kill_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n\n else:\n print('%s not exists.'%job_num) \n\ndef get_user_groups(user):\n \n groups_cmd = 'rsh -l %s localhost \\\"groups\\\"'%user\n\n cmd_obj = subprocess.run(groups_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n cmd_ret = cmd_obj.stdout.decode(\"utf-8\").split()\n\n # print(user, cmd_ret)\n return cmd_ret\n\ndef get_dir_group(user,dir):\n # needs to be consulted with the right user\n cmd = 'rsh -l %s localhost \\\"ll -d %s\\\"'%(user,dir)\n try :\n cmd_obj = subprocess.run(cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n cmd_ret = cmd_obj.stdout.decode(\"utf-8\").split()\n \n return cmd_ret[3]\n except:\n print('dir %s not found with user %s'%(dir, user))\n return ''\n \ndef get_user_load(farm, user):\n \n farms = {\n 'gala' : 'source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat',\n 'snps' : 'source /remote/sge/default/snps/common/settings.csh; set qstat_real = qstat',\n }\n\n cmd = '%s; qstat -u %s | wc'%(farms[farm],user)\n\n try :\n cmd_obj = subprocess.run(cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n cmd_ret = cmd_obj.stdout.decode(\"utf-8\").strip().split()\n \n return int(cmd_ret[0].strip()) - 2\n except:\n print('dir %s not found with user %s'%(dir, user))\n return ''\n\ndef change_conf(job_num, farm, config_file):\n\n farms = {\n 'gala' : 'source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat',\n 
'snps' : 'source /remote/sge/default/snps/common/settings.csh; set qstat_real = qstat',\n }\n\n new_conf = open(config_file, 'r').read().splitlines()\n\n ptrn_cfg = 'des\\.(\\w+)\\.grd\\.opts:\\s*(.+)'\n\n des_conf = {}\n\n for line in new_conf:\n \n ptrn_cfg = 'des\\.(\\w+)\\.grd\\.opts:\\s*(.+)'\n \n m = re.match(ptrn_cfg, line)\n\n if m:\n des_conf[m.group(1)] = m.group(2)\n\n old_stat = get_full_stat('gala',job_num)\n \n\n if 'job_state' not in old_stat.keys(): old_stat['job_state'] = '' \n\n print(old_stat['job_state'])\n\n if old_stat['job_state'] == '' or old_stat['job_state'] == 'Rq' or old_stat['job_state'] == 'qw':\n try:\n old_user = old_stat['owner']\n new_user = old_user\n job_dir = old_stat['cwd']\n submit_line = old_stat['submit_cmd']\n \n design = old_stat['job_name'].split('%')[1] if '%' in old_stat['job_name'] else ''\n\n if design in des_conf:\n print('OLD: ', submit_line)\n ## lookng for the hconfig\n ptrn = '.+(-l\\s*hconfig=\\w+).+'\n m = re.match(ptrn, submit_line)\n\n if m:\n submit_line = submit_line.replace(m.group(1),des_conf[design])\n submit_line = submit_line.replace('l arch=glinux,os_bit=64', '')\n\n print('NEW: ', submit_line)\n\n kill_old_cmd = 'rsh -l %s localhost \\\"%s; qdel %s; chmod 777 %s; chmod 777 %s/*;rm -rf %s/%s.grd.out;\\\"'%(old_user,farms[farm],job_num, job_dir, job_dir, job_dir,design)\n print(kill_old_cmd)\n cmd_obj = subprocess.run(kill_old_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n \n #cmd_obj = subprocess.run(kill_old_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n #cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n re_sub_cmd = 'rsh -l %s localhost \\\"%s; cd %s; %s \\\"'%(new_user,farms[farm],job_dir,submit_line)\n print(re_sub_cmd)\n\n cmd_obj = subprocess.run(re_sub_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n new_job = ''\n\n design = job_dir.split('/')[-1]\n\n for line in cmd_ret:\n line = line.strip().split()\n\n if len(line) >= 3:\n if (line[0]=='Your') and (line[1] == 'job'):\n new_job = line[2]\n\n if new_job != None:\n new_grd_cmd = 'rsh -l %s localhost \\\"echo \\'%s\\'| tee %s/%s.grd.out\\\"'%(new_user,new_job,job_dir,design)\n try:\n os.system(new_grd_cmd)\n except:\n print('%s didnt work'%new_grd_cmd)\n\n else:\n print('No hconfig in submission command for %s %s'%(job_num,design))\n\n else: \n print('No config found for %s'%design)\n\n\n # kill_old_cmd = 'rsh -l %s localhost \\\"%s; qdel %s; chmod 777 %s; chmod 777 %s/*;rm -rf %s/%s.grd.out;\\\"'%(old_user,farms[farm],job_num, job_dir, job_dir, job_dir,design)\n # print(kill_old_cmd)\n # cmd_obj = subprocess.run(kill_old_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n \n # #cmd_obj = subprocess.run(kill_old_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n # #cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n # re_sub_cmd = 'rsh -l %s localhost \\\"%s; cd %s; %s \\\"'%(new_user,farms[farm],job_dir,submit_line)\n # print(re_sub_cmd)\n\n # cmd_obj = subprocess.run(re_sub_cmd, executable= '/bin/csh',shell = True,stdout=subprocess.PIPE)\n # cmd_ret = cmd_obj.stdout.decode(\"utf-8\").splitlines()\n\n # new_job = ''\n\n # design = job_dir.split('/')[-1]\n\n # for line in cmd_ret:\n # line = line.strip().split()\n\n # if len(line) >= 3:\n # if (line[0]=='Your') and (line[1] == 'job'):\n # new_job = line[2]\n\n # if new_job != None:\n # new_grd_cmd = 'rsh -l %s localhost \\\"echo \\'%s\\'| tee 
%s/%s.grd.out\\\"'%(new_user,new_job,job_dir,design)\n # try:\n # os.system(new_grd_cmd)\n # except:\n # print('%s didnt work'%new_grd_cmd)\n except:\n pass\n\n else:\n print('%s is not queued.'%job_num) \n\n # Your job 832003 (\"SRM_ICC2_timing_dcshell%dcp570_b33\") has been submitted\n\n#####################################################################\n\n#user_ls = 'vasquez rmorale chunwang'.split()\n\n#user_dict = {}\n\n#for user in user_ls:\n # user_dict[user] = {}\n # user_dict[user]['groups'] = get_user_groups(user)\n # user_dict[user]['load'] = get_user_load('gala',user)\n\n#pp.pprint(user_dict)\n#exit()\n#job_list = '9569729 9569730 9569731 9569733 9569734 9569737 9569738 9569741 9569744 9569751 9569752 9569754 9569755 9569756 9569758 9833159 9833171 9833176 9833180 2223469 2223470 2223471 2223472 2223473 2223474 2223475 2223476 2223477 2223478 2223479 2223480 2223481 2223482 2223483 2223484 2223485 2223486 2223487 2223488 2223489 2223490 2223491 2223492 2223493 2223494 2223495 2223496 2223497 2223499 2223501 2223502 2223503 2223504 2223507 2223508 2223509 2223510 2223511 2223512 2223513 2223514 2223516 2223517 2223518 2223521 2223522 2223523 2223525 2223526 2223527 2223528 2223531 2223532 2223534 2223535 2223536 2223537 2223538 2223540 2223543 2223544 2223545 2223546 2223548 2223549'.split()\n'''\njob_list = '4045238'.strip().split()\n\n# config = '/remote/pv/repo/dcnt/dcrt_prs_lib/gala_memusage_4c.cfg'\n\n# print('dealing with %s jobs'%str(len(job_list_1)))\n\nfor job in job_list:\n kill_job('gala', job)\n\n'''\n# # print(get_full_stat('snps', job)['submit_cmd'])\n# #move_job(new_user,job,'gala', False)\n# # initial checks before move\n# this_job= get_full_stat('gala', job)\n \n# if this_job != {}:\n# j_owner = this_job['owner']\n# j_cwd = this_job['cwd']\n# dir_group = get_dir_group(j_owner, j_cwd)\n \n\n# if dir_group != '':\n# for user in user_dict:\n# if dir_group in user_dict[user]['groups']:\n# print('%s runnable by %s'%(job, user))\n \n# move_job(user,job,'gala', False)\n \n \n# break\n# else:\n# print('%s not runnable by %s'%(job, user))\n\n# else:\n# print('job %s not found'%job)\n\n \n # print(get_dir_group(j_owner, j_cwd))\n\n'''\nrsh -l chunwang localhost \"source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat; cd /remote/platform_pv1/Secure_Data_Run/pv_armip/FAST/chunwang/slowfs/pv_scratch18/24x7/dc/P-2019.03-SP/nightly_prs/DC_ICC2_ex/D20191104_20_30/run.24x7_gala-icc2_ex.chunwang/SRMFm_ICC2_spg_opt_area/A57_Non_CPU; qsub -N SRMFm_ICC2_spg_opt_area%A57_Non_CPU -cwd -j y -l arch=glinux,os_bit=64 -ac psp -js 150 -v preexec=./*.cmd0.csh -l 'minslotcpu=4' -l 'minslotmem=2G' -l arch=glinux ./A57_Non_CPU.all.csh\n'''\n# new_user = 'rmorale'\n\n# job_list = ''' 87536 87540 87546 87550 87555 87559 87563 87569 87580 87585 87126 87127 87128 87129 87131 87132 87133 87134 87135 87136 87137 87138 87140 87141 87142 87143 87144 87146 87147 87148 87150 87151 87152 87153 87154 87155 87156 87157 87159 87160 87161 87162 87163 87164 87165 87166 87167 87168 87169 87170 87172 87174 87175 87176 87177 87178 87179 87180 87181 87183 87184 87185 87186 87187 87188 87189 87191 87192 87193 87194 87196 87197 87198 87200 87201 87202 87203 87204 87206 87207 87208 87209 87210 87211 87212 87214 87215 87216 87217 87218 87219 87220 87221 87223 87224 87225 87226 87227 87228 87229 87230 87231 87232 87233 87234 87235 87236 87237 87238 87240 87241 87242 87244 87245 87246 87247 87248 87250 87251 87252 87253 87254 87255 87256 87258 87259 87260 87261 87263 87264 87265 87266 87267 
87268 87270 87271 87272 87273'''.split()\n\n# for job in job_list:\n#     move_job(new_user,job,'gala')\n# '''\n# rsh -l estebanv localhost \"source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat; qdel 87125\"\n# rsh -l rmorale localhost \"source /remote/sge/default/galapagos/common/settings.csh; set qstat_real = qstat; cd /slowfs/pv_scratch18/24x7/dc/P-2019.03-SP/nightly_prs/DC_ICC2/D20191007_20_30/run.24x7_gala-icc2.estebanv/SRM_ICC2_timing_dcshell/dcp570_b33; qsub -N SRM_ICC2_timing_dcshell%dcp570_b33 -cwd -j y -l arch=glinux,os_bit=64 -ac psp -l hconfig=bwl24d2a4 -l arch=glinux ./dcp570_b33.all.csh \"\n# '''\n#print(get_full_stat('gala',job_num)['cwd'])\n","sub_path":"grid_utils/balancer.py","file_name":"balancer.py","file_ext":"py","file_size_in_byte":14578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"128121862","text":"\nimport openpyxl\n \n# Open the Excel file\n# To open an Excel file, call the openpyxl.load_workbook(filename) function to get a Workbook object\nwb = openpyxl.load_workbook('scores.xlsx')\n \n# Get the currently active sheet\nws = wb.active\n# ws = wb.get_sheet_by_name(\"Sheet1\")\n \n# Read the Korean/English/math scores\n# To fetch the rows one at a time, iterate over ws.rows with a for loop\n# Each row is the collection of cells in that row; the first cell uses index 0, as in r[0].\n# To get the value of the first cell, i.e. r[0], use r[0].value.\nfor r in ws.rows:\n \n    row_index = r[0].row # row index\n    kor = r[1].value\n    eng = r[2].value\n    math = r[3].value\n    sum = kor + eng + math\n \n    # Write the total\n    ws.cell(row=row_index, column=5).value = sum\n \n    # Print the Korean, English, math and total scores\n    print(kor, eng, math, sum)\n \n# Save the Excel file\nwb.save(\"scores_result.xlsx\")\nwb.close()","sub_path":"PythonPackages/src/openpyxl/02/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"579751503","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass TfvcVersionDescriptor(Model):\n \"\"\"TfvcVersionDescriptor.\n\n :param version:\n :type version: str\n :param version_option:\n :type version_option: object\n :param version_type:\n :type version_type: object\n \"\"\"\n\n _attribute_map = {\n 'version': {'key': 'version', 'type': 'str'},\n 'version_option': {'key': 'versionOption', 'type': 'object'},\n 'version_type': {'key': 'versionType', 'type': 'object'}\n }\n\n def __init__(self, version=None, version_option=None, version_type=None):\n super(TfvcVersionDescriptor, self).__init__()\n self.version = version\n self.version_option = version_option\n self.version_type = version_type\n","sub_path":"vsts/vsts/tfvc/v4_0/models/tfvc_version_descriptor.py","file_name":"tfvc_version_descriptor.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"120343086","text":"from graphviz import Digraph\r\nimport torch\r\nfrom torch.autograd import Variable\r\n\r\n# Acknowledgements: https://github.com/szagoruyko/functional-zoo/blob/master/resnet-18-export.ipynb\r\n\r\n# I need to constantly define the path environment for the GraphViz application. (I tried setting it in environment variablees, however it doesn't work)\r\n# Hence, set the path environment variable as:\r\nimport os\r\nos.environ[\"PATH\"] += os.pathsep + r'C:\\Program Files (x86)\\Graphviz2.38\\bin'\r\n\r\n# The function for making the actual graph:\r\ndef make_dot(var, params=None):\r\n \"\"\" \r\n Blue nodes are the Variables that require grad, grey are Tensors\r\n saved for backward in torch.autograd.Function i.e. 
this is where the gradient will be backpropagated\r\n Args:\r\n var: output Variable\r\n params: model.state_dict()\r\n \"\"\"\r\n if params is not None:\r\n #assert all(isinstance(p, Variable) for p in params.values()) \r\n param_map = {id(v): k for k, v in params.items()}\r\n\r\n\r\n node_attr = dict(style='filled',\r\n shape='box',\r\n align='left',\r\n fontsize='12',\r\n ranksep='0.1',\r\n height='0.2')\r\n dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\"))\r\n seen = set()\r\n\r\n def size_to_str(size):\r\n return '('+(', ').join(['%d' % v for v in size])+')'\r\n\r\n def add_nodes(var):\r\n if var not in seen:\r\n if torch.is_tensor(var):\r\n dot.node(str(id(var)), size_to_str(var.size()), fillcolor='grey')\r\n elif hasattr(var, 'variable'):\r\n u = var.variable\r\n #name = param_map[id(u)] if params is not None else ''\r\n #node_name = '%s\\n %s' % (name, size_to_str(u.size()))\r\n node_name = '%s\\n %s' % (param_map.get(id(u.data)), size_to_str(u.size()))\r\n dot.node(str(id(var)), node_name, fillcolor='lightblue')\r\n \r\n else:\r\n dot.node(str(id(var)), str(type(var).__name__))\r\n seen.add(var)\r\n if hasattr(var, 'next_functions'):\r\n for u in var.next_functions:\r\n if u[0] is not None:\r\n dot.edge(str(id(u[0])), str(id(var)))\r\n add_nodes(u[0])\r\n if hasattr(var, 'saved_tensors'):\r\n for t in var.saved_tensors:\r\n dot.edge(str(id(t)), str(id(var)))\r\n add_nodes(t)\r\n add_nodes(var.grad_fn)\r\n return dot\r\n# -------------------------------- Example use of the function ----------------------------------------\r\n# Example Use:\r\n\r\nfrom torchvision import models\r\n\r\ntorch.manual_seed(1)\r\ninputs = torch.randn(1,3,224,224)\r\nmodel = models.alexnet(pretrained=False)\r\ny = model(Variable(inputs))\r\nprint(y)\r\n\r\n\r\ng = make_dot(y, params=model.state_dict())\r\n#g.view()\r\ng\r\n","sub_path":"visualise_model_graph.py","file_name":"visualise_model_graph.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"445868167","text":"from ast import Str\nimport sys\nimport os\nfrom datetime import datetime\n\ntags = ['mobyy/navs','resource/biz','mobyy/nav/index/idx']\nappLogs = []\nepoch = datetime.utcfromtimestamp(0)\n\nclass UrlRunTime:\n start = epoch\n complete = epoch\n traceId = \"\"\n urltext = \"\"\n succeed = False\n\nclass AppRun:\n appRunDate = epoch\n urlList = []\n\ndef timeDiff(dt1, dt2):\n return (dt1 - dt2).total_seconds()\n\ndef urlFromLine(l):\n for x in tags:\n if x in l:\n return x;\n return ''\n\ndef timeFromLine(lw):\n ls = lw.split()\n if len(ls) <= 2:\n return epoch\n \n ts = ls[0] + ' ' + ls[1]\n\n a = ts.split('.')\n if len(a) > 1:\n if len(a[1]) == 1:\n ts = a[0]+'.00'+a[1]\n elif len(a[1]) == 2:\n ts = a[0]+'.0'+a[1]\n\n if len(ts) > 24:\n return datetime.utcfromtimestamp(100000)\n\n t = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')\n return t\n\ndef findUrl(id, list):\n for x in list:\n if x.traceId == id:\n return x\n return None\n\ndef runDataFromLine(appRun, l):\n lw = l.lower()\n i = lw.find(\"traceid:\")\n if i < 0:\n return None\n i += len(\"traceid:\")\n id = lw[i:len(lw)].strip('\\n').strip('\\r').strip()\n\n r = findUrl(id, appRun.urlList)\n if r == None:\n r = UrlRunTime()\n appRun.urlList.append(r)\n\n r.traceId = id\n\n t = timeFromLine(lw)\n if t == epoch:\n return None\n\n if r.start == epoch:\n r.start = t\n else:\n r.complete = t\n \n urlText = urlFromLine(l)\n if len(r.urltext) == 0 and len(urlText) > 0:\n 
r.urltext = urlText\n\n if 'Request Success!!' in l:\n r.succeed = True\n return r\n\ndef appRunTimeFromLine(l):\n if 'app started' in l:\n return datetime.utcfromtimestamp(1)\n return epoch\n\ndef loadLog(path):\n print(\"loading \" + path)\n file = open(path, 'r')\n lines = file.readlines()\n\n appRun = AppRun()\n for l in lines:\n t = appRunTimeFromLine(l)\n if t != epoch:\n run = AppRun()\n run.appRunDate = t\n appLogs.append(run)\n run.urlList = []\n appRun = run\n elif appRun.appRunDate != epoch:\n for x in tags:\n if x in l:\n runDataFromLine(appRun, l)\n break\n\n\n\n print(\"app run times:\" + str(len(appLogs)))\n resultPath = path + \".csv\"\n \n f = open(resultPath, \"w\")\n f.write(\"apprun, Url,TraceId,start,complete,Duration,Result\\n\")\n index = 0\n for log in appLogs:\n index = index + 1\n for r in log.urlList:\n f.write(str(index)+\",\"+r.urltext + \",\" + r.traceId + \",\" + str(r.start) + \",\" + str(r.complete) +\",\" + str(timeDiff(r.complete, r.start)) + \",\" + str(r.succeed) + os.linesep)\n f.close()\n print(\"output: \" + resultPath)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n loadLog(os.path.abspath(sys.argv[1]))\n else :\n print(\"python analysis.py logfile_path\")","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"565751503","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom datetime import datetime, timedelta\nfrom django.views import generic\n \nfrom myblog import models as m\nfrom myblog.forms import PostForm\n\ndef post_upload(request):\n if request.method == 'GET':\n return render(request, 'myblog/upload.html', {})\n elif request.method == 'POST':\n post = m.Post.objects.create(content=request.POST['content'],\n created_at=datetime.utcnow())\n # No need to call post.save() at this point -- it's already saved.\n return HttpResponseRedirect(reverse('myblog:post_detail', kwargs={'post_id': post.id}))\n\nclass PostIndexView(generic.ListView):\n template_name = 'myblog/index.html'\n context_object_name = 'post_list'\n model = m.Post\n \n def get_queryset(self):\n ''' Return posts that are created less than two days ago. 
'''\n two_days_ago = datetime.utcnow() - timedelta(days=2)\n return m.Post.objects.filter(created_at__gt=two_days_ago).all()\n\nclass PostDetailView(generic.DetailView):\n template_name = 'myblog/detail.html'\n model = m.Post\n \ndef index(request):\n two_days_ago = datetime.utcnow() - timedelta(days=2)\n \n # Retrieve a list of posts that are created less than two days ago\n recent_posts = m.Post.objects.filter(created_at__gt=two_days_ago).all()\n context = {'post_list':recent_posts}\n return render(request, 'myblog/index.html', context)\n\n# post_detail accepts two arguments: the normal request object and an integer\n# whose value is mapped by post_id defined in r'^post/(?P\\d+)/detail.html$'\ndef post_detail(request, post_id):\n try:\n post = m.Post.objects.get(pk=post_id)\n except m.Post.DoesNotExist:\n raise Http404(\"Post does not Exist\")\n return render(request, 'myblog/detail.html', {'post': post})\n\n# Add the following function to the end of myblog/views.py\n\ndef post_form_upload(request):\n if request.method == 'GET':\n form = PostForm()\n else:\n # A POST request: Handle Form Upload\n form = PostForm(request.POST) # Bind data from request.POST into a PostForm\n \n # If data is valid, proceeds to create a new post and redirect the user\n if form.is_valid():\n content = form.cleaned_data['content']\n created_at = form.cleaned_data['created_at']\n post = m.Post.objects.create(content=content,\n created_at=created_at)\n return HttpResponseRedirect(reverse('myblog:post_detail',\n kwargs={'post_id': post.id}))\n \n return render(request, 'myblog/post_form_upload.html', {\n 'form': form,\n })\n","sub_path":"myblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"506729432","text":"from django import forms\n\nfrom apps.reader.models.loan_model import Loan\nfrom apps.book.models.book_model import Book\n\n\nclass LoanForm(forms.ModelForm):\n\n class Meta:\n model = Loan\n fields = ('reader', 'book')\n\n\nclass MultipleLoanForm(forms.ModelForm):\n\n books = forms.ModelMultipleChoiceField(\n queryset=None,\n required=True,\n widget=forms.CheckboxSelectMultiple,\n )\n\n class Meta:\n model = Loan\n fields = ('reader',)\n\n def __init__(self, *args, **kwargs):\n super(MultipleLoanForm, self).__init__(*args, **kwargs)\n self.fields['books'].queryset = Book.objects.all()\n","sub_path":"apps/reader/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"26845901","text":"from torch.autograd import Variable\nimport torch\nfrom torch import nn\nimport numpy as np\n\n\nclass DeformConv2D(nn.Module):\n def __init__(self, inc, outc, kernel_size=3, padding=1):\n super(DeformConv2D, self).__init__()\n self.kernel_size = kernel_size\n self.N = kernel_size**2\n self.padding = padding\n # Note: As illustrated in the paper, conv_offset's weights should be initialed with 0.\n self.conv_offset = nn.Conv2d(inc, 2*self.N, kernel_size=kernel_size, padding=padding)\n self.conv_kernel = nn.Conv2d(inc, outc, kernel_size=kernel_size, stride=kernel_size)\n\n def forward(self, x):\n # (b, 2N, h, w)\n offset = self.conv_offset(x)\n dtype = offset.data.type()\n b, c, h, w = x.size()\n ks = self.kernel_size\n N = self.N\n zero = Variable(torch.FloatTensor([0]).type_as(offset.data), requires_grad=True)\n\n # (1, 2N, 1, 1)\n p_n = self._get_p_n(dtype)\n\n # (1, 2N, h, 
w)\n p_0 = self._get_p_0(x.size(), dtype)\n\n p = (p_0 + p_n + offset).unsqueeze(dim=-1).unsqueeze(dim=-1)\n # (b, 2N, h, w, 1, 1)\n p = p.expand(-1, -1, -1, -1, 1, 1)\n\n # (h, w)\n q = self._get_q(x.size(), dtype)\n\n # (b, N, h, w, h, w)\n G = torch.max((1-torch.abs(p[:, :N, :, :, :, :] - q[0, :, :])), zero)\\\n * torch.max((1-torch.abs(p[:, N:, :, :, :, :] - q[1, :, :])), zero)\n # (b, N*h*w, h*w)\n G = G.contiguous().view(b, N*h*w, -1)\n # (b, h*w, c)\n x = x.permute(0, 2, 3, 1).contiguous().view(b, -1, c)\n # (b, c, h, w, N)\n x_offset = torch.bmm(G, x).contiguous().view(b, N, h, w, c).permute(0, 4, 2, 3, 1)\n # (b, c, h*kernel_size, w*kernel_size)\n x_offset = self._reshape_x_offset(x_offset, ks)\n out = self.conv_kernel(x_offset)\n\n return out\n\n def _get_p_n(self, dtype):\n p_n_x, p_n_y = np.meshgrid(range(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1),\n range(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1), indexing='ij')\n # (2N, 1), order[x1, x2, ..., y1, y2, ...]\n p_n = np.concatenate((p_n_x.flatten(), p_n_y.flatten()))\n p_n = np.reshape(p_n, (1, 2*self.N, 1, 1)) # .repeat(b, axis=0).repeat(h, axis=2).repeat(w, axis=3)\n p_n = Variable(torch.from_numpy(p_n).type(dtype), requires_grad=False)\n\n return p_n\n\n def _get_p_0(self, x_size, dtype):\n b, c, h, w = x_size\n p_0_x, p_0_y = np.meshgrid(range(0, h), range(0, w), indexing='ij')\n p_0_x = p_0_x.flatten().reshape(1, 1, h, w).repeat(self.N, axis=1)\n p_0_y = p_0_y.flatten().reshape(1, 1, h, w).repeat(self.N, axis=1)\n p_0 = np.concatenate((p_0_x, p_0_y), axis=1)\n p_0 = Variable(torch.from_numpy(p_0).type(dtype), requires_grad=False)\n\n return p_0\n\n def _get_q(self, x_size, dtype):\n b, c, h, w = x_size\n q_x, q_y = np.meshgrid(range(0, h), range(0, w), indexing='ij')\n q_x = q_x.flatten().reshape(1, h, w)\n q_y = q_y.flatten().reshape(1, h, w)\n q = np.concatenate((q_x, q_y))\n q = Variable(torch.from_numpy(q).type(dtype), requires_grad=False)\n\n return q\n\n def _reshape_x_offset(self, x_offset, ks):\n b, c, h, w, N = x_offset.size()\n x_offset = torch.cat([x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)\n x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)\n\n return x_offset\n","sub_path":"deform_conv.py","file_name":"deform_conv.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"454577894","text":"from WolfSheep import Wolf, Sheep, WolfSheepPredation\nfrom mesa.visualization.CanvasServer import CanvasServer\n\n\ndef wolf_sheep_portrayal(agent):\n if agent is None:\n return\n\n portrayal = {\"Shape\": \"circle\",\n \"x\": agent.x, \"y\": agent.y,\n \"Filled\": \"true\"}\n\n if type(agent) is Sheep:\n portrayal[\"Color\"] = \"#666666\"\n portrayal[\"r\"] = 0.8\n portrayal[\"Layer\"] = 0\n\n elif type(agent) is Wolf:\n portrayal[\"Color\"] = \"#AA0000\"\n portrayal[\"r\"] = 0.5\n portrayal[\"Layer\"] = 1\n return portrayal\n\nserver = CanvasServer(WolfSheepPredation, wolf_sheep_portrayal, 500, 500,\n \"WolfSheep\")\nserver.launch()\n","sub_path":"examples/WolfSheep/WolfSheepServer.py","file_name":"WolfSheepServer.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"441135915","text":"import tensorflow as tf\nfrom keras.models import model_from_json\nimport pandas as pd\nfrom keras.models import load_model\nimport numpy as np\n\nmodel_dir = 
'/media/kevinpeng/cdrive/Users/kevin.peng/code/Depthshift_Depthjump_corrected/Newdata/DepthLogs_cartesian_new/result'\n\n\n# The export path contains the name and the version of the model\n# tf.keras.backend.set_learning_phase(0) # Ignore dropout at inference\n\n# load json and create model\n# json_file = open('../models/keras_well/AnnModel/model_ann_full__2019_09_17_16_02_09.json', 'r')\n# loaded_model_json = json_file.read()\n# json_file.close()\n# model = model_from_json(loaded_model_json)\n# # load weights into new model\n# model.load_weights(\"../models/keras_well/AnnModel/model_ann_full__2019_09_17_16_02_09.h5\")\n# print(\"Loaded model from disk\")\n#\n#\n# model = tf.keras.models.load_model('../models/keras_well/AnnModel/model_ann_full__2019_09_17_16_02_09.json')\n# model = model\n\ndata = [-0.73907974, 0.30734958, -1.48759525, -1.41191009, 1.63201119,\n -3.08260906, 1.69228755, 0.14006269, -3.08371907, -0.29302639,\n 1.64547556, -0.21070207, -1.24723066, 1.20729805, -0.16440292,\n -1.14380009, 1.38035798, 0.58098913, -1.56153634, 1.52001141,\n -2.09498595, -0.67419747, 0.11461625, -3.71976738, -1.80643255]\n\n# Fetch the Keras session and save the model\n# The signature definition is defined by the input and output tensors\n# And stored with the default serving key\nwith tf.keras.backend.get_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n tf.keras.backend.set_learning_phase(0) # Ignore dropout at inference\n model = load_model(f\"{model_dir}/ann.h5\")\n outputs = {t.name: t for t in model.outputs}\n export_path = '../models/keras_well/AnnModel/2'\n tf.saved_model.simple_save(\n sess,\n export_path,\n inputs={'input_image': model.input},\n outputs={t.name: t for t in model.outputs})\n\n\n\n\n","sub_path":"simple_tensorflow_serving/export_saved_keras_model.py","file_name":"export_saved_keras_model.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"423906211","text":"import sys\nimport os\nimport math\nimport pandas as pd\n\nfrom text.Message import Message\nfrom grammar.MessageTokenizer import MessageTokenizer\nfrom segmenter.ConversationSegmenter import ConversationSegmenter\nfrom text.JSONParser import JSONParser\n\nclass SegmenterRunner:\n def __init__(self, json_file_name, output_folder=None):\n self.json_file_name = json_file_name\n self.output_folder = output_folder\n self.topics_table = None\n\n\n def run(self):\n parser = JSONParser(self.json_file_name)\n self.messages = parser.getMessages()\n self.tokenizer = MessageTokenizer()\n windowSize = 3\n cosineSimilarityThreshold = 0.8\n segmenter = ConversationSegmenter(\n self.messages, windowSize, cosineSimilarityThreshold, self.tokenizer)\n self.topics = segmenter.segment()\n\n self.build_table()\n\n if self.output_folder is not None:\n self.report_table()\n else:\n self.report()\n\n\n def build_table(self):\n \"\"\" Builds a table for each of topics \"\"\"\n TOTAL_TOPICS = len(self.topics)\n\n for i, topic in enumerate(self.topics):\n\n ## TODO: get-topic-name\n topic_name = 'topic-{num:0{l}}'.format(num=i, l=int(math.floor(math.log10(TOTAL_TOPICS))) + 1)\n\n _messages = topic.getMessages()\n _reasons = topic.getReasons()\n\n topic_table = pd.DataFrame({'ID': map(lambda m: m.getID(), _messages),\n 'text': map(lambda m: m.getText(), _messages),\n 'reason': _reasons })\n topic_table['topic'] = topic_name\n\n # append to list of topic-tables\n if self.topics_table is 
not None:\n # merge topic into table\n self.topics_table = self.topics_table.append(topic_table, ignore_index=True)\n else:\n self.topics_table = topic_table\n\n\n def report_table(self):\n # Check existence of the output folder and create if necessary\n if not os.path.exists(self.output_folder):\n print(' - The specified folder was not found... folder will be created: \\033[36m{}\\033[0m'.format(self.output_folder))\n os.makedirs(self.output_folder)\n\n # Parse output path/file name and save table to topics_CHANNEL.csv\n filename = self.json_file_name.split('/')[-1].replace('.json', '')\n folderpath = self.output_folder[:-1] if self.output_folder.endswith('/') else self.output_folder\n out_path = '{path}/topics_{name}.csv'.format(path=folderpath, name=filename)\n self.topics_table.to_csv(out_path, encoding='utf-8')\n\n # Report output table\n print(' --> Output Topic table: \\033[32m {} \\033[0m'.format(out_path))\n\n\n def report(self):\n idGroups = []\n print(\"============================= detailed ========================\")\n for topic in self.topics:\n print(\"== Topic ==\")\n idGroup = []\n for (message, reason) in zip(topic.getMessages(), topic.getReasons()):\n idGroup.append(message.getID())\n print(\"\\n\\t------ id: \\t\" + str(message.getID()) + \"\\t\" + reason)\n print(\"\" + message.getText())\n print(\"\\n\")\n idGroups.append(idGroup)\n\n print(\"===============================\")\n\n print(\"============================= short ========================\")\n for topic in self.topics:\n print(\"== Topic ==\")\n for message in topic.getMessages():\n print(str(message.getID()) + \":\\t\" + message.getText())\n print(\"\\n\")\n\n print(idGroups)\n\n\n\ndef main(json_input, output_folder=None):\n SegmenterRunner(json_input, output_folder).run()\n\n\nif __name__ == '__main__':\n main(*sys.argv[1:]) # optionally might include a output_folder specification\n","sub_path":"run_segmenter.py","file_name":"run_segmenter.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"528036073","text":"#\n# Story Time App\n# Main flask app configuration and website route definitions\n#\n\nimport datetime\nimport json\nimport os\nimport random\nimport string\n\nimport httplib2\nimport requests\nfrom flask import Flask, flash, jsonify, make_response, redirect, render_template, request, \\\n session as login_session, url_for\nfrom flask_uploads import configure_uploads\nfrom oauth2client.client import FlowExchangeError, OAuth2Credentials, flow_from_clientsecrets\nfrom werkzeug.exceptions import HTTPException, NotFound, default_exceptions\n\nfrom storytime import story_time_service\nfrom storytime.file_storage_service import upload_set_photos\nfrom storytime.sec_util import AuthProvider, LoginSessionKeys, csrf_protect, do_authorization, is_user_authenticated, \\\n login_required, reset_user_session, store_user_session\nfrom storytime.story_time_db_init import Story, User\nfrom storytime.web_api import web_api\n\n# Auth\nGOOGLE_CLIENT_SECRETS_JSON = os.path.join(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config/client_secrets_google.json'))\nGOOGLE_CLIENT_ID = json.loads(open(GOOGLE_CLIENT_SECRETS_JSON, 'r').read())['web']['client_id']\nFACEBOOK_CLIENT_SECRETS_JSON = os.path.join(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config/client_secrets_facebook.json'))\nFACEBOOK_APP_ID = json.loads(open(FACEBOOK_CLIENT_SECRETS_JSON, 
'r').read())['web']['app_id']\nFACEBOOK_APP_SECRET = json.loads(open(FACEBOOK_CLIENT_SECRETS_JSON, 'r').read())['web']['app_secret']\n\n# Setup Flask App\napp = Flask(__name__)\napp.url_map.strict_slashes = False\napp.register_blueprint(web_api)\n\n# Setup File Handling with Flask & Flask-Uploads\napp.config['MAX_CONTENT_LENGTH'] = 512 * 1024 # 512 KB\napp.config['UPLOADED_PHOTOS_DEST'] = os.path.join(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'static/upload/img'))\nconfigure_uploads(app, upload_set_photos)\n\n\n# Configure Template Filters\n@app.template_filter('format_date')\ndef format_date(date: datetime):\n return date.strftime('%B %d, %Y')\n\n\n# Configure Global Exception Handling for website and API\ndef handle_exception(exc):\n \"\"\"\n Global exception handler for exceptions raised from website and API.\n :param exc: the exception being raised\n :return: the response which is either 1) a rendered HTML template or 2) a JSON response\n in the case of a call to the API or an XMLHttpRequest\n \"\"\"\n code = 500\n message = 'An unexpected error occurred while processing your request.'\n url = request.url\n\n if isinstance(exc, HTTPException):\n code = exc.code\n message = exc.description\n\n # Return JSON if they were trying to access the api or if the request is an XMLHttpRequest\n if request.path.startswith('/api') or request.is_xhr:\n message = {\n 'status': code,\n 'message': message,\n 'url': url\n }\n\n return jsonify(message), code\n\n return render_template('error.html', error_code=code, error_message=message), code\n\n\n# Register handle_exception with all error handlers\nfor exc in default_exceptions:\n app.register_error_handler(exc, handle_exception)\n\n\n# WEBSITE ROUTE DEFINITIONS\n@app.route('/', methods=['GET'])\ndef index():\n stories_count = story_time_service.get_published_stories_count()\n stories = story_time_service.get_published_stories(count=12)\n return render_template('index.html', stories=stories, stories_count=stories_count)\n\n\n@app.route('/login', methods=['GET'])\ndef login():\n # Create a state token to prevent request forgery.\n # Store it in the session for later verification\n csrf_token = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))\n login_session[LoginSessionKeys.CSRF_TOKEN.value] = csrf_token\n return render_template('login.html', csrf_token=csrf_token)\n\n\n@app.route('/login-google', methods=['POST'])\n@csrf_protect(xhr_only=True)\ndef login_google():\n # Obtain one-time-use authorization code\n one_time_auth_code = request.data\n\n # Upgrade the authorization code into a credentials object\n try:\n oauth_flow = flow_from_clientsecrets(GOOGLE_CLIENT_SECRETS_JSON, scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(one_time_auth_code)\n except FlowExchangeError:\n response = make_response(json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token={}'.format(access_token))\n h = httplib2.Http()\n result = json.loads(str(h.request(url, 'GET')[1], 'utf-8'))\n\n # If there was an error in the access token info, abort\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token 
is used for the intended user\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(json.dumps('Token''s user ID doesn''t match given user ID.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app\n if result['issued_to'] != GOOGLE_CLIENT_ID:\n response = make_response(json.dumps('Token''s client ID does not match app''s'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check to see if a user is already logged in\n stored_credentials_json = login_session.get(LoginSessionKeys.GOOGLE_CREDENTIALS_JSON.value)\n stored_credentials = None if not stored_credentials_json else OAuth2Credentials.from_json(stored_credentials_json)\n stored_gplus_id = login_session.get(LoginSessionKeys.GOOGLE_ID.value)\n\n if stored_credentials is not None:\n if gplus_id == stored_gplus_id:\n # If the user is already logged in\n response = make_response(json.dumps('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n # If a new user is logging in before the previous user logged out,\n # then reset the session before creating a new one\n reset_user_session()\n\n # Get user info\n user_info_url = 'https://www.googleapis.com/oauth2/v1/userinfo'\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(user_info_url, params=params)\n\n data = json.loads(answer.text)\n username = data['name']\n email = data['email']\n picture = data['picture']\n\n # Store user_id in session by saving new user or getting id if existing\n user_id = story_time_service.get_user_id_by_email(email)\n if not user_id:\n user_id = story_time_service.create_user(User(name=username, email=email, active=True))\n\n # Store the session information\n store_user_session(user_id=user_id, username=username, email=email, picture=picture, provider=AuthProvider.GOOGLE,\n google_credentials_json=credentials.to_json(), google_id=gplus_id)\n\n return 'Login successful'\n\n\n@app.route('/login-facebook', methods=['POST'])\n@csrf_protect(xhr_only=True)\ndef login_facebook():\n # Obtain one-time-use authorization code\n one_time_auth_code = request.get_data(as_text=True)\n\n # Exchange client token for long lived server side token\n url = 'https://graph.facebook.com/v2.12/oauth/access_token?grant_type=fb_exchange_token&client_id={}&client_secret={}&fb_exchange_token={}'.format(\n FACEBOOK_APP_ID, FACEBOOK_APP_SECRET, one_time_auth_code)\n\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n\n # Use token to get user info from API\n token_json = json.loads(str(result, 'utf-8'))\n token = token_json['access_token']\n\n url = 'https://graph.facebook.com/v2.12/me?access_token={}&fields=name,email,id'.format(token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data_me = json.loads(str(result, 'utf-8'))\n\n # Get user picture\n url = 'https://graph.facebook.com/v2.12/me/picture?access_token={}&redirect=0&height=200&width=200'.format(token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data_picture = json.loads(str(result, 'utf-8'))\n\n facebook_id = data_me['id']\n username = data_me['name']\n email = data_me['email']\n picture = data_picture['data']['url']\n\n # Store user_id in session by saving new user or getting id if existing\n user_id = story_time_service.get_user_id_by_email(email)\n if not user_id:\n user_id = 
story_time_service.create_user(User(name=username, email=email, active=True))\n\n # Store the session information\n store_user_session(user_id=user_id, username=username, email=email, picture=picture, provider=AuthProvider.FACEBOOK,\n facebook_id=facebook_id)\n\n return 'Login successful'\n\n\n@app.route('/logout', methods=['POST'])\ndef logout():\n # Redirect to index if user not logged in\n if not is_user_authenticated():\n return redirect(url_for('index'))\n\n auth_provider = login_session.get(LoginSessionKeys.PROVIDER.value)\n\n if auth_provider == AuthProvider.GOOGLE.value:\n # Get oauth2 credentials and only disconnect a connected user\n credentials = OAuth2Credentials.from_json(login_session.get(LoginSessionKeys.GOOGLE_CREDENTIALS_JSON.value))\n\n # Tell Google to revoke current token\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token={}'.format(access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] != '200':\n # For whatever reason, the given token was invalid\n print('google revoke token failed; received {}'.format(result['status']))\n elif auth_provider == AuthProvider.FACEBOOK.value:\n # Tell FB to reject access token\n facebook_id = login_session[LoginSessionKeys.FACEBOOK_ID.value]\n url = 'https://graph.facebook.com/{}/permissions'.format(facebook_id)\n h = httplib2.Http()\n result = h.request(url, 'DELETE')[1]\n\n # Reset the user's session\n reset_user_session()\n flash('You have logged out successfully.', 'success')\n return redirect(url_for('index'))\n\n\n@app.route('/dashboard', methods=['GET'])\n@login_required\ndef user_dashboard():\n stories = story_time_service.get_stories_by_user_id(login_session[LoginSessionKeys.USER_ID.value])\n return render_template('user_dashboard.html', stories=stories,\n username=login_session.get(LoginSessionKeys.USERNAME.value),\n email=login_session.get(LoginSessionKeys.EMAIL.value),\n picture=login_session.get(LoginSessionKeys.PICTURE.value))\n\n\n@app.route('/stories/create', methods=['GET'])\n@login_required\ndef get_create_story_page():\n categories = story_time_service.get_categories()\n return render_template('create_story.html', categories=categories,\n csrf_token=login_session.get(LoginSessionKeys.CSRF_TOKEN.value))\n\n\n@app.route('/stories//edit', methods=['GET'])\n@login_required\ndef get_edit_story_page(story_id):\n story = story_time_service.get_story_by_id(story_id)\n\n # Resource check - 404\n if not story:\n raise NotFound\n\n # Auth check - 401\n do_authorization(story.user_id)\n\n if story:\n categories = story_time_service.get_categories()\n return render_template('edit_story.html', story=story, categories=categories,\n csrf_token=login_session.get(LoginSessionKeys.CSRF_TOKEN.value))\n else:\n return redirect(url_for('user_dashboard'))\n\n\n@app.route('/stories//delete', methods=['POST'])\n@login_required\n@csrf_protect()\ndef delete_story(story_id):\n story = story_time_service.get_story_by_id(story_id=story_id)\n\n # Resource check - 404\n if not story:\n raise NotFound\n\n # Auth check - 401\n do_authorization(story.user_id)\n\n # Delete story and file\n story_time_service.delete_story(story.id)\n\n success_message = 'Successfully deleted story \"{}\".'.format(story.title)\n flash(success_message, 'success')\n return redirect(url_for('user_dashboard'))\n\n\n@app.route('/stories/', methods=['GET'])\ndef view_story(story_id):\n story = story_time_service.get_story_by_id(story_id=story_id)\n\n # Resource check - 404\n if not 
story:\n raise NotFound\n\n story_text_paragraphs = story.story_text.splitlines()\n return render_template('view_story.html', story=story, story_text_paragraphs=story_text_paragraphs,\n csrf_token=login_session.get(LoginSessionKeys.CSRF_TOKEN.value))\n\n\n@app.route('/stories/random', methods=['GET'])\ndef view_story_random():\n story = story_time_service.get_story_random()\n return redirect(url_for('view_story', story_id=story.id))\n\n\n@app.route('/stories/create', methods=['POST'])\n@login_required\n@csrf_protect()\ndef create_story():\n # Get story categories from form input\n category_ids = request.form.getlist('categories', type=int)\n categories = story_time_service.get_categories_by_ids(category_ids=category_ids)\n\n # Create story object from form input\n story = Story(title=request.form.get('title', None),\n description=request.form.get('description', None),\n story_text=request.form.get('text', None),\n published=bool(request.form.get('published')),\n categories=categories,\n user_id=login_session[LoginSessionKeys.USER_ID.value])\n\n # Validate required fields\n if not (story.title, story.description, story.story_text):\n error_message = 'You must specify the title, description and text for your story.'\n flash(error_message, 'danger')\n return redirect(url_for('get_create_story_page'))\n\n # Get the attached file if present\n file = None\n if 'story-thumbnail' in request.files:\n if request.files.get('story-thumbnail').filename:\n file = request.files['story-thumbnail']\n\n # Save Story and file\n story_time_service.create_story(story=story, image_file=file)\n\n # Render view\n success_message = 'Created {} successfully.'.format(story.title)\n flash(success_message, 'success')\n return redirect(url_for('view_story', story_id=story.id))\n\n\n@app.route('/stories//edit', methods=['POST'])\n@login_required\n@csrf_protect()\ndef edit_story(story_id):\n story = story_time_service.get_story_by_id(story_id)\n\n # Resource check - 404\n if not story:\n raise NotFound\n\n # Auth check - 401\n do_authorization(story.user_id)\n\n # Get the attached file if present\n file = None\n if 'story-thumbnail' in request.files:\n if request.files.get('story-thumbnail').filename:\n file = request.files['story-thumbnail']\n\n # Get remove existing image flag from form input or existence of new file\n remove_existing_image = bool(request.form.get('remove-existing-thumbnail')) or bool(file)\n\n # Update Story object from form input\n story.title = request.form.get('title', story.title)\n story.description = request.form.get('description', story.description)\n story.story_text = request.form.get('text', story.story_text)\n category_ids = request.form.getlist('categories', type=int)\n story.categories = story_time_service.get_categories_by_ids(category_ids=category_ids)\n story.published = bool(request.form.get('published'))\n\n # Validate required fields\n if not (story.title, story.description, story.story_text):\n error_message = 'You must specify the title, description and text for your story!'\n flash(error_message, 'danger')\n return redirect(url_for('get_edit_story_page'))\n\n # Save Story and File\n story_time_service.update_story(story=story, remove_existing_image=remove_existing_image, new_image_file=file)\n\n # Render View\n success_message = 'Updated {} successfully.'.format(story.title)\n flash(success_message, 'success')\n return redirect(url_for('view_story', story_id=story_id))\n\n\n# -------------------- MAIN\nif __name__ == '__main__':\n app.config['DEMO'] = False\n app.secret_key = 
'super_secret_key'\n app.debug = True\n app.jinja_env.auto_reload = True\n app.run(host='localhost', port=8000)\n","sub_path":"storytime/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"126780724","text":"\"\"\"Userpages dataset.\"\"\"\n\nimport os\nimport re\n# Scrapy\nfrom scrapy import Spider, Request\n# Wikiminer\nfrom wikiminer.web.spider_components import WikiMinerApiSpider\nfrom wikiminer.web.pipeline_components import WikiMinerSpiderPersistancePipeline\nfrom wikiminer.utils.odm import get_usernames\nfrom wikiminer.utils.wiki import prepend_ns\nfrom wikiminer.utils.path import get_root_path, make_dirpath, get_module_name\nfrom wikiminer.utils.misc import flatten\nfrom wikiminer.datastructures import Queue\nfrom wikiminer.odm.models import Userpage\nfrom wikiminer.tasks import Task, SpiderAction, Action\n\n# pylint: disable=W0212\n\nclass UserpagesSpider(Spider, WikiMinerApiSpider):\n \"\"\"Spider for getting userpages data via the Wikipedia API.\"\"\"\n name = 'userpages'\n speed = 6\n ns = (2, 3)\n _users = []\n _rx_exclude = re.compile(\n r\"template|sandbox|draft|\\.js|\\.php|\\.css\",\n re.IGNORECASE\n )\n\n _titles_done = set()\n\n # Spider-level scrapy settings\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'datasets.userpages.UserpagesSpiderPipeline': 300\n }\n }\n\n @property\n def users(self):\n \"\"\"Users set.\"\"\"\n if not self._users:\n self._users = get_usernames()\n return self._users\n\n # Methods -----------------------------------------------------------------\n\n def start_requests(self):\n \"\"\"Custom start method.\"\"\"\n for user in self.users:\n # Send cirrus requests for main User: and User_talk: pages\n titles = [ prepend_ns(user, ns) for ns in self.ns ]\n request_main = self.make_cirrus_request(\n titles=titles,\n redirects=True\n )\n request_main.meta['user'] = user\n yield request_main\n for ns in self.ns:\n request = self.make_ap_request(prefix=user+'/', namespace=ns)\n request.meta['user'] = user\n yield request\n\n def parse(self, response):\n \"\"\"Parse method dispatcher.\"\"\"\n user = response.meta['user']\n rtype = response.meta['_rtype']\n if rtype == 'ap':\n allpages, new_request = self.parse_ap_response(response)\n if new_request:\n new_request.meta['user'] = user\n yield new_request\n for pages in self.cirrus_slice(allpages):\n pids = [\n p['pageid'] for p in pages\n if not self._rx_exclude.search(p['title'])\n ]\n cirrus_request = self.make_cirrus_request(\n pids=pids,\n redirects=True\n )\n cirrus_request.meta['user'] = user\n yield cirrus_request\n else:\n items = self.parse_cirrus(response)\n for item in items:\n if not isinstance(item, Request):\n self._titles_done.add(item['title'])\n item['user_name'] = user\n yield item\n\n def parse_cirrus(self, response):\n \"\"\"Parse cirrus response.\"\"\"\n user = response.meta['user']\n rx_user = re.compile(\n r\"User(?:[ _]talk)?\\:\"+re.escape(user),\n re.IGNORECASE\n )\n data = self.json_load(response.body_as_unicode())\n data = data.get('query', {})\n items = super().parse_cirrus(response)\n targets = []\n if 'redirects' in data:\n redirects = data['redirects']\n targets = [ r['to'] for r in redirects ]\n for item in items:\n title = item['title']\n if title in targets:\n targets.remove(title)\n if title not in self._titles_done and rx_user.search(title):\n target_request = self.make_cirrus_request(\n titles=title,\n redirects=True\n )\n target_request.meta['user'] = 
user\n yield target_request\n else:\n if title not in self._titles_done:\n self._titles_done.add(title)\n yield item\n\n\nclass UserpagesSpiderPipeline(WikiMinerSpiderPersistancePipeline):\n \"\"\"Userpages spider item pipeline.\"\"\"\n\n bulk_update_batch_size = 5000\n\n def __init__(self, filename=get_module_name(__file__)+'.ndjson',\n dirpath=None, **kwargs):\n \"\"\"Initilization method.\"\"\"\n super().__init__(filename, dirpath, **kwargs)\n\n def close_spider(self, spider):\n \"\"\"Spider closing handler.\"\"\"\n self.make_bulk_update(\n query_fields='page_id',\n model=Userpage,\n spider=spider,\n batch_size=self.bulk_update_batch_size\n )\n\ndef execute():\n \"\"\"Execution function.\"\"\"\n return Task(actions=[ SpiderAction(UserpagesSpider) ])\n","sub_path":"datasets/userpages/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"145010591","text":"#!/usr/bin/python2\n# -*- coding: utf-8 -*-\n#__author__ = Özgür Erdoğdu\n#__e-mail__ = ozgurerdogdu@yandex.com.tr\n\nimport os, sqlite3\nfrom random import shuffle\n\nclass Veri:\n def __init__(self):\n self.dizin = os.getcwd()\n self.alfabe = 'abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ:.;,?!@#$%&()+=-*/_<> []{}`~^'\n\n def db_baglan(self):\n self.baglanti_db = sqlite3.connect(self.dizin + '/db/ozel.oe')\n self.isaretci = self.baglanti_db.cursor()\n\n def db_kapat(self):\n self.baglanti_db.close()\n\n def tbl_kullanici_olustur(self):\n self.db_baglan()\n self.db = self.isaretci.execute('''CREATE TABLE tbl_kullanici (\n kullanici_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n kullanici_adi VARCHAR(12) NOT NULL,\n kullanici_sifre VARCHAR(12) NOT NULL,\n kullanici_kr_no INTEGER NOT NULL)''')\n self.db_kapat()\n\n def tbl_veriler_olustur(self):\n self.db_baglan()\n self.db = self.isaretci.execute('''CREATE TABLE tbl_veriler (\n veri_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n veri_adi VARCHAR(50) NOT NULL,\n veri_kullanici_adi VARCHAR(50) NOT NULL,\n veri_eski_sifre VARCHAR(50),\n veri_yeni_sifre VARCHAR(50) NOT NULL,\n veri_olusturma_tarihi FLOAT NOT NULL,\n veri_kr_no INTEGER NOT NULL)''')\n self.db_kapat()\n\n def kr_baglan(self):\n self.baglanti_kr = sqlite3.connect(self.dizin + '/db/kript.oe')\n self.isaretci = self.baglanti_kr.cursor()\n\n def kr_kapat(self):\n self.baglanti_kr.close()\n\n def kriptolar(self):\n self.kr_baglan()\n db = self.isaretci.execute('''CREATE TABLE tbl_kripto (\n kripto_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n anahtar VARCHAR(91) NOT NULL)''')\n q = 0\n a_liste = range(0, len(self.alfabe))\n while q < 100:\n anah = ''\n shuffle(a_liste)\n for i in range(0, len(self.alfabe)):\n anah += self.alfabe[a_liste[i]]\n q += 1\n db = self.isaretci.execute('''INSERT INTO tbl_kripto (anahtar) VALUES (\"{0}\")'''.format(anah))\n self.baglanti_kr.commit()\n self.kr_kapat()","sub_path":"sifre/pr_db.py","file_name":"pr_db.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"600750082","text":"from typing import List\n\n\nclass Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n if matrix[0] == []:\n return []\n if len(matrix) == 1:\n return matrix[0]\n i, j = 0, 0\n\n height = len(matrix)\n width = len(matrix[0])\n result = []\n while j < width:\n result.append(matrix[i][j])\n j += 1\n\n j -= 1\n i += 1\n while i < height:\n 
result.append(matrix[i][j])\n i += 1\n\n i -= 1\n j -= 1\n while j >= 0:\n result.append(matrix[i][j])\n j -= 1\n\n if width == 1:\n return result\n j += 1\n i -= 1\n while i >= 1:\n result.append(matrix[i][j])\n i -= 1\n # if width == 2:\n # return result\n sub_matrix = self.subMatrix(matrix)\n sub_result = self.spiralOrder(sub_matrix)\n result[len(result):] = sub_result\n return result\n\n def subMatrix(self, matrix: List[List[int]]) -> List[List[int]]:\n sub_matrix = []\n\n for i in range(1, len(matrix) - 1):\n sub_matrix.append(matrix[i][1:-1])\n return sub_matrix\n\n\ndef main():\n slt = Solution()\n a = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ]\n b = [\n [1, 11],\n [2, 12],\n [3, 13],\n [4, 14],\n [5, 15],\n [6, 16],\n [7, 17],\n [8, 18],\n [9, 19],\n [10, 20]\n ]\n\n print(a[0][1:-1])\n print(slt.spiralOrder([]))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/54_sprial_matrix.py","file_name":"54_sprial_matrix.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"260862349","text":"# Sever Control.py\n#SENG 299\n#Chatroom project\n\n\n#The server will be the central data structure for the chatroom porject\nimport socket\nfrom GeneralChatroom import GeneralChatroom\nfrom Chatroom import Chatroom\n\nclass ServerControl(object):\n\t'''\n\tIt controls the sending of messages and construction of new chat rooms\n\tWill also keep a list of connected clients and their relative information\n\tAttributes:\n\t\tcurrentClientIPs\n\t\tcurrentClientAliases\n\t\tchatroomNames\n\t'''\n\n\n\t\n\tdef __init__(self):\n\t\t#return objects with no populated lists\n\t\ts = socket.socket()\n\t\thost = socket.gethostname()\n\t\tport = 9999\n\t\taddress = (host, port)\n\t\ts.bind(address)\n\t\ts.listen(5)\n\n\t\tself.generalChatroom = GeneralChatroom()\n\t\tself.currentClients = {}\n\t\tself.chatrooms = []\n\n\t\tself.controlloop(s)\n\t\n\t# This function returns a chatroom based on its name, and returns None is it doesn't exist.\n\tdef getChatroom(self, chatroomName):\n\t\tif chatroomName == 'general':\n\t\t\treturn self.generalChatroom\n\t\telse:\n\t\t\tfor i in self.chatrooms:\n\t\t\t\tif self.chatrooms[i].chatroomName == chatroomName:\n\t\t\t\t\treturn self.chatrooms[i]\n\t\t\treturn None\n\t\n\t# This function sends the completed, formatted message to everyone in the given list of clients.\n\tdef sendmessage(self, message, clients):\n\t\tfor i in clients:\n\t\t\tclients[i].receiveMessage(message)\n\t\treturn\n\n\t# This function returns the alias of a given IP.\n\tdef getUserAlias(self,clientIP):\n\t\treturn self.currentClients[clientIP]\n\t\n\t# This function returns true if the user is the admin of the chatroom and false if they aren't.\n\tdef isAdmin(self,clientIP,chatroom):\n\t\tif chatroom.chatroomName == 'general':\n\t\t\treturn False\n\t\telif chatroom.adminIP == clientIP:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\t# This function gets a message from a client and sends it if the person is not banned.\n\tdef receivemessage(self, message, clientIP, chatroomName):\n\t\tchatroom = self.getChatroom(chatroomName)\n\t\t\n\t\tself.sendmessage(message, chatroom.currentClients)\n\n\t# This puts a client within a chatroom if they are not banned.\n\tdef connectuser(self, clientIP, chatroom):\n\n\t\t#1. connect to general chat on startup\n\t\t#2. try to connect to room if it exists\n\t\t#3. 
if not print a message\n\n\t\t# This part needs work\n\t\tif chatroom == 'general':\n\t\t\tself.generalChatroom.addUser(clientIP)\n\t\telse:\n\t\t\ttarget = self.getChatroom(chatroom)\n\t\t\tif target is not None:\n\t\t\t\ttarget.addUser(clientIP)\n\t\treturn\n\n\tdef disconnectuser(self, clientIP):\n\t\tprint(\"disconnect\")\n\n\t# This creates a new chatroom if the name is not taken and assigns the client who issued the command as the admin.\n\tdef createchatroom(self, clientIP, chatroomName):\n\t\n\t\t# This checks if the chatroom name is taken.\n\t\tif(self.getChatroom(chatroomName) != None):\n\t\t\treturn\n\t\t\n\t\tnewChatroom = Chatroom(clientIP,chatroomName)\n\t\tself.chatrooms.append(newChatroom)\n\t\treturn\n\n\t# This deletes a chatroom if it's not General and the client trying to delete it is the admin.\n\tdef deletechatroom(self, clientIP, chatroomName):\n\t\tchatroom = self.getChatroom(chatroomName)\n\t\tif self.isAdmin(clientIP,chatroom):\n\t\t\tself.chatrooms.remove(chatroom)\n\n\t# This blocks a user from a chatroom if it's not General, the admin is trying to ban someone and they are not banning themselves.\n\tdef blockuser(self, clientIP, chatroomName, bannedIP):\n\t\tchatroom = self.getChatroom(chatroomName)\n\t\tif self.isAdmin(clientIP,chatroom) and clientIP != bannedIP:\n\t\t\tchatroom.blockUser(bannedIP)\n\t\treturn\n\n\t# This unblocks a user if it's not General and the admin is trying to unblock someone.\n\tdef unblockuser(self, clientIP, chatroomName, bannedIP):\n\t\tchatroom = self.getChatroom(chatroomName)\n\t\tif self.isAdmin(clientIP,chatroom):\n\t\t\tchatroom.unblockUser(bannedIP)\n\t\treturn\n\n\t# This sets a client's alias\n\tdef setalias(self,clientIP,newAlias):\n\t\tself.currentClients[clientIP] = newAlias\n\t\treturn\n\n\tdef parseinput(self, message, address):\n\n\t\t# I don't want to do a bunch of elifs\n\n\t\tif message.startswith('/'):\n\t\t\tcommand = message.split(' ', 1)[0]\n\t\t\tprint (command)\n\n\t\telse:\n\t\t\t# no command: treat it as a plain chat message in the general room\n\t\t\tprint(\"no command found\")\n\t\t\tself.receivemessage(message, address[0], 'general')\n\t\t\treturn\n\n\t\toptions = {\n\t\t\t'/create': self.createchatroom,\n\t\t\t'/delete': self.deletechatroom,\n\t\t\t'/connect': self.connectuser\n\t\t}\n\t\tif command in options:\n\t\t\toptions[command](address[0], message.split(' ', 1)[1])\n\n\t\tif 'block' in command:\n\t\t\tblocks = {\n\t\t\t\t'/block' : self.blockuser,\n\t\t\t\t'/unblock' : self.unblockuser\n\t\t\t}\n\t\t\t# /block and /unblock take a chatroom name and a target IP\n\t\t\targs = message.split(' ')\n\t\t\tblocks[command](address[0], args[1], args[2])\n\n\tdef controlloop(self, s):\n\t\t# type: () -> object\n\n\t\t#address[0] = local IP\n\t\t#address[1] = socket\n\t\t#message = message obviously\n\n\t\twhile True:\n\t\t\tclient, address = s.accept()\n\t\t\tmessage = client.recv(1024)\n\t\t\tprint ('%s:%s says >> %s' % (address[0], address[1], message))\n\n\t\t\tif message is not None:\n\t\t\t\tself.parseinput(message, address)\n\t\t\tmessage = None\n\t\t\taddress = None\n\n\ndef main():\n\n\tserver = ServerControl()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Project/server/ServerControl.py","file_name":"ServerControl.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"423012205","text":"import os\nimport re\n\nfrom nonebot import get_bot\nfrom aip import AipContentCensor\nfrom hoshino.util import FreqLimiter, DailyNumberLimiter\nfrom hoshino import R, Service\nfrom hoshino.typing import CQEvent, MessageSegment\nfrom asyncio import sleep\nfrom datetime import datetime,timedelta\nfrom .module import 
PicListener, porn_pic_index\n\nsv = Service('色图打分')\n_max = 10\nEXCEED_NOTICE = f'您今天已经打了{_max}次分了,请明早5点后再来!'\n_nlmt = DailyNumberLimiter(_max)\n_flmt = FreqLimiter(60)\nSEARCH_TIMEOUT = 30\ncache = 'C:/bot/chuyin-go' \n\n\npls = PicListener()\n\n@sv.on_prefix('打分')\nasync def setu_score(bot,ev: CQEvent):\n uid = ev['user_id']\n gid = ev['group_id']\n if not _nlmt.check(uid):\n await bot.send(ev, EXCEED_NOTICE, at_sender=True)\n return\n if not _flmt.check(uid):\n await bot.send(ev, f'您冲的太快了,{round(_flmt.left_time(uid))}秒后再来吧', at_sender=True)\n return\n ret = re.search(r\"\\[CQ:image,file=(.*),url=(.*)\\]\", str(ev.message))\n if not ret:\n if pls.get_on_off_status(gid):\n if uid == pls.on[gid]:\n await bot.finish(ev, f\"您已经在打分模式下啦!\\n如想退出打分模式请发送“退出打分”~\")\n else:\n await bot.finish(ev, f\"本群[CQ:at,qq={pls.on[gid]}]正在打分,请耐心等待~\")\n pls.turn_on(gid, uid)\n await bot.send(ev, f\"了解~请发送图片吧!\\n如想退出打分模式请发送“退出打分”\")\n await sleep(30)\n ct = 0\n while pls.get_on_off_status(gid):\n if datetime.now() < pls.timeout[gid] and ct<10:\n await sleep(30)\n if ct != pls.count[gid]:\n ct = pls.count[gid]\n pls.timeout[gid] = datetime.now()+timedelta(seconds=30)\n else:\n await bot.send(ev, f\"[CQ:at,qq={pls.on[gid]}] 由于超时,已为您自动退出打分模式,以后要记得说“退出打分”来退出打分模式噢~\")\n pls.turn_off(ev.group_id)\n return\n file = ret.group(1)\n #百度api无法直接从腾讯url获取图片,所以要下载到本地后再上传\n img = await get_bot().get_image(file=file)\n img_file = img['file']\n porn = porn_pic_index(img_file)\n if porn['code'] == 0:\n score = porn['value']\n else:\n code = porn['code']\n err = porn['msg']\n await bot.send(ev,f'错误:{code}\\n{err}')\n return\n url = os.path.join(cache,img_file)\n await bot.send(ev,str(MessageSegment.image(f'file:///{os.path.abspath(url)}')+f'\\n色图评分:{score}'))\n _flmt.start_cd(uid)\n _nlmt.increase(uid)\n\n@sv.on_message('group')\nasync def picmessage(bot, ev: CQEvent):\n ret = re.search(r\"\\[CQ:at,qq=(\\d*)\\]\", str(ev.message))\n atcheck = False\n batchcheck = False\n if ret:\n if int(ret.group(1)) == int(ev.self_id):\n atcheck = True\n if pls.get_on_off_status(ev.group_id):\n if int(pls.on[ev.group_id]) == int(ev.user_id):\n batchcheck = True\n if not(batchcheck or atcheck):\n return\n uid = ev.user_id\n \n ret = re.search(r\"\\[CQ:image,file=(.*)?,url=(.*)\\]\", str(ev.message))\n if not ret:\n return\n file= ret.group(1)\n img = await get_bot().get_image(file=file)\n img_file = img['file']\n porn = porn_pic_index(img_file)\n if porn['code'] == 0:\n score = porn['value']\n else:\n code = porn['code']\n err = porn['msg']\n await bot.send(ev,f'错误:{code}\\n{err}')\n return\n url = os.path.join(cache,img_file)\n await bot.send(ev,str(MessageSegment.image(f'file:///{os.path.abspath(url)}')+f'\\n色图评分:{score}'))\n pls.turn_off(ev.group_id)\n _flmt.start_cd(uid)\n _nlmt.increase(uid)\n\n@sv.on_fullmatch('退出打分')\nasync def thanks(bot, ev: CQEvent):\n if pls.get_on_off_status(ev.group_id):\n if pls.on[ev.group_id]!=ev.user_id:\n await bot.send(ev, '不能替别人退出打分哦~')\n return\n pls.turn_off(ev.group_id)\n await bot.send(ev, '已退出')\n return\n await bot.send(ev, 'にゃ~')\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"460800872","text":"\nfrom selenium import webdriver\n\nimport time\n\n# 创建谷歌浏览器驱动对象\nbrowser = webdriver.Chrome()\n\n# 访问页面\nbrowser.get('http://www.baidu.com')\n# 访问其他页面\ntime.sleep(5)\nbrowser.get('https://www.jianshu.com')\n# 等待\ntime.sleep(100)\n\n# 
退出自动化控制程序\nbrowser.quit()\n\n\n\n\n\n\n","sub_path":"sp/day15/01_selenium.py","file_name":"01_selenium.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"535289327","text":"# Copyright Swiss Data Science Center (SDSC). A partnership between\n# École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Click utilities.\"\"\"\n\nfrom typing import TYPE_CHECKING, List\n\nimport click\n\nif TYPE_CHECKING:\n from renku.core.dataset.providers.models import ProviderParameter\n\n\ndef shell_complete_datasets(ctx, param, incomplete) -> List[str]:\n \"\"\"Shell completion for dataset names.\"\"\"\n from renku.command.dataset import search_datasets_command\n\n try:\n result = search_datasets_command().build().execute(name=incomplete)\n except Exception:\n return []\n else:\n return result.output\n\n\ndef shell_complete_workflows(ctx, param, incomplete) -> List[str]:\n \"\"\"Shell completion for plan names.\"\"\"\n from renku.command.workflow import search_workflows_command\n\n try:\n result = search_workflows_command().build().execute(name=incomplete)\n except Exception:\n return []\n else:\n return [n for n in result.output if n.startswith(incomplete)]\n\n\ndef shell_complete_sessions(ctx, param, incomplete) -> List[str]:\n \"\"\"Shell completion for session names.\"\"\"\n from renku.command.session import search_sessions_command\n\n try:\n result = search_sessions_command().build().execute(name=incomplete)\n except Exception:\n return []\n else:\n return result.output\n\n\ndef shell_complete_session_providers(ctx, param, incomplete) -> List[str]:\n \"\"\"Shell completion for session providers names.\"\"\"\n from renku.command.session import search_session_providers_command\n\n try:\n result = search_session_providers_command().build().execute(name=incomplete)\n except Exception:\n return []\n else:\n return result.output\n\n\nclass CaseInsensitiveChoice(click.Choice):\n \"\"\"Case-insensitive click choice.\n\n Based on https://github.com/pallets/click/issues/569.\n \"\"\"\n\n def convert(self, value, param, ctx):\n \"\"\"Convert value to its choice value.\"\"\"\n if value is None:\n return None\n return super().convert(value.lower(), param, ctx)\n\n\nclass MutuallyExclusiveOption(click.Option):\n \"\"\"Custom option class to allow specifying mutually exclusive options in click commands.\"\"\"\n\n def __init__(self, *args, **kwargs):\n mutually_exclusive = sorted(kwargs.pop(\"mutually_exclusive\", []))\n self.mutually_exclusive = set()\n self.mutually_exclusive_names = []\n\n for mutex in mutually_exclusive:\n if isinstance(mutex, tuple):\n self.mutually_exclusive.add(mutex[0])\n self.mutually_exclusive_names.append(mutex[1])\n else:\n self.mutually_exclusive.add(mutex)\n self.mutually_exclusive_names.append(mutex)\n\n _help = kwargs.get(\"help\", \"\")\n if self.mutually_exclusive:\n ex_str = \", 
\".join(self.mutually_exclusive_names)\n kwargs[\"help\"] = f\"{_help} NOTE: This argument is mutually exclusive with arguments: [{ex_str}].\"\n super().__init__(*args, **kwargs)\n\n def handle_parse_result(self, ctx, opts, args):\n \"\"\"Handles the parse result for the option.\"\"\"\n if self.mutually_exclusive.intersection(opts) and self.name in opts:\n raise click.UsageError(\n \"Illegal usage: `{}` is mutually exclusive with \"\n \"arguments `{}`.\".format(self.name, \", \".join(sorted(self.mutually_exclusive_names)))\n )\n\n return super().handle_parse_result(ctx, opts, args)\n\n\ndef create_options(providers, parameter_function: str):\n \"\"\"Create options for a group of providers.\"\"\"\n\n def wrapper(f):\n from click_option_group import optgroup\n\n for i, provider in enumerate(sorted(providers, reverse=True, key=lambda p: p.name.lower())):\n parameters: List[\"ProviderParameter\"] = getattr(provider, parameter_function)()\n for j, param in enumerate(parameters):\n param_help = f\"\\b\\n{param.help}\\n \" if j == 0 else param.help # NOTE: add newline after a group\n\n args = (\n [f\"-{a}\" if len(a) == 1 else f\"--{a}\" for a in param.flags if a] + [param.name.replace(\"-\", \"_\")]\n if param.flags\n else [f\"--{param.name}\"]\n )\n\n f = optgroup.option(\n *args,\n type=param.type,\n help=param_help,\n is_flag=param.is_flag,\n default=param.default,\n multiple=param.multiple,\n metavar=param.metavar,\n )(f)\n\n name = f\"{provider.name} configuration\"\n if i == len(providers) - 1:\n name = \"\\n \" + name # NOTE: add newline before first group\n\n f = optgroup.group(name=name)(f)\n\n return f\n\n return wrapper\n","sub_path":"renku/ui/cli/utils/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"299539360","text":"import re\r\nimport sys\r\nimport time\r\nimport json\r\nimport socket \r\nimport os\r\n\r\n\r\ndef write_to_file(str):\r\n filename = \"/opt/json_log.txt\"\r\n write_file = open(filename, 'a')\r\n write_file.write(str + \"\\n\")\r\n write_file.close()\r\n\r\n\r\ndef get_json_str(line):\r\n json_dict = {}\r\n # READ_ERROR \r\n operation = re.findall(\"[A-Z_]+\", line)\r\n if operation:\r\n operation = operation[0]\r\n else:\r\n operation = \"\"\r\n json_dict[\"operation\"] = operation\r\n\r\n # - Takes(s): \r\n time_spent = re.findall(\"Takes\\\\(s\\\\): ([0-9.]+),\", line)\r\n if time_spent:\r\n time_spent = float(time_spent[0])\r\n else:\r\n time_spent = 0.0\r\n json_dict[\"time_spent\"] = time_spent\r\n\r\n # Count: \r\n count = re.findall(\"Count: (\\d+),\", line)\r\n if count:\r\n count = int(count[0])\r\n else:\r\n count = 0\r\n json_dict[\"count\"] = count\r\n\r\n # OPS: \r\n ops = re.findall(\"OPS: ([0-9.]+)\", line)\r\n if ops:\r\n ops = float(ops[0])\r\n else:\r\n ops = 0\r\n json_dict[\"ops\"] = ops\r\n\r\n # Avg(us): \r\n # print(line)\r\n avg = re.findall(\"Avg\\\\(\\w+\\\\):\\s*(\\d+)\", line)\r\n if avg:\r\n avg = avg[0]\r\n else:\r\n avg = 0\r\n json_dict[\"avg\"] = avg\r\n\r\n # Min(us): \r\n min_us = re.findall(\"Min\\\\(us\\\\): (\\d+)\", line)\r\n if min_us:\r\n min_us = int(min_us[0])\r\n else:\r\n min_us = 0 \r\n json_dict[\"min_us\"] = min_us \r\n\r\n # Max(us): \r\n max_us = re.findall(\"Max\\\\(us\\\\): (\\d+)\", line)\r\n if max_us:\r\n max_us = int(max_us[0])\r\n else:\r\n max_us = 0\r\n json_dict[\"max_us\"] = max_us\r\n\r\n # 99th(us): \r\n rate_99_us = re.findall(\"99th\\\\(us\\\\): (\\d+)\", line)\r\n if 
rate_99_us:\r\n rate_99_us = int(rate_99_us[0])\r\n else:\r\n rate_99_us = 0\r\n json_dict[\"rate_99_us\"] = rate_99_us\r\n\r\n # 99.9th(us): \r\n rate_999_us = re.findall(\"99.9th\\\\(us\\\\): (\\d+)\", line)\r\n if rate_999_us:\r\n rate_999_us = int(rate_999_us[0])\r\n else:\r\n rate_999_us = 0\r\n json_dict[\"rate_999_us\"] = rate_999_us\r\n\r\n # 99.99th(us): \r\n rate_9999_us = re.findall(\"99.99th\\\\(us\\\\): (\\d+)\", line)\r\n if rate_9999_us:\r\n rate_9999_us = int(rate_9999_us[0])\r\n else:\r\n rate_9999_us = 0\r\n json_dict[\"rate_9999_us\"] = rate_9999_us\r\n\r\n # hostname\r\n hostname = socket.gethostname()\r\n json_dict[\"hostname\"] = hostname\r\n\r\n json_str = json.dumps(json_dict)\r\n return json_str\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # print(\"log collector starting...\")\r\n db_log_filename = sys.argv[1] # db_log_filename 是读取日志的文件名\r\n pre_line = None\r\n\r\n # 不断的读取文件,将json化的文件输出\r\n while True:\r\n line = os.popen(\"tail -n 1 \" + db_log_filename).readline() # 总是读取日志的最后一行\r\n if line == pre_line:\r\n time.sleep(5)\r\n continue\r\n else:\r\n re_line = get_json_str(line)\r\n # print(re_line) # 此处需要将re_line发送给server\r\n write_to_file(re_line)\r\n pre_line = line\r\n time.sleep(3)\r\n\r\n # print(\"log collection finished.\")","sub_path":"log-collector/log_collector.py","file_name":"log_collector.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"324925440","text":"import pandas as pd\nimport numpy as np\nimport math, random, sys\n\n\ncol_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',\n 'PTRATIO', 'B', 'LSTAT', 'MEDV']\n\n\ntrain_file = 'housing_train.txt'\ntest_file = 'housing_test.txt'\n\n\n\n\n# ------------ Helper Funcitons ------------ #\n\ndef make_matrix(datafile, dummy=True):\n\n df = pd.read_csv(datafile, delim_whitespace=True, header=None, names=col_names)\n if dummy is True:\n df.insert(0, 'DUMMY', 1)\n X_names = ['DUMMY']\n X_names.extend(col_names)\n X_names.remove('MEDV')\n else:\n X_names = []\n X_names.extend(col_names)\n X_names.remove('MEDV')\n\n X = df.as_matrix(columns=X_names)\n Y = df.as_matrix(columns=['MEDV'])\n\n return X, Y\n\n\n\ndef compute_weight(X, Y):\n return np.matmul(np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)), np.transpose(X)), Y)\n\n\n\ndef compute_lambda_weight(X, Y, lmbda):\n return np.matmul(np.matmul(np.linalg.inv(np.add(np.matmul(np.transpose(X), X), (lmbda * np.identity(len(np.matmul(np.transpose(X), X)))))), np.transpose(X)), Y)\n\n\n\ndef compute_sse(X,Y,W):\n actual_predicted_diff = []\n for index, matrix in enumerate(X):\n actual_predicted_diff.append(math.pow(Y[index] - sum([W[k] * matrix[k] for k in range(len(matrix))]), 2))\n\n return sum(actual_predicted_diff)\n\n\n\ndef generate_random_column(limit, size):\n return np.asarray([random.uniform(0.0, float(limit)) for i in range(size)])\n\n\n\ndef generate_random_features(X, num_features):\n for i in range(num_features):\n new_col = generate_random_column((random.uniform(25.0, 500.0)), len(X))\n new_X = np.c_[X,new_col]\n X = new_X\n return X\n\n\n\n\n# ------------ Show Problem Results ------------ #\n\ndef problems_1_to_3():\n print(\"\\n-------- With dummy column --------\")\n\n X_train,Y_train = make_matrix(train_file)\n W = compute_weight(X_train,Y_train)\n print(\"\\nW vector:\\n\", W, \"\\n\")\n train_sse = compute_sse(X_train, Y_train, W)\n\n print(\"Training SSE: \", train_sse)\n\n X_test,Y_test = 
make_matrix(test_file)\n test_sse = compute_sse(X_test, Y_test, W)\n\n print(\"Testing SSE: \", test_sse, \"\\n\")\n\n\ndef problem_4():\n print(\"\\n-------- Without dummy column --------\")\n\n X_train,Y_train = make_matrix(train_file, dummy=False)\n W = compute_weight(X_train,Y_train)\n print(\"\\nW vector:\\n\", W, \"\\n\")\n train_sse = compute_sse(X_train, Y_train, W)\n\n print(\"Training SSE: \", train_sse)\n\n X_test,Y_test = make_matrix(test_file, dummy=False)\n test_sse = compute_sse(X_test, Y_test, W)\n\n print(\"Testing SSE: \", test_sse, \"\\n\")\n\n\ndef problem_5():\n print(\"\\n\\n----- Random Feature Generation -----\")\n #for i in range(num_iterations):\n rands = [1, 2, 4, 5, 8, 10, 12, 15, 20, 25, 30, 40, 50, 100, 150, 250, 500]\n for rand in rands:\n #rand = random.randint(1, 35)\n #print(\"\\n***** Iteration {0}: Creating {1} randomized features *****\".format(i, rand))\n print(\"\\n***** Creating {0} randomized features *****\".format(rand))\n\n X_train,Y_train = make_matrix(train_file)\n X_train = generate_random_features(X_train, rand)\n W = compute_weight(X_train, Y_train)\n train_sse = compute_sse(X_train, Y_train, W)\n #print(\"Training SSE {0}: \".format(i), train_sse)\n print(\"Training SSE: \", train_sse)\n\n X_test,Y_test = make_matrix(test_file)\n X_test = generate_random_features(X_test, rand)\n test_sse = compute_sse(X_test, Y_test, W)\n #print(\"Testing SSE {0}: \".format(i), test_sse)\n print(\"Testing SSE: \", test_sse)\n print(\"\\n\")\n\n\ndef problem_6():\n print(\"\\n\\n----- Lambda Weight Calculations -----\")\n X_train,Y_train = make_matrix(train_file)\n X_test,Y_test = make_matrix(test_file)\n\n values = [0.01, 0.05, 0.1, 0.5, 1, 2.5, 5, 25, 50, 100, 250, 500, 1000, 5000, 10000, 100000]\n for value in values:\n print(\"\\n***** Lambda value {0} *****\".format(value))\n W = compute_lambda_weight(X_train, Y_train, value)\n print(\"\\nW vector:\\n\", W, \"\\n\")\n print(\"\\nW norm: \", np.linalg.norm(W, ord=2))\n train_sse = compute_sse(X_train, Y_train, W)\n print(\"Training SSE: \", train_sse)\n test_sse = compute_sse(X_test, Y_test, W)\n print(\"Testing SSE: \", test_sse)\n print(\"\\n\")\n\n\n\n\nif __name__ == \"__main__\":\n try:\n arg = int(sys.argv[1])\n if arg >= 1 and arg <=3:\n problems_1_to_3()\n elif arg == 4:\n problem_4()\n elif arg == 5:\n problem_5()\n elif (arg >= 6) and (arg <= 8):\n problem_6()\n elif len(sys.argv) == 1:\n problems_1_to_3()\n problem_4()\n problem_5()\n problem_6()\n else:\n print(\"\\nUSAGE: python implementation_1.py \")\n print(\"\\n\\tdefaults to printing all questions if not parameter passed\\n\\n\")\n except:\n print(\"\\nUSAGE: python implementation_1.py \")\n print(\"\\n\\tdefaults to printing all questions if not parameter passed\\n\\n\")\n","sub_path":"implementation_1/implementation_1.py","file_name":"implementation_1.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"443032314","text":"#!usr/bin/env python3\nimport serial\nfrom serial import Serial\nimport time\nfrom random import *\n\nCounter = 0\n\nif __name__ == '__main__':\n ser = serial.Serial('/dev/ttyUSB0', 57600, timeout=1)\n ser.flush()\n \n while True:\n #ser.write(\"255\\n\".encode('utf-8'))\n RPi = ser.readline().decode('utf-8').rstrip()\n print(RPi)\n #Num1 = random()\n #String1 = str(Num1)\n #Counter = 0\n String1 = str(Counter)\n String2 = RPi + \" \" + String1 + \"\\n\"\n Counter = Counter + 1\n #Response = RPi + \" 255\\n\"\n 
#print(String2)\n ser.write(String2.encode('utf-8'))\n time.sleep(1)","sub_path":"Telemetry/Telemetry_Computer.py","file_name":"Telemetry_Computer.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"125217998","text":"from app import app\nfrom flask import request, jsonify, make_response\nimport requests\nimport pandas as pd\nimport os\nimport json\n\n@app.route('/')\ndef index():\n return \"Hello from Log\"\n\n\n@app.route('/log', methods=['POST'])\ndef log():\n data = request.get_json()\n req_time = data[\"req_time\"]\n keyword = data[\"keyword\"]\n if not os.path.isfile('req_time.csv'):\n df_req = pd.DataFrame(columns=[\"Req_Time\", \"Keyword\"])\n df_req.to_csv(r'req_time.csv', index=False, header=True)\n\n df_req = pd.read_csv('req_time.csv')\n df_req = df_req.append(pd.Series({\n \"Req_Time\": req_time,\n \"Keyword\": keyword\n }),\n ignore_index=True)\n df_req.to_csv(r'req_time.csv', index=False, header=True)\n\n if not os.path.isfile('count.csv'):\n df_count = pd.DataFrame(columns=[\"Keyword\", \"Count\"])\n df_count.to_csv(r'count.csv', index=False, header=True)\n\n df_count = pd.read_csv('count.csv')\n row = df_count.loc[df_count[\"Keyword\"] == keyword]\n if row.empty:\n df_count = df_count.append(pd.Series({\n \"Keyword\": keyword,\n \"Count\": 1\n }),\n ignore_index=True)\n else:\n df_count.at[row.index, \"Count\"] = row[\"Count\"] + 1\n df_count.to_csv(r'count.csv', index=False, header=True)\n return make_response('OK', 200)\n\n@app.route('/get_log', methods=['GET'])\ndef get_log():\n df_count = pd.read_csv('count.csv')\n return make_response(df_count.to_json(orient='index'), 200)\n","sub_path":"log/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"439381073","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 24 18:58:51 2020\r\n\r\n@author: Arif\r\n\"\"\"\r\n\r\n'''\r\nThis script performs quantitave evaluation of the camera poses obtained by \r\n3D reconstruction softwares Agisoft of Photoscan and Meshroom of Alicevision \r\nfrom the synthetic (Blender) stacked images for both moving and fixed lens \r\nsetups where camera poses form a complete sphere.\r\n'''\r\n\r\nimport pickle\r\nimport numpy as np\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom matplotlib import pylab as plt\r\nplt.style.use('classic')\r\nimport cv2\r\nfrom xml.dom import minidom\r\nimport xmltodict\r\nimport json\r\nimport math\r\n\r\nacquisition_step_angle=15\r\nno_of_cameras=13\r\n#tilt_angle=np.linspace(-90,180,no_of_cameras)\r\ntilt_angle=15\r\ninitial_orientation=np.array([[1,0,0],[0,1,0],[0,0,1]])\r\ninitial_position=np.array([0,0,-10])\r\npositions=np.zeros([no_of_cameras*2,3])\r\nm=0\r\nfor i in range(no_of_cameras):\r\n for j in range(2): \r\n beta=math.radians(acquisition_step_angle*j)\r\n R=np.array([[math.cos(beta),0.0,math.sin(beta)],[0.00,1,0.00],[-math.sin(beta),0.00,math.cos(beta)]])\r\n alpha=math.radians(tilt_angle*(-i+int(no_of_cameras/2)))\r\n R1=np.array([[1,0,0],[0,math.cos(alpha),-math.sin(alpha)],[0,math.sin(alpha),math.cos(alpha)]])\r\n R_z=np.array([[math.cos(0),-math.sin(0),0],[math.sin(0),math.cos(0),0],[0,0,1]])\r\n R_combined=np.dot(initial_orientation,np.dot(R_z,np.dot(R,R1)))\r\n \r\n positions[m,:]=np.dot(R_combined,initial_position)\r\n m=m+1\r\n 
\r\nang=np.zeros([no_of_cameras])\r\npan_angle_pos=np.zeros([no_of_cameras])\r\n\r\nfor i in range(no_of_cameras):\r\n \r\n x1, y1, z1 = positions[i*2,0], positions[i*2,2], positions[i*2,1]\r\n x2, y2, z2 = positions[i*2+1,0], positions[i*2+1,2], positions[i*2+1,1]\r\n \r\n ang[i] = math.degrees(math.acos( (x1*x2 + y1*y2 + z1*z1) / np.sqrt( (x1*x1 + y1*y1 + z1*z1)*(x2*x2+y2*y2+z2*z2) ) ))\r\n\r\nfor i in range(no_of_cameras):\r\n if ang[i]!=0:\r\n modified_acquisition_step_angle = ang[int(no_of_cameras/2)]*acquisition_step_angle/ang[i]\r\n if ang[i]==0:\r\n modified_acquisition_step_angle=360\r\n pan_angle_pos[i]=int(round((360/modified_acquisition_step_angle)))\r\n modified_acquisition_step_angle=360/pan_angle_pos\r\n \r\n'''\r\n# # load cameras poses by Agisoft Photoscan\r\n\r\ninfile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/Agisoft_reconstruction_fixed_lens_helicon_blender_insect4.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/Agisoft_reconstruction_fixed_lens_proposed_blender_insect4.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/Agisoft_reconstruction_moving_lens_helicon_blender_insect4.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/Agisoft_Reconstruction_moving_lens_proposed_blender_insect4.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/Agisoft_reconstruction_moving_lens_proposed_blender_insect4_attempt2.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/agisoft_reconstruction_fixed_lens_proposed_blender_insect4_Li.xml'\r\n\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/Agisoft_reconstruction_fixed_lens_helicon_blender_insect5.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/Agisoft_reconstruction_fixed_lens_proposed_blender_insect5.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/Agisoft_reconstruction_moving_lens_helicon_blender_insect5.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/Agisoft_reconstruction_moving_lens_proposed_blender_insect5.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/agisoft_reconstruction_fixed_lens_proposed_blender_insect5_Li.xml'\r\n\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/Agisoft_reconstruction_moving_lens_proposed_blender_insect5_Strobel.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect5_camera_poses/Agisoft_reconstruction_moving_lens_proposed_blender_insect5_Strobel_reduced.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/agisoft_reconstruction_moving_lens_proposed_blender_insect4_Strobel.xml'\r\n#infile = 'C:/Users/u6265553/Downloads/insect4_camera_pose/agisoft_reconstruction_moving_lens_proposed_blender_insect4_Strobel_reduced.xml'\r\n\r\nwith open(infile, 'r') as xml_file:\r\n my_dict = xmltodict.parse(xml_file. 
read())\r\n cameras = my_dict['document']['chunk']['cameras']['camera']\r\n extrinsics = []\r\n for camera in cameras:\r\n if 'transform' in camera.keys():\r\n transform = [float(s) for s in camera['transform'].split(' ')]\r\n extrinsics.append(np.array(transform).reshape([4,4]))\r\n else:\r\n e = np.empty((4,4))\r\n e[:] = np.NaN\r\n extrinsics.append(e)\r\n\r\n if 'fixed' in infile:\r\n #M = 1.7 # fixed lens\r\n M = 0.50711 # fixed lens blender\r\n else:\r\n #M = 1.3 # moving lens\r\n M = 0.7647 # moving lens blender\r\n'''\r\n\r\n# # load cameras poses by Alicevision Meshroom\r\n\r\n#infile='C:/Users/u6265553/Downloads/insect4_camera_pose/Meshroom_reconstruction_fixed_lens_helicon_blender_insect4.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect4_camera_pose/Meshroom_reconstruction_fixed_lens_proposed_insect4_blender.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect4_camera_pose/Meshroom_reconstruction_moving_lens_helicon_blender_insect4.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect4_camera_pose/Meshroom_reconstruction_moving_lens_proposed_blender_insect4.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect4_camera_pose/Meshroom_reconstruction_moving_lens_proposed_blender_insect4_attempt2.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect4_camera_pose/Meshroom_reconstruction_fixed_lens_proposed_blender_insect4_Li.sfm'\r\n\r\n#infile='C:/Users/u6265553/Downloads/insect5_camera_poses/Meshroom_reconstruction_fixed_lens_helicon_blender_insect5.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect5_camera_poses/Meshroom_reconstruction_fixed_lens_proposed_blender_insect5.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect5_camera_poses/Meshroom_reconstruction_moving_lens_helicon_blender_insect5.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect5_camera_poses/Meshroom_reconstruction_moving_lens_proposed_blender_insect5.sfm'\r\n#infile='C:/Users/u6265553/Downloads/insect5_camera_poses/Meshroom_reconstruction_fixed_lens_proposed_blender_insect5_Li.sfm'\r\n\r\ninfile='C:/Users/u6265553/Downloads/latest_results/latest_results/camera_pose_meshroom_moving_lens_feature_point_blender_insect4.sfm'\r\ninfile='C:/Users/u6265553/Downloads/latest_results/latest_results/camera_pose_fixed_lens_feature_point_blender_insect4.sfm'\r\n\r\nwith open(infile, 'r') as json_file:\r\n my_dict = json.load(json_file)\r\n ids_paths = [(view['viewId'], view['path']) for view in my_dict['views']]\r\n ids_paths.sort(key=lambda tup: tup[1]) # sorts in place\r\n ids = [id for id, path in ids_paths]\r\n\r\n poses = my_dict['poses']\r\n extrinsics = [np.ones([3, 4])*np.nan]*len(ids)\r\n # for id, path in ids_paths:\r\n for pose in poses:\r\n rotation = [float(num) for num in pose['pose']['transform']['rotation']]\r\n center = [float(num) for num in pose['pose']['transform']['center']]\r\n rotation = np.array(rotation).reshape([3,3])\r\n center = np.array(center).reshape([3,1])\r\n mat = np.hstack([rotation, center])\r\n # extrinsics.append(mat)\r\n id = pose['poseId']\r\n extrinsics[ids.index(id)] = mat\r\n if 'fixed' in infile:\r\n #M = 1.7 # fixed lens\r\n M = 0.50711 # fixed lens blender\r\n else:\r\n #M = 1.3 # moving lens\r\n M = 0.7647 # moving lens blender\r\n\r\nfL = 65 #mm\r\nd0 = fL*(M+1)/M\r\nd1 = fL*(M+1)\r\nprint('d0 [mm]:', d0)\r\nprint('d1 [mm]:', d1)\r\n\r\npositions = np.array([mat[:3,3] for mat in extrinsics if not np.isnan(mat.sum())])\r\n\r\nfig = plt.figure(1)\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(positions[:,0], positions[:,1], positions[:,2], 'o')\r\n\r\n# compute rotation 
vector\r\ntilt_pos = []\r\nfig = plt.figure(2)\r\nax = fig.add_subplot(111, projection='3d')\r\nfor i in range(13):\r\n #pos = positions[i*24:(i+1)*24,:]\r\n pos = positions[int(np.sum(pan_angle_pos[:i])):int(np.sum(pan_angle_pos[:i])+pan_angle_pos[i]),:]\r\n centre = pos.mean(axis=0)\r\n tilt_pos.append(pos - centre)\r\n ax.scatter(tilt_pos[i][:,0], tilt_pos[i][:,1], tilt_pos[i][:,2], 'o')\r\npositions2 = np.vstack(tilt_pos)\r\n\r\n\r\n# find x in Ax = B\r\nA = np.ones_like(positions2)\r\nA[:,:2] = positions2[:,:2]\r\nB = -positions2[:,2]\r\nx = np.linalg.inv(A.T @ A) @ A.T @ B\r\nn = x.copy()\r\nn[2] = 1\r\n\r\nno_iters = 5\r\npositions4 = positions2\r\nfor i in range(no_iters):\r\n # find distance of positions to fitted planes for outlier removal\r\n # parameters of plane equation ax+by+cz=d\r\n a, b, c, d = x[0], x[1], 1, -x[2]\r\n positions3 = np.append(positions2, np.ones([positions2.shape[0],1]), axis=1)\r\n distances = np.abs(positions3 @ np.array([a, b, c, d]).T) / np.linalg.norm(n)\r\n # remove the 10 points with largest distance\r\n threshold = np.sort(distances)[-1]\r\n positions4 = positions2[distances < threshold, :] # removal\r\n positions4 = positions2\r\n\r\n # again find x in Ax = B\r\n A = np.ones_like(positions4)\r\n A[:,:2] = positions4[:,:2]\r\n B = -positions4[:,2]\r\n x = np.linalg.inv(A.T @ A) @ A.T @ B\r\n n = x.copy()\r\n n[2] = 1\r\n positions2 = positions4\r\n\r\nx_norm = n/np.linalg.norm(n)\r\nax.quiver(0, 0, 0, x_norm[0], x_norm[1], x_norm[2], length=1, normalize=True)\r\n# print(x_norm)\r\n\r\n\r\n# plot plane\r\nxlim = ax.get_xlim()\r\nylim = ax.get_ylim()\r\nX,Y = np.meshgrid(np.arange(xlim[0], xlim[1]),\r\n np.arange(ylim[0], ylim[1]))\r\nZ = np.zeros(X.shape)\r\nfit = x\r\nfor r in range(X.shape[0]):\r\n for c in range(X.shape[1]):\r\n Z[r,c] = -(fit[0] * X[r,c] + fit[1] * Y[r,c] + fit[2])\r\nax.plot_wireframe(X,Y,Z, color='k')\r\n\r\nax.set_xlabel('x')\r\nax.set_ylabel('y')\r\nax.set_zlabel('z')\r\n\r\n# apply rotation between x_norm and y-axis\r\nz_axis = np.array([0, 0, 1])\r\nrot_axis = np.cross(x_norm, z_axis)\r\nrot_axis = rot_axis/np.linalg.norm(rot_axis)\r\nangle = np.arccos(np.dot(x_norm, z_axis))\r\nrot_matrix, _ = cv2.Rodrigues(angle*rot_axis)\r\n\r\npositions3 = rot_matrix @ positions2.T\r\npositions3 = positions3.T\r\nax.scatter(positions3[:,0], positions3[:,1], positions3[:,2], 'o')\r\n\r\n# collect points again and app\r\npositions = np.array([mat[:3,3] for mat in extrinsics])\r\npositions4 = rot_matrix @ (positions - positions[np.invert(np.isnan(positions.sum(axis=1))),:].mean(axis=0)).T\r\npositions4 = positions4.T\r\nfig = plt.figure(3)\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(positions4[:,0], positions4[:,1], positions4[:,2], 'bo')\r\nax.set_xlabel('x')\r\nax.set_ylabel('y')\r\nax.set_zlabel('z')\r\n\r\n# # find center of rotation\r\n# # TODO: remove outlier or using RANSAC\r\n# from scipy import optimize\r\n# method_2 = \"leastsq\"\r\n# def calc_R(xc, yc):\r\n# \"\"\" calculate the distance of each 2D points from the center (xc, yc) \"\"\"\r\n# return np.sqrt((x-xc)**2 + (y-yc)**2)\r\n\r\n# def f_2(c):\r\n# \"\"\" calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc) \"\"\"\r\n# Ri = calc_R(*c)\r\n# return Ri - Ri.mean()\r\n\r\n# x = positions4[np.invert(np.isnan(positions4.sum(axis=1))), 0]\r\n# y = positions4[np.invert(np.isnan(positions4.sum(axis=1))), 1]\r\n# center_estimate = x.mean(), y.mean()\r\n# center_2, ier = optimize.leastsq(f_2, center_estimate)\r\n\r\n\r\n# 
compute erros, mean and std\r\ntilt_angles = []\r\ntilt_angles2 = []\r\npan_angle_step = []\r\npan_step = 15.0\r\ntilt_step = 15.0\r\nradii = []\r\n\r\n############ Change this such that the first value of pan_angles[0] is close to zeros\r\n#a = 118/180*np.pi #90/180*np.pi #-90/180*np.pi # 65/180*np.pi #\r\n#a = 100/180*np.pi #110/180*np.pi #90/180*np.pi # -55/180*np.pi #\r\na=0/180*np.pi\r\n#####################\r\nrotation_z = np.array([[np.cos(a), -np.sin(a), 0],\r\n [np.sin(a), np.cos(a), 0],\r\n [0, 0, 1]])\r\n\r\nfor i in range(no_of_cameras):\r\n for j in range(int(pan_angle_pos[i])):\r\n pos = rotation_z @ positions4[int(np.sum(pan_angle_pos[:i])) + j, :]\r\n pos_norm = pos/np.linalg.norm(pos)\r\n tilt = np.arccos(np.dot(pos_norm, z_axis))\r\n tilt_angles.append(tilt/np.pi*180)\r\n \r\n if j!=0:\r\n x1, y1, z1 = positions4[int(np.sum(pan_angle_pos[:i])) +j-1,0], positions4[int(np.sum(pan_angle_pos[:i])) +j-1,1], positions4[int(np.sum(pan_angle_pos[:i])) +j-1,2]\r\n x2, y2, z2 = positions4[int(np.sum(pan_angle_pos[:i])) +j,0], positions4[int(np.sum(pan_angle_pos[:i])) +j,1], positions4[int(np.sum(pan_angle_pos[:i])) +j,2]\r\n pan_angle_step.append(math.degrees(math.acos( (x1*x2 + y1*y2 + z1*z1) / np.sqrt( (x1*x1 + y1*y1 + z1*z1)*(x2*x2+y2*y2+z2*z2) ) )))\r\n \r\n radii.append(np.linalg.norm(pos))\r\n \r\n#print('pan_angles[0] =', pan_angles[0])\r\nprint('tilt_angles[0] =', tilt_angles[0])\r\n\r\nfor i in range(no_of_cameras): \r\n tilt_angles2.append(np.nanmean((tilt_angles[int(np.sum(pan_angle_pos[:i])):int(np.sum(pan_angle_pos[:i])+pan_angle_pos[i])])))\r\n\r\ntilt_angles2=np.array(tilt_angles2)\r\ntilt_angle_step=abs(tilt_angles2[:-1]-tilt_angles2[1:])\r\ntilt_angle_step_std=abs(tilt_angles2[:-1]-tilt_angles2[1:]).std()\r\ntilt_angle_step_mean=abs(tilt_angles2[:-1]-tilt_angles2[1:]).mean()\r\nprint('tilt_angle_step_std [degree]:', tilt_angle_step_std)\r\nprint('tilt_angle_step_mean [degree]:', tilt_angle_step_mean)\r\n\r\npan_angle_step=np.array(pan_angle_step)\r\npan_angle_step_std=np.nanstd(pan_angle_step)\r\npan_angle_step_mean=np.nanmean(pan_angle_step)\r\nprint('pan_angle_step_std [degree]:', pan_angle_step_std)\r\nprint('pan_angle_step_mean [degree]:', pan_angle_step_mean)\r\n\r\nradii = np.array(radii)\r\nradii_mean = radii[np.invert(np.isnan(radii))].mean()\r\nradii_std = radii[np.invert(np.isnan(radii))].std()\r\nradii_mean_mm = radii_mean * d0 / radii_mean\r\nradii_std_mm = radii_std * d0 / radii_mean\r\nprint('radii_mean_mm:', radii_mean_mm)\r\nprint('radii_std_mm:', radii_std_mm)\r\n\r\n# plot camera positions in physical dimension [mm]\r\nfig = plt.figure(4)\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(positions4[:,0]*d0 / radii_mean, positions4[:,1]*d0 / radii_mean,\r\n positions4[:,2]*d0 / radii_mean, 'go')\r\nax.set_xlabel('x')\r\nax.set_ylabel('y')\r\nax.set_zlabel('z')\r\nax.set_xlim([-220, 220])\r\nax.set_ylim([-220, 220])\r\nax.set_zlim([-180, 180])\r\nplt.show()\r\n","sub_path":"positions_blender_controlled_camera_pose.py","file_name":"positions_blender_controlled_camera_pose.py","file_ext":"py","file_size_in_byte":14401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"8309374","text":"import os;\nimport sys;\nimport math\nfrom sets import Set\nkeywords=[];\ndef findkey(line):\n for key in keywords:\n if key in line:\n return key;\n return \"\"\ndef putfile(filename,new,subfix=\".pc\",newsubfix='.pc1'):\n f=open(filename)\n firstline=\"\"\n declares=[]\n declares.append(new[0]+\"\\n\")\n 
query=[]\n for line in f:\n line=line.replace(\"skb->cb\",\"cb\").replace(\"sk->th\",\"th\")\n if \"array\" in line:\n declares.append(line);\n continue;\n if \"(query [\" in line:\n firstline=line\n continue\n query.append(line);\n f.close()\n start=firstline.find(\"[\")\n firstline=firstline[0:start+1]+new[1]+\"\\n\"+firstline[start+1:]\n f=open(\"result-\"+filename.replace(subfix,newsubfix),'w+')\n f.write(''.join(declares)+'\\n'+firstline+\"\".join(query))\n f.close()\n\ndef trimob(realcontent):\n lst=realcontent.split(\"\\n\")\n content=[]\n for line in lst:\n if \"output_init_net\" in line:\n loc=line.find(\"output_init_net\")\n last=line.find(\")\",loc)\n first=line.rfind(\"(\",0,loc)\n content.append(line[first:last+1])\n #print loc,first,last,line\n return \"\\n\".join(content)\n\ndef classifyOb(Dir,subfix=\".observable\",start=0):\n obs={}\n Dir=Dir+\"/\"\n for filename in os.listdir(Dir):\n if filename.endswith(subfix):\n index=int(filename.replace(\"test\",\"\").replace(subfix,\"\"))\n if index w8 = symbolic\"\n obstr=\"(ReadLSB w\"+str(size)+\" 0 ob)\"\n count=0;\n os.system(\"mkdir result-\"+Dir)\n f=open(\"result-\"+Dir+\"class.txt\",\"w+\")\n for key in obs:\n f.write(str(count)+\":\"+\" \".join(obs[key])+\"\\n\")\n for name in obs[key]:\n putfile(Dir+name+\".pc\",[declare,\"(Eq \"+obstr+\" \"+str(count)+\")\"],'.pc','.pc1')\n count=count+1\n f.close()\n\ndef AddSA(Dir,inputfile):\n f=open(inputfile);\n for line in f:\n if \"array\" in line:\n declare.append(line)\n else:\n query.append(line)\n putfile(Dir+\"result.pc.clean\",\"\".join(declare)+\"\".join(query),'.pc.clean','.pc.new')\n f.close()\n\ndef mergeDir(Dirs):\n count=0\n os.system(\"mkdir klee-all\");\n for Dir in Dirs:\n Dir=Dir+'/'\n for filename in os.listdir(Dir):\n os.system(\"cp \"+Dir+filename+\" klee-all/\"+str(count)+filename)\n count=count+1\ndef cleanfile(filename,newsubfix=\".clean\"):\n declare=Set([])\n others=[]\n f=open(filename)\n for line in f:\n if \"array\" in line:\n declare.add(line)\n else:\n others.append(line)\n f.close()\n f=open(filename+newsubfix,'w+')\n f.write(''.join(declare))\n for line in others:\n f.write(line);\n f.close()\n\n\nif len(sys.argv)==2:\n classifyOb(sys.argv[1],'.observable',5678);\nif len(sys.argv)>2:\n mergeDir(sys.argv[1:])\nif len(sys.argv)==1:\n for filename in os.listdir(\"result-klee-real\"):\n if \"subpc\" in filename:\n cleanfile(\"result-klee-real/\"+filename,\"\")\n #cleanfile(\"result-klee-out-126/result.pc\")\n #AddSA(\"result-klee-real\",\"SA.txt\")\n","sub_path":"examples/linux-3.18.37/transinoutput.py","file_name":"transinoutput.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"9070374","text":"def checkBysimmetric(dim, matrix):\n half = 0\n\n if(dim % 2 == 1):\n half = dim//2 + 1\n else :\n half = dim//2\n\n for i in range(half):\n for j in range(dim):\n if((matrix[i][j] != matrix[j][i]) or (matrix[i][j] != matrix[dim-1-j][dim-1-i])):\n return False\n\n return True\n\nmatrix = []\ndimension = 0\n\nwith open('matrix.txt') as file :\n try:\n dimension = int(file.readline())\n\n for i in range(dimension):\n temp = file.readline().strip().split(' ')\n matrix.append(temp)\n \n except Exception as e:\n print(e)\n\nprint('matrix loaded!')\nprint('Yes') if checkBysimmetric(dimension, matrix) else 
print('No')\n","sub_path":"src/5-Problem01.py","file_name":"5-Problem01.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"225202813","text":"from ryu.ofproto import ether\nfrom ryu.ofproto import inet\nfrom ryu.ofproto import ofproto_v1_3\nfrom ryu.ofproto.ofproto_v1_3 import OFPP_CONTROLLER\nfrom ryu.exception import OFPUnknownVersion\nfrom ryu.lib import dpid as dpid_lib\nfrom ryu.lib import mac as mac_lib\nfrom ryu.lib import hub\nfrom constant import *\nfrom util import *\n\nclass OfCtl(object):\n _OF_VERSIONS = {}\n\n @staticmethod\n def register_of_version(version):\n def _register_of_version(cls):\n OfCtl._OF_VERSIONS.setdefault(version, cls)\n return cls\n return _register_of_version\n\n @staticmethod\n def factory(dp, logger):\n of_version = dp.ofproto.OFP_VERSION\n if of_version in OfCtl._OF_VERSIONS:\n ofctl = OfCtl._OF_VERSIONS[of_version](dp, logger)\n else:\n raise OFPUnknownVersion(version=of_version)\n\n return ofctl\n\n def __init__(self, dp, logger):\n super(OfCtl, self).__init__()\n self.dp = dp\n self.sw_id = {'sw_id': dpid_lib.dpid_to_str(dp.id)}\n self.logger = logger\n\n def set_sw_config_for_ttl(self):\n # OpenFlow v1_2 only.\n pass\n\n def set_flow(self, cookie, priority, in_port=0, dl_type=0, dl_dst=0, dl_vlan=0,\n nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,\n nw_proto=0, tp_src=0, tp_dst=0, idle_timeout=0, actions=None):\n # Abstract method\n raise NotImplementedError()\n\n def send_arp(self, arp_opcode, vlan_id, src_mac, dst_mac,\n src_ip, dst_ip, arp_target_mac, output):\n # Generate ARP packet\n if vlan_id != VLANID_NONE:\n ether_proto = ether.ETH_TYPE_8021Q\n pcp = 0\n cfi = 0\n vlan_ether = ether.ETH_TYPE_ARP\n v = vlan.vlan(pcp, cfi, vlan_id, vlan_ether)\n else:\n ether_proto = ether.ETH_TYPE_ARP\n hwtype = 1\n arp_proto = ether.ETH_TYPE_IP\n hlen = 6\n plen = 4\n\n pkt = packet.Packet()\n e = ethernet.ethernet(dst_mac, src_mac, ether_proto)\n a = arp.arp(hwtype, arp_proto, hlen, plen, arp_opcode,\n src_mac, src_ip, arp_target_mac, dst_ip)\n pkt.add_protocol(e)\n if vlan_id != VLANID_NONE:\n pkt.add_protocol(v)\n pkt.add_protocol(a)\n pkt.serialize()\n\n # Send packet out\n self.send_packet_out(output, pkt.data, data_str=str(pkt))\n\n def send_icmp(self, protocol_list, vlan_id, icmp_type,\n icmp_code, icmp_data=None, msg_data=None, src_ip=None):\n # Generate ICMP reply packet\n csum = 0\n offset = ethernet.ethernet._MIN_LEN\n\n if vlan_id != VLANID_NONE:\n ether_proto = ether.ETH_TYPE_8021Q\n pcp = 0\n cfi = 0\n vlan_ether = ether.ETH_TYPE_IP\n v = vlan.vlan(pcp, cfi, vlan_id, vlan_ether)\n offset += vlan.vlan._MIN_LEN\n else:\n ether_proto = ether.ETH_TYPE_IP\n\n eth = protocol_list[ETHERNET]\n e = ethernet.ethernet(eth.src, eth.dst, ether_proto)\n\n if icmp_data is None and msg_data is not None:\n ip_datagram = msg_data[offset:]\n if icmp_type == icmp.ICMP_DEST_UNREACH:\n icmp_data = icmp.dest_unreach(data_len=len(ip_datagram),\n data=ip_datagram)\n elif icmp_type == icmp.ICMP_TIME_EXCEEDED:\n icmp_data = icmp.TimeExceeded(data_len=len(ip_datagram),\n data=ip_datagram)\n\n ic = icmp.icmp(icmp_type, icmp_code, csum, data=icmp_data)\n\n ip = protocol_list[IPV4]\n if src_ip is None:\n src_ip = ip.dst\n ip_total_length = ip.header_length * 4 + ic._MIN_LEN\n if ic.data is not None:\n ip_total_length += ic.data._MIN_LEN\n if ic.data.data is not None:\n ip_total_length += + len(ic.data.data)\n i = ipv4.ipv4(ip.version, ip.header_length, ip.tos,\n ip_total_length, 
ip.identification, ip.flags,\n ip.offset, DEFAULT_TTL, inet.IPPROTO_ICMP, csum,\n src_ip, ip.src)\n\n pkt = packet.Packet()\n pkt.add_protocol(e)\n if vlan_id != VLANID_NONE:\n pkt.add_protocol(v)\n pkt.add_protocol(i)\n pkt.add_protocol(ic)\n pkt.serialize()\n\n # Send packet out\n self.send_packet_out(self.dp.ofproto.OFPP_IN_PORT, pkt.data,\n data_str=str(pkt))\n\n def send_packet_out(self, output, data, in_port=OFPP_CONTROLLER,\n data_str=None):\n actions = [self.dp.ofproto_parser.OFPActionOutput(output, 0)]\n self.dp.send_packet_out(buffer_id=UINT32_MAX, in_port=in_port,\n actions=actions, data=data)\n #TODO: Packet library convert to string\n #if data_str is None:\n # data_str = str(packet.Packet(data))\n #self.logger.debug('Packet out = %s', data_str, extra=self.sw_id)\n\n def set_normal_flow(self, cookie, priority):\n out_port = self.dp.ofproto.OFPP_NORMAL\n actions = [self.dp.ofproto_parser.OFPActionOutput(out_port, 0)]\n self.set_flow(cookie, priority, actions=actions)\n\n def set_packetin_flow(self, cookie, priority, dl_type=0, dl_dst=0,\n dl_vlan=0, dst_ip=0, dst_mask=32, nw_proto=0):\n miss_send_len = UINT16_MAX\n actions = [self.dp.ofproto_parser.OFPActionOutput(\n self.dp.ofproto.OFPP_CONTROLLER, miss_send_len)]\n self.set_flow(cookie, priority, dl_type=dl_type, dl_dst=dl_dst,\n dl_vlan=dl_vlan, nw_dst=dst_ip, dst_mask=dst_mask,\n nw_proto=nw_proto, actions=actions)\n\n def send_stats_request(self, stats, waiters):\n self.dp.set_xid(stats)\n waiters_per_dp = waiters.setdefault(self.dp.id, {})\n event = hub.Event()\n msgs = []\n waiters_per_dp[stats.xid] = (event, msgs)\n self.dp.send_msg(stats)\n\n cond = event.wait(timeout=OFP_REPLY_TIMER)\n if not cond:\n del waiters_per_dp[stats.xid]\n\n return msgs\n\n@OfCtl.register_of_version(ofproto_v1_3.OFP_VERSION)\nclass OfCtl_v1_3(OfCtl):\n\n def __init__(self, dp, logger):\n super(OfCtl_v1_3, self).__init__(dp, logger)\n\n def set_sw_config_for_ttl(self):\n flags = self.dp.ofproto.OFPC_INVALID_TTL_TO_CONTROLLER\n miss_send_len = UINT16_MAX\n m = self.dp.ofproto_parser.OFPSetConfig(self.dp, flags,\n miss_send_len)\n self.dp.send_msg(m)\n self.logger.info('Set SW config for TTL error packet in.',\n extra=self.sw_id)\n\n def get_packetin_inport(self, msg):\n in_port = self.dp.ofproto.OFPP_ANY\n for match_field in msg.match.fields:\n if match_field.header == self.dp.ofproto.OXM_OF_IN_PORT:\n in_port = match_field.value\n break\n return in_port\n\n def get_all_flow(self, waiters):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n match = ofp_parser.OFPMatch()\n stats = ofp_parser.OFPFlowStatsRequest(self.dp, 0, ofp.OFPTT_ALL, \n ofp.OFPP_ANY, ofp.OFPG_ANY, 0, 0, match)\n return self.send_stats_request(stats, waiters)\n\n def get_all_group(self, waiters):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n stats = ofp_parser.OFPGroupDescStatsRequest(self.dp, 0)\n\n return self.send_stats_request(stats, waiters)\n\n def get_all_port(self, waiters):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n stats = ofp_parser.OFPPortStatsRequest(self.dp, 0, ofp.OFPP_ANY)\n\n return self.send_stats_request(stats, waiters)\n\n def get_all_table(self, waiters):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n stats = ofp_parser.OFPTableStatsRequest(self.dp, 0)\n\n return self.send_stats_request(stats, waiters)\n\n def set_goto(self, cookie, priority, src, dst):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n cmd = ofp.OFPFC_ADD\n\n # Match\n match = ofp_parser.OFPMatch()\n 
# Instructions\n inst = [self.dp.ofproto_parser.OFPInstructionGotoTable(dst)]\n\n m = ofp_parser.OFPFlowMod(self.dp, cookie, 0, src, cmd, 0,\n 0, priority, UINT32_MAX, ofp.OFPP_ANY,\n ofp.OFPG_ANY, 0, match, inst)\n self.dp.send_msg(m)\n\n def set_flow(self, cookie, priority, table_id=0, in_port=0, dl_type=0, \n dl_dst=0, dl_vlan=0, dl_mpls=0, nw_src=0, src_mask=32, \n nw_dst=0, dst_mask=32, nw_proto=0, tp_src=0, tp_dst=0, \n idle_timeout=0, actions=None, inst=None):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n cmd = ofp.OFPFC_ADD\n\n # Match\n match = ofp_parser.OFPMatch()\n if in_port:\n match.set_in_port(in_port)\n if dl_type:\n match.set_dl_type(dl_type)\n if dl_dst:\n match.set_dl_dst(dl_dst)\n if dl_vlan:\n match.set_vlan_vid(dl_vlan)\n if dl_mpls:\n match.set_mpls_label(dl_mpls)\n if nw_src:\n match.set_ipv4_src_masked(ipv4_text_to_int(nw_src),\n mask_ntob(src_mask))\n if nw_dst:\n match.set_ipv4_dst_masked(ipv4_text_to_int(nw_dst),\n mask_ntob(dst_mask))\n if nw_proto:\n if dl_type == ether.ETH_TYPE_IP:\n match.set_ip_proto(nw_proto)\n elif dl_type == ether.ETH_TYPE_ARP:\n match.set_arp_opcode(nw_proto)\n\n # Instructions\n actions = actions or []\n _inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,\n actions)]\n if inst:\n _inst.append(inst)\n\n m = ofp_parser.OFPFlowMod(self.dp, cookie, 0, table_id, cmd, idle_timeout,\n 0, priority, UINT32_MAX, ofp.OFPP_ANY,\n ofp.OFPG_ANY, 0, match, _inst)\n self.dp.send_msg(m)\n\n def set_routing_flow(self, cookie, priority, outport, dl_vlan=0,\n nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,\n src_mac=0, dst_mac=0, idle_timeout=0, dec_ttl=False):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n dl_type = ether.ETH_TYPE_IP\n\n actions = []\n if dec_ttl:\n actions.append(ofp_parser.OFPActionDecNwTtl())\n if src_mac:\n set_src = ofp_parser.OFPMatchField.make(ofp.OXM_OF_ETH_SRC,\n src_mac)\n actions.append(ofp_parser.OFPActionSetField(set_src))\n if dst_mac:\n set_dst = ofp_parser.OFPMatchField.make(ofp.OXM_OF_ETH_DST,\n dst_mac)\n actions.append(ofp_parser.OFPActionSetField(set_dst))\n if outport is not None:\n actions.append(ofp_parser.OFPActionOutput(outport, 0))\n\n self.set_flow(cookie, priority, dl_type=dl_type, dl_vlan=dl_vlan,\n nw_src=nw_src, src_mask=src_mask,\n nw_dst=nw_dst, dst_mask=dst_mask,\n idle_timeout=idle_timeout, actions=actions)\n\n def delete_flow(self, flow_stats):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n cmd = ofp.OFPFC_DELETE\n cookie = flow_stats.cookie\n priority = flow_stats.priority\n cookie_mask = UINT64_MAX\n match = ofp_parser.OFPMatch()\n inst = []\n\n flow_mod = ofp_parser.OFPFlowMod(self.dp, cookie, cookie_mask, priority, cmd,\n 0, 0, 0, UINT32_MAX, ofp.OFPP_ANY,\n ofp.OFPG_ANY, 0, match, inst)\n self.dp.send_msg(flow_mod)\n self.logger.info('Delete flow [cookie=0x%x]', cookie, extra=self.sw_id)\n\n def add_group(self, groupid, buckets):\n ofp = self.dp.ofproto\n ofp_parser = self.dp.ofproto_parser\n\n req = ofp_parser.OFPGroupMod(self.dp, ofp.OFPFC_ADD, ofp.OFPGT_SELECT,\\\n groupid, buckets)\n self.dp.send_msg(req)\n\n","sub_path":"ryu/app/SegmentRouting/ofctl.py","file_name":"ofctl.py","file_ext":"py","file_size_in_byte":12484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"131240142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 4 12:31:38 2016\n\n@author: tallt\n\"\"\"\nimport cv2\nimport numpy as np\nimport preprocess as pp \nimport os\nfrom sklearn.externals 
import joblib\nfrom train_mlp import *\nPLATE_WIDTH = 400\nPLATE_HEIGHT = 150\nMIN_AREA = (PLATE_WIDTH/13) * (PLATE_HEIGHT/4) #\nMAX_AREA = (PLATE_WIDTH/3) * (PLATE_HEIGHT)\ndig2char = {0:'0', 1:'1', 2:'2', 3:'3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8',\n 9:'9', 10:'A', 11:'B', 12:'C', 13:'D', 14:'E', 15:'F', 16:'G',\n 17:'H', 18:'J', 19:'K', 20:'L', 21:'M', 22:'N', 23:'P', 24:'Q',\n 25:'R', 26:'S', 27:'T', 28:'U', 29:'V', 30:'W', 31:'X', 32:'Y',\n 33:'Z', 34:'川', 35:'鄂', 36:'赣', 37:'甘', 38:'贵', 39:'桂',\n 40:'黑', 41:'沪', 42:'冀', 43:'津', 44:'京', 45:'吉', 46:'辽',\n 47:'鲁', 48:'蒙', 49:'闽', 50:'宁', 51:'青', 52:'琼', 53:'陕',\n 54:'苏', 55:'西', 56:'皖', 57:'湘', 58:'新', 59:'豫', 60:'渝',\n 61:'粤', 62:'云',63:'藏', 64:'浙'}\ndef threshold_image(image):\n if image is None:\n print('error: image not exit')\n return\n threshold = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)\n\n return threshold\ndef find_proper_list(rect_list, image):\n if rect_list is None:\n print(\"error: parameter not exit\")\n return\n proper_list = []\n for rect in rect_list:\n (x, y), (width, height), angle = rect\n\n if angle < -30:\n rect = ((x, y), (height, width), 0)\n if angle > 30:\n rect = ((x, y), (height, width), angle-90)\n (x, y), (width, height), angle = rect\n if width * height < MIN_AREA:\n continue\n if width * height > MAX_AREA:\n continue\n if width > height:\n continue\n \n proper_list.append(rect)\n '''\n image_copy = image\n print(rect)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n cv2.drawContours(image, [box], 0, (0,255,0),2)\n cv2.imshow('rect', image_copy)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n '''\n return proper_list\n# end function\ndef rect_list_sort(rect_list):\n sorted_rect_list = sorted(rect_list, key = lambda x: x[0][0])\n return sorted_rect_list\n\ndef closeure(image):\n if image is None:\n print(\"error: parameter not exit\")\n return\n # end if\n kernal = cv2.getStructuringElement(cv2.MORPH_RECT, (1,1))\n closed_image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernal)\n return closed_image\n# end funtion\n\ndef load_preprocess_image(image):\n image = cv2.resize(image, (PLATE_WIDTH, PLATE_HEIGHT), interpolation=cv2.INTER_CUBIC)\n image_blurred = pp.gaussian_blur_img(image)\n image_gray = pp.make_gray(image_blurred)\n image_binary = threshold_image(image_gray)\n # image_close = closeure(image_binary)\n contours = pp.find_contour(image_binary)\n rect_list = pp.min_rect(contours)\n proper_list = find_proper_list(rect_list, image)\n return image, contours, proper_list\n# end function\n\ndef deal_chinest(rect_list):\n sorted_list = rect_list_sort(rect_list)\n (x1, y1), (width1, height1), angle1 = sorted_list[0]\n (x2, y2), (width2, height2), angle2 = sorted_list[1]\n sorted_list[0] = ((x1, y2), (width2, height2), 0)\n return sorted_list\n\ndef get_char_in_plate(img):\n image, contours, rect_list = load_preprocess_image(img)\n final_rect_list = deal_chinest(rect_list)\n char_list = []\n for rect in final_rect_list:\n (x, y), (width, height), angle = rect;\n image_ROI = image[y-height/2:y+height/2,x-width/2 - 7:x+width/2 + 7]\n char_list.append(image_ROI)\n return char_list\n# end function\ndef make_gray(image):\n if image is None:\n print(\"error: parameter not exit\")\n return\n # end if\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return gray_image\n# end function\ndef sobel_process(image):\n if image is None:\n print(\"error: parameter not exit\")\n return\n # end if\n sobel_image = cv2.Sobel(image, -1, 1, 0,ksize=3)\n return sobel_image\n# 
end function\n\nimg = cv2.imread('p4.jpg') \ncv2.imshow('',img)\nimage_list = get_char_in_plate(img)\nmlp_model = joblib.load('.//model//KNN_model.m') \nfor image in image_list:\n image = cv2.resize(image, (20, 20), interpolation=cv2.INTER_CUBIC)\n gray_image=make_gray(image)\n thres = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 1)\n cv2.imshow('image',thres)\n image_r = np.reshape(thres, 20*20 ) \n predication = mlp_model.predict([image_r]) \n print( dig2char[predication[0]] ) \n cv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"ANPR_zh/char_detect.py","file_name":"char_detect.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"443306368","text":"import numpy as np\nfrom PyRuntime import ExecutionSession\n\n\nmodel = './add.so'\nsession = ExecutionSession(model, \"run_main_graph\")\n\ninput = np.array([[[1,1],[2,2],[3,3]],[[1,2],[2,2],[3,3]]], np.dtype(np.float32))\noutputs = session.run(input)\nprint(outputs)\n","sub_path":"mlir-runtime-add.py","file_name":"mlir-runtime-add.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"192893308","text":"import socket, select, struct\n\nlisten_address = ('', 2018)\nconnections = {}\nhandlers = {}\n\ndef handle_input(socket, data):\n socket.sendall(data)\n\ndef handle_request(fileno, event):\n if event & select.POLLIN:\n client_socket = connections[fileno]\n data = client_socket.recv(4096)\n if data:\n handle_input(client_socket, data)\n else:\n print(\"disconnect from\", client_socket.getpeername())\n poll.unregister(fileno)\n client_socket.close()\n del connections[fileno]\n del handlers[fileno]\n\ndef handle_accept(fileno, event):\n client_socket, client_address = server_socket.accept()\n if client_socket:\n print(\"got connection from\", client_address)\n # client_socket.setblocking(0)\n poll.register(client_socket.fileno(), select.POLLIN)\n connections[client_socket.fileno()] = client_socket\n handlers[client_socket.fileno()] = handle_request\n\nif __name__ == \"__main__\":\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind(listen_address)\n server_socket.listen(socket.SOMAXCONN)\n server_socket.setblocking(0)\n\n poll = select.poll()\n poll.register(server_socket.fileno(), select.POLLIN)\n handlers[server_socket.fileno()] = handle_accept\n\n while True:\n events = poll.poll(1000) # 10 secs\n for fileno, event in events:\n handler = handlers[fileno]\n handler(fileno, event)\n","sub_path":"model5/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"477253309","text":"import gym\nimport gym_rock_paper_scissors\n\nenv = gym.make('RockPaperScissors-v0')\n\nob = env.reset()\nprint(\"Initial observation {}\\n\".format(ob))\nfor i in range(0, 3):\n fixed_action = [1, 2]\n ob, reward, done, info = env.step(fixed_action)\n print(\"Observation: {}\\nReward: {}\\nRepetition: {}\\n\".format(ob, reward, env.repetition))\n\nprint(\"Done!\")\n","sub_path":"usage-example.py","file_name":"usage-example.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"390942753","text":"import numpy as 
np\r\nimport cv2 as cv\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nimg = cv.imread(\r\n '../data/beading_basler_cropped/Basler_acA2440-35um__23336827__20201013_093810717_1.tiff',0)\r\nedges = cv.Canny(img,100,200)\r\nplt.subplot(121),plt.imshow(img,cmap = 'gray')\r\nplt.title('Original'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(edges,cmap = 'gray')\r\nplt.title('\"Strong\" Edge Detect'), plt.xticks([]), plt.yticks([])\r\nplt.show()","sub_path":"helper_scripts/canny_edge.py","file_name":"canny_edge.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"319443219","text":"# =============================================================================\n# This script converts a given whole number of seconds into a value\n# broken down into days, hours, minutes and seconds\n# =============================================================================\n\nvalorInformado = int(input('Entre com um valor em segundos = '))\n\n# this command captures the whole part of the given value, in days\ndias = valorInformado // 86400 # 1 day = 86400 s\n# this command captures the value in hours\nhoras = (valorInformado % 86400) // 3600 # 1 h = 3600 s\n# this command captures the value in minutes\nminutos = (valorInformado % 3600) // 60\n# this command captures the value in seconds\nsegundos = (valorInformado % 3600) % 60\n\nprint(dias, ' dias,',\n horas, ' horas,',\n minutos, ' minutos e',\n segundos, ' segundos')\n\n# =============================================================================\n# # Test #\n#\n# Input:\n# Please enter the number of seconds you want to convert: 178615\n#\n# Output:\n# 2 dias, 1 horas, 36 minutos e 55 segundos.\n# =============================================================================\n","sub_path":"PyCursoUSP - Parte 1/semana 2/segundos.py","file_name":"segundos.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"483896936","text":"#! 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nGoal: cluster a small collection of short texts\n1. remove useless words\n2. word segmentation\n3. tf-idf\n4. clustering\n\"\"\"\nimport json\nimport jieba\nfrom random import shuffle\nfrom sklearn.cluster import KMeans\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\"\"\"\nThe TF-IDF part of sklearn mainly uses two functions: CountVectorizer() and TfidfTransformer(). \nCountVectorizer's fit_transform turns the words of the texts into a term-frequency matrix. \nMatrix element weight[i][j] is the frequency of word j in text i, i.e. how often each word occurs. \nget_feature_names() lists the keywords of all the texts, and toarray() shows the resulting frequency matrix. \nTfidfTransformer also has a fit_transform function; its job is to compute the tf-idf values.\n\"\"\"\ndef extract_native_comment(filename):\n \"\"\"\n Extract the raw text dumped from the database\n :param filename:\n :return:\n \"\"\"\n with open(filename, 'rb') as f:\n data = json.loads(f.read())\n record = data['RECORDS']\n comment = []\n for item in record:\n # logger.info(item['id'])\n single_comment = item['comment_content']\n # print(single_comment)\n comment.append(single_comment)\n # break\n # print(comment)\n return comment\n\n\ndef shuffle_comment(comment):\n \"\"\"\n 1. shuffle the order first\n 2. split the short texts on the Chinese full-width colon and extract the real comment\n :param comment:\n :return:\n \"\"\"\n shuffle(comment)\n real_comment = []\n for item in comment:\n print(item)\n split_list = item.split(u':')\n print(split_list)\n length = len(split_list)\n if length == 1:\n real_comment.append(item)\n elif length > 1:\n flag = False\n for idx in range(1, length):\n if '回复' in split_list[idx]: # '回复' = \"reply\"\n inner_split_list = split_list[idx].split(':')\n print(inner_split_list[-1])\n print('------------------')\n real_comment.append(inner_split_list[-1])\n flag = True\n if not flag:\n \"\"\"\n if, after scanning every part, no reply marker was found\n \"\"\"\n real_comment.append(split_list[-1])\n print(real_comment)\n return real_comment\n\n\ndef cut_word(real_comment):\n after_cut_word = []\n for item in real_comment:\n print('----------------->')\n print(item)\n seg_list = list(jieba.cut(item, cut_all=False)) # accurate mode, suited to text analysis\n print(seg_list)\n print('----------------->')\n after_cut_word.extend(seg_list)\n # break\n # print(after_cut_word)\n return after_cut_word\n\n\ndef remove_stop_word(after_cut_word):\n \"\"\"\n Remove stop words. For now the generic stop-word lists found online (e.g. the HIT list) are used;\n depending on the results, a stop-word list specific to Weibo may be built later.\n :param after_cut_word:\n :return:\n \"\"\"\n non_empty = [i for i in after_cut_word if i != ' '] # drop whitespace tokens\n stop_word_collection = []\n with open('stop_word/所有停用词.txt', 'r', encoding='utf8') as f:\n for line in f.readlines():\n if line != '\\n':\n stop_word_collection.append(line.strip()) # strip the trailing newline so the membership test below can match\n # print(line)\n after_remove = []\n for word in non_empty:\n if word not in stop_word_collection:\n after_remove.append(word)\n return after_remove\n\n\ndef continue_remove_useless_word(after_remove):\n \"\"\"\n Some useless tokens are still visible to the naked eye, so check whether each token is a Chinese character\n :param after_remove:\n :return:\n \"\"\"\n final_result = [char for char in after_remove if '\\u4e00' <= char <= '\\u9fff']\n return final_result\n\n\ndef feature(corpus):\n \"\"\"\n The TF-IDF part of sklearn mainly uses two functions: CountVectorizer() and TfidfTransformer().\n CountVectorizer's fit_transform turns the words of the texts into a term-frequency matrix.\n Matrix element weight[i][j] is the frequency of word j in text i, i.e. how often each word occurs.\n get_feature_names() lists the keywords of all the texts, and toarray() shows the resulting frequency matrix.\n TfidfTransformer also has a fit_transform function; its job is to compute the tf-idf values.\n \"\"\"\n vectorizer = CountVectorizer() # turn the words of the texts into a frequency matrix; element a[i][j] is the frequency of word j in text i\n transformer = TfidfTransformer() # this class computes the tf-idf weight of every word\n tf_idf = transformer.fit_transform(vectorizer.fit_transform(corpus))\n print('%%%%%%%%%')\n print(tf_idf)\n print('%%%%%%%%%')\n word = vectorizer.get_feature_names() # get all the words of the bag-of-words model\n print('&&&&&&&&&')\n print(word)\n 
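# Editor's note (added, not from the original): with TfidfTransformer's defaults\n # (smooth_idf=True, norm='l2'), idf(t) = ln((1+n)/(1+df(t))) + 1 and each row is\n # then L2-normalised, so the weights printed below are not raw counts.\n 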
print('&&&&&&&&&')\n print('$$$$$$$$$')\n weight = tf_idf.toarray() # pull the tf-idf matrix out; element w[i][j] is the tf-idf weight of word j in text i\n print(weight)\n print('$$$$$$$$$')\n\n train_x, test_x = train_test_split(tf_idf, test_size=0.2)\n # scores = []\n # for i in range(2, 21):\n # km = KMeans(n_clusters=i)\n # km.fit(train_x)\n # label = km.labels_\n # print(label)\n # print(km.inertia_) # used to judge whether the number of clusters fits; the smaller the distance the better the clustering, so pick the count at the elbow\n # scores.append({-km.score(test_x): i})\n # settle on the number of clusters\n # return 19\n\n km = KMeans(n_clusters=19)\n km.fit(train_x)\n order_centroids = km.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n print(vectorizer.get_stop_words())\n for i in range(19):\n print(\"Cluster %d:\" % i, end='')\n for ind in order_centroids[i, :10]:\n print(' %s' % terms[ind], end='')\n print()\n\n # sort_score = sorted(scores, key=lambda k: k[0], reverse=True)\n # print(sort_score)\n\n\ndef main():\n comment = extract_native_comment('comment.json')\n real_comment = shuffle_comment(comment)\n after_cut_word = cut_word(real_comment)\n after_remove = remove_stop_word(after_cut_word)\n final_result = continue_remove_useless_word(after_remove)\n feature(final_result)\n # print(final_result)\n # with open('final_result.txt', 'w') as f:\n # f.write(json.dumps(final_result, sort_keys=True, indent=4, ensure_ascii=False))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cluster_0.py","file_name":"cluster_0.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"554907191","text":"import cv2\nimport math\nimport numpy as np\n\nimport MyPackageCommon.Constants as cst\n\n# compute the cosine of the angle between the\n# vectors pt0->pt1 and pt0->pt2\ndef angle(pt1, pt2, pt0) -> float:\n dx1 = float(pt1[0,0] - pt0[0,0])\n dy1 = float(pt1[0,1] - pt0[0,1])\n dx2 = float(pt2[0,0] - pt0[0,0])\n dy2 = float(pt2[0,1] - pt0[0,1])\n v = math.sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) )\n return (dx1*dx2 + dy1*dy2)/ v\n\n# detect rectangles in an image\ndef findSquares(bin_image, image, cond_area = 1000):\n # get the contours\n contours, _ = cv2.findContours(bin_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n for i, cnt in enumerate(contours):\n # approximate the contour with a precision proportional to its perimeter\n arclen = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, arclen*0.02, True)\n\n # a rectangular contour has 4 vertices after approximation,\n # covers a reasonably large area, and is convex.\n\n # convexity check \n area = abs(cv2.contourArea(approx))\n if approx.shape[0] == 4 and area > cond_area and cv2.isContourConvex(approx) :\n maxCosine = 0\n\n for j in range(2, 5):\n # compute the maximum cosine of the angles between the sides\n cosine = abs(angle(approx[j%4], approx[j-2], approx[j-1]))\n maxCosine = max(maxCosine, cosine)\n\n # if the cosines of all the angles are small\n # (i.e. all angles are about 90 degrees), write the\n # quadrangle vertices to the result sequence\n # if maxCosine < 0.7 :\n # judged to be a rectangle!!\n rcnt = approx.reshape(-1,2)\n cv2.polylines(image, [rcnt], True, (0,0,255), thickness=2, lineType=cv2.LINE_8)\n # print(rcnt)\n rectCut(image,approx)\n return image\n\ndef rectCut(image,approx):\n dst=[]\n pts1=np.float32(approx)\n pts2 = np.float32([[600, 300], [600, 0], [0, 0], [0, 300]])\n\n M = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(image, M, (600, 300))\n cv2.imwrite(cst.images.detected+\"rectTransformed.png\",dst)\n print(M)\n # print(dst)\n\ndef main():\n targetImage=\"sample_1.png\"\n image = cv2.imread(cst.images.detectSample+targetImage, cv2.IMREAD_COLOR)\n if image is None :\n exit(1)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n _, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n rimage = findSquares(bw, 
image)\n cv2.imwrite(cst.images.detected+targetImage,image)\n # cv2.imshow('Square Detector', rimage)\n # c = cv2.waitKey()\n # return 0\n\nif __name__ == '__main__':\n main()","sub_path":"MyPackageRectangleDetect/RectangleDetectionSample.py","file_name":"RectangleDetectionSample.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"467780542","text":"import cv2 as cv\nimport time\nimport mediapipe as mp\nimport base64\n\ncap = None\nmp_hands = None\nmp_draw = None\nrunning = None\n\nindexTip, indexDip = (0, 0), (0, 0)\nmiddleTip, middleDip = (0, 0), (0, 0)\nringTip, ringDip = (0, 0), (0, 0)\npinkyTip, pinkyDip = (0, 0), (0, 0)\nthumpTip = (0, 0)\nFinger = None\nhands = None\nprev = None\n\n\ndef initSetup():\n global cap, mp_hands, mp_draw, running, indexDip, indexTip, middleDip, middleTip,ringTip, ringDip\n global pinkyTip, pinkyDip, thumpTip, Finger, hands, prev\n cap = cv.VideoCapture(1, cv.CAP_DSHOW)\n running = True\n\n print('Starting Camera...')\n mp_hands = mp.solutions.hands\n mp_draw = mp.solutions.drawing_utils\n fingerpoint = mp_hands.HandLandmark\n prev = 0\n hands = mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5)\n Finger = {\n 1: '',\n 2: '',\n 3: '',\n 4: ''\n }\n\n indexTip, indexDip = (0, 0), (0, 0)\n middleTip, middleDip = (0, 0), (0, 0)\n ringTip, ringDip = (0, 0), (0, 0)\n pinkyTip, pinkyDip = (0, 0), (0, 0)\n thumpTip = (0, 0)\n\ndef getFrame():\n global cap, mp_hands, mp_draw, running, indexDip, indexTip, middleDip, middleTip, ringTip, ringDip\n global pinkyTip, pinkyDip, thumpTip, Finger, hands, prev\n ret, frame = cap.read()\n image = cv.flip(frame, 1)\n\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image.flags.writeable = False\n POINTS = hands.process(image)\n image.flags.writeable = True\n image = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n imageHeight, imageWidth, _ = image.shape\n\n\n ctime = time.time()\n fps = 1 / (ctime - prev)\n fps = str(int(fps))\n prev = ctime\n\n #\n cv.putText(image, 'FPS: ' + fps, (20, imageHeight - 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n if POINTS.multi_hand_landmarks:\n for hand in POINTS.multi_hand_landmarks:\n for point in mp_hands.HandLandmark:\n normalizedLandmark = hand.landmark[point]\n pixelCoordinatesLandmark =mp_draw._normalized_to_pixel_coordinates(normalizedLandmark.x,\n normalizedLandmark.y,\n imageWidth,\n imageHeight)\n if pixelCoordinatesLandmark == None:\n continue\n if point == mp_hands.HandLandmark.INDEX_FINGER_TIP:\n indexTip = pixelCoordinatesLandmark\n\n elif point == mp_hands.HandLandmark.INDEX_FINGER_DIP:\n indexDip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.MIDDLE_FINGER_TIP:\n middleTip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.MIDDLE_FINGER_DIP:\n middleDip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.RING_FINGER_DIP:\n ringDip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.RING_FINGER_TIP:\n ringTip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.PINKY_DIP:\n pinkyDip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.PINKY_TIP:\n pinkyTip = pixelCoordinatesLandmark\n elif point == mp_hands.HandLandmark.THUMB_TIP:\n thumpTip = pixelCoordinatesLandmark\n\n\n\n # if point == fingerpoint.MIDDLE_FINGER_DIP or point == fingerpoint.INDEX_FINGER_TIP:\n\n if indexTip[1] < indexDip[1]:\n Finger[1] = '1'\n elif indexDip[1] < indexTip[1]:\n Finger[1] = '0'\n\n if middleTip[1] < 
middleDip[1]:\n Finger[2] = '1'\n elif middleDip[1] < middleTip[1]:\n Finger[2] = '0'\n\n if ringTip[1] < ringDip[1]:\n Finger[3] = '1'\n elif ringDip[1] < ringTip[1]:\n Finger[3] = '0'\n\n if pinkyTip[1] < pinkyDip[1]:\n Finger[4] = '1'\n elif pinkyDip[1] < pinkyTip[1]:\n Finger[4] = '0'\n\n cv.putText(image, Finger[1], (20, 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n cv.putText(image, Finger[2], (50, 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n cv.putText(image, Finger[3], (80, 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n cv.putText(image, Finger[4], (110, 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n cv.putText(image, 'X: '+ str(indexTip[0]), (20, 100), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n cv.putText(image, 'Y: '+ str(indexTip[1]), (20, 140), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))\n\n # print(pixelCoordinatesLandmark)\n # print(normalizedLandmark)\n\n mp_draw.draw_landmarks(image, hand,mp_hands.HAND_CONNECTIONS)\n\n #cv.imshow('Hand', image)\n if cv.waitKey(1) == 27:\n running = False\n stop()\n smallImg = cv.resize(image, (200,150))\n ret, imgenc = cv.imencode('.jpg', smallImg)\n imgStr = base64.b64encode(imgenc)\n\n\n return (Finger, indexTip, str(imgStr)[2:-1])\n\ndef toStr(A):\n try:\n f = A[0]\n x,y = A[1]\n I = A[2]\n except:\n return {}\n\n global cap, mp_hands, mp_draw, running, indexDip, indexTip, middleDip, middleTip, ringTip, ringDip\n global pinkyTip, pinkyDip, thumpTip, Finger, hands, prev\n s = ''\n for i in f:\n s += f[i]\n\n d = {\n 'Finger': s,\n 'X': x,\n 'Y': y,\n 'I':I\n }\n return d\n\n\ndef stop():\n global cap, running\n cap.release()\n cv.destroyAllWindows()\n running = False\n\n\n\ndef getFingersValue():\n if running:\n return toStr(getFrame())\n else:\n return False\n\nif __name__ == '__main__':\n initSetup()\n while True:\n print(getFingersValue())","sub_path":"GAME/Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"348199323","text":"import turtle\nimport random\nimport random\n\nturtle.tracer(1,0)\n\npoacher = turtle.Turtle()\ntree = turtle.Turtle()\n'''\nturtle.register_shape(\"tree.gif\")\nturtle.register_shape('poacher.gif')\n\npoacher.shape('poacher.gif')\ntree.shape('tree.gif')\n'''\nborder = turtle.clone()\nborder.penup()\nborder.goto(300,300)\nborder.pendown()\nborder.goto(300, -300)\nborder.goto(-300,-300)\nborder.goto(-300, 300)\nborder.goto(300,300)\nborder.hideturtle()\n\nscore = 0\n\nSTEP = 20\n\ndirection = None\n\n#TIME_STEP =\n\nUP = 0\nDOWN = 1\nLEFT = 2\nRIGHT = 3\n\nUP_EDGE = 300\nDOWN_EDGE = -300\nRIGHT_EDGE = 300\nLEFT_EDGE = -300\n\npoacher_pos = None\ntree_pos_list = []\n\ndef W():\n global direction\n global poacher\n global poacher_pos\n direction = UP\n poacher_pos = poacher.pos()\n #move_poacher\n print(\"You pressed W\")\n\ndef S():\n global direction\n global poacher\n global poacher_pos\n direction = DOWN\n poacher_pos = poacher.pos()\n #move_poacher\n print(\"You pressed S\")\n\ndef A():\n global direction\n global poacher\n global poacher_pos\n direction = LEFT\n poacher_pos = poacher.pos()\n #move_poacher\n print(\"You pressed A\")\n\ndef D():\n global direction\n global poacher\n global poacher_pos\n direction = RIGHT\n poacher_pos = poacher.pos()\n #move_poacher\n print(\"You pressed D\")\n\nturtle.onkeypress(W , \"Up\")\nturtle.onkeypress(S , \"Down\")\nturtle.onkeypress(A , \"Left\")\nturtle.onkeypress(D , \"Right\")\n\nturtle.listen()\n\ndef move_poacher():\n global 
poacher_pos\n \n poacher_pos = poacher.pos()\n x_pos = poacher_pos[0]\n y_pos = poacher_pos[1]\n\n if direction==RIGHT:\n poacher.goto(x_pos + STEP, y_pos)\n print(\"You moved right!\")\n elif direction==LEFT:\n poacher.goto(x_pos - STEP, y_pos)\n print(\"You moved left!\")\n elif direction==DOWN:\n poacher.goto(x_pos, y_pos - STEP)\n print('You moved down!')\n elif direction==UP:\n poacher.goto(x_pos, y_pos+ STEP)\n print('You moved up!')\n\n new_poacher_pos = poacher.pos()\n new_x_pos = new_poacher_pos[0]\n new_y_pos = new_poacher_pos[1]\n\n if new_x_pos >= RIGHT_EDGE:\n print(\"You hit the right edge! Game over!\")\n quit()\n elif new_x_pos <= LEFT_EDGE:\n print(\"You hit the left edge! Game over!\")\n quit()\n elif new_y_pos >= UP_EDGE:\n print(\"You hit the up edge! Game over!\")\n quit()\n elif new_y_pos <= DOWN_EDGE:\n print(\"You hit the down edge! Game over!\")\n quit()\n turtle.ontimer(move_poacher, 200)\n\nmove_poacher()\n \n","sub_path":"day9/ final_project.py","file_name":" final_project.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"236559114","text":"# Copyright notice:\n# Copyright Members of the EMI Collaboration, 2013.\n#\n# See www.eu-emi.eu for details on the copyright holders\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom fts3rest.lib.oauth2provider import FTS3OAuth2ResourceProvider\nfrom fts3rest.lib.middleware.fts3auth.credentials import vo_from_fqan, build_vo_from_dn, InvalidCredentials\n\n\ndef do_authentication(credentials, env):\n \"\"\"\n The user will be the one who gave the bearer token\n \"\"\"\n res_provider = FTS3OAuth2ResourceProvider(env)\n authn = res_provider.get_authorization()\n if authn is None:\n return False\n if not authn.is_valid:\n if authn.error == 'access_denied':\n raise InvalidCredentials()\n return False\n\n credentials.dn.append(authn.credentials.dn)\n credentials.user_dn = authn.credentials.dn\n credentials.delegation_id = authn.credentials.dlg_id\n if authn.credentials.voms_attrs:\n for fqan in authn.credentials.voms_attrs.split('\\n'):\n credentials.voms_cred.append(fqan)\n credentials.vos.append(vo_from_fqan(fqan))\n else:\n credentials.vos.append(build_vo_from_dn(credentials.user_dn))\n credentials.method = 'oauth2'\n return True\n","sub_path":"src/fts3rest/fts3rest/lib/middleware/fts3auth/methods/oauth2.py","file_name":"oauth2.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"234145265","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport numpy as np\nimport numbers\nimport io\nimport logging\n\nfrom utility import tidy_xml, setup_logger # pylint: disable=E0401\n\nlogger = logging.getLogger('Excel XML processor')\nsetup_logger(logger)\n\nfrom specification import DataTableSpecification # pylint: disable=E0401\nfrom model import ValueData, MetaData, DataImportError # pylint: disable=E0401\n\njj = os.path.join\n\n
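# Editor's sketch (added; an illustrative assumption, table name and ids invented):\n# the processor below is expected to emit elements shaped roughly like\n#   <TblSites length=\"2\">\n#     <com.sead.database.TblSites id=\"5\" clonedId=\"17\"/>\n#     ...\n#   </TblSites>\n# where the element names come from the 'java_class' metadata of each table.\n\n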
class XmlProcessor:\n '''\n Main class that processes the Excel file and produces a corresponding XML-file.\n The format of the XML-file conforms to clearinghouse specifications\n '''\n def __init__(self, outstream, level=logging.WARNING):\n self.outstream = outstream\n self.level = level\n self.specification = DataTableSpecification()\n self.ignore_columns = self.specification.ignore_columns\n\n def emit(self, data, indent=0):\n self.outstream.write('{}{}\\n'.format(' ' * indent, data))\n\n def emit_tag(self, tag, attributes=None, indent=0, close=True):\n self.emit('<{} {}{}>'.format(tag, ' '.join([ '{}=\"{}\"'.format(x, y) for (x, y) in (attributes or {}).items() ]), '/' if close else ''), indent)\n\n def emit_close_tag(self, tag, indent):\n self.emit('</{}>'.format(tag), indent)\n\n def camel_case_name(self, underscore_name):\n first, *rest = underscore_name.split('_')\n return first + ''.join(word.capitalize() for word in rest)\n\n def process_data(self, data, table_names, max_rows=0):\n '''\n Import assumes that all FK references point to a local \"system_id\" in the referenced table\n All data tables MUST have a non null \"system_id\"\n All data tables MUST have a PK column with a name equal to that specified in \"Tables\" meta-data PK-name field\n '''\n date_updated = '<dateUpdated>{}</dateUpdated>'.format(time.strftime(\"%Y-%m-%d %H%M\")) # pylint: disable=E1305\n for table_name in table_names:\n try:\n\n referenced_keyset = set(data.get_referenced_keyset(table_name))\n\n logger.info(\"Processing %s...\", table_name)\n\n data_table = data.DataTables[table_name]\n table_definition = data.MetaData.get_table(table_name)\n pk_name = table_definition['pk_name']\n\n table_namespace = \"com.sead.database.{}\".format(table_definition['java_class'])\n\n if data_table is None:\n continue\n\n self.emit('<{} length=\"{}\">'.format(table_definition['java_class'], data_table.shape[0]), 1) # data_table.length\n # self.emit_tag(table_definition['java_class'], dict(length=data_table.shape[0]), close=False, indent=1)\n\n fields = data.MetaData.table_fields(table_name)\n\n for index, item in data_table.iterrows():\n\n try:\n\n data_row = item.to_dict()\n public_id = data_row[pk_name] if pk_name in data_row else np.NAN\n\n if np.isnan(public_id) and np.isnan(data_row['system_id']):\n logger.warning('Table %s: Skipping row since both CloneId and SystemID is NULL', table_name)\n continue\n\n system_id = int(data_row['system_id'] if not np.isnan(data_row['system_id']) else public_id)\n\n referenced_keyset.discard(system_id)\n\n assert not (np.isnan(public_id) and np.isnan(system_id))\n\n if not np.isnan(public_id):\n public_id = int(public_id)\n self.emit('<{} id=\"{}\" clonedId=\"{}\"/>'.format(table_namespace, system_id, public_id), 2)\n else:\n self.emit('<{} id=\"{}\">'.format(table_namespace, system_id), 2)\n\n for _, item in fields.loc[(~fields.column_name.isin(self.ignore_columns))].iterrows():\n column = item.to_dict()\n column_name = column['column_name']\n is_fk = data.MetaData.is_fk(table_name, column_name)\n is_pk = data.MetaData.is_pk(table_name, column_name)\n class_name = column['class']\n\n # TODO Move to Specification\n if column_name[-3:] == '_id' and not (is_fk or is_pk):\n logger.warning('Table %s, FK? 
column %s: Column ending with _id not marked as PK/FK', table_name, column_name)\n\n # TODO Move to Specification\n if column_name not in data_row.keys():\n logger.warning('Table %s, FK column %s: META field name not found in DATA', table_name, column_name)\n continue\n\n camel_case_column_name = self.camel_case_name(column_name)\n value = data_row[column_name]\n if not is_fk:\n if is_pk:\n value = int(public_id) if not np.isnan(public_id) else system_id\n elif isinstance(value, numbers.Number) and np.isnan(value):\n value = 'NULL'\n self.emit('<{0} class=\"{1}\">{2}</{0}>'.format(camel_case_column_name, class_name, value), 3)\n else: # value is a fk system_id\n try:\n\n fk_table_name = data.MetaData.get_tablename_by_classname(class_name)\n if fk_table_name is None:\n logger.warning('Table %s, FK column %s: unable to resolve FK class %s', table_name, column_name, class_name)\n continue\n\n fk_data_table = data.DataTables[fk_table_name]\n\n if np.isnan(value):\n # CHANGE: Cannot allow id=\"NULL\" as foreign key\n # logger.error(\"Warning: table {}, id {} FK {} is NULL. Skipping property!\".format(table_name, system_id, column_name))\n self.emit('<{} class=\"com.sead.database.{}\" id=\"NULL\"/>'.format(camel_case_column_name, class_name), 3)\n continue\n\n fk_system_id = int(value)\n if fk_data_table is None:\n fk_public_id = fk_system_id\n else:\n if column_name not in fk_data_table.columns:\n logger.warning('Table %s, FK column %s: FK column not found in %s, id=%s', table_name, column_name, fk_table_name, fk_system_id)\n continue\n #if 'system_id' not in fk_data_table.columns:\n # logger.error('FATAL ERROR while processing {}. FK table {} has not \"system_id\" column'.format(table_name, fk_table_name))\n fk_data_row = fk_data_table.loc[(fk_data_table.system_id == fk_system_id)]\n if fk_data_row.empty or len(fk_data_row) != 1:\n fk_public_id = fk_system_id\n else:\n fk_public_id = fk_data_row[column_name].iloc[0]\n\n class_name = class_name.split('.')[-1]\n\n if np.isnan(fk_public_id):\n self.emit('<{} class=\"com.sead.database.{}\" id=\"{}\"/>'.format(camel_case_column_name, class_name, fk_system_id), 3)\n else:\n self.emit('<{} class=\"com.sead.database.{}\" id=\"{}\" clonedId=\"{}\"/>'.format(camel_case_column_name, class_name, int(fk_system_id), int(fk_public_id)), 3)\n\n except:\n logger.error('Table %s, id=%s, process failed for column %s', table_name, system_id, column_name)\n raise\n\n # clonedId tag is always emitted ('NULL' when the id is missing)\n self.emit('<clonedId>{}</clonedId>'.format('NULL' if np.isnan(public_id) else int(public_id)), 3)\n self.emit('{}'.format(date_updated), 3)\n self.emit('</{}>'.format(table_namespace), 2)\n\n if max_rows > 0 and index > max_rows:\n break\n\n except Exception as x:\n logger.error('CRITICAL FAILURE: Table %s %s', table_name, x)\n raise\n\n if len(referenced_keyset) > 0 and max_rows == 0:\n logger.warning('Warning: %s has %s referenced keys not found in data', table_name, len(referenced_keyset))\n class_name = 
data.MetaData.get_classname_by_tablename(table_name)\n for key in referenced_keyset:\n self.emit('<com.sead.database.{} id=\"{}\" clonedId=\"{}\"/>'.format(class_name, int(key), int(key)), 2)\n self.emit('</{}>'.format(table_definition['java_class']), 1)\n\n except:\n logger.exception('CRITICAL ERROR')\n raise\n\n def process_lookups(self, data, table_names):\n\n for table_name in table_names:\n\n referenced_keyset = set(data.get_referenced_keyset(table_name))\n\n if len(referenced_keyset) == 0:\n logger.info(\"Skipping %s: not referenced\", table_name)\n continue\n\n class_name = data.MetaData.get_classname_by_tablename(table_name)\n rows = list(map(lambda x: '<com.sead.database.{} id=\"{}\" clonedId=\"{}\"/>'.format(class_name, int(x), int(x)), referenced_keyset))\n xml = '<{} length=\"{}\">\\n {}\\n</{}>\\n'.format(class_name, len(rows), \"\\n \".join(rows), class_name)\n\n self.emit(xml)\n\n def process(self, data, table_names=None, extra_names=None):\n\n self.specification.is_satisfied_by(data)\n\n if len(self.specification.warnings) > 0:\n logger.info(\"\\n\".join(self.specification.warnings))\n\n if len(self.specification.errors) > 0:\n logger.error(\"\\n\".join(self.specification.errors))\n raise DataImportError(\"Process ABORTED since data does not conform to SPECIFICATION\")\n\n data_tablenames = data.data_tablenames if table_names is None else table_names\n extra_names = set(data.MetaData.tablenames) - set(data.tablenames) if extra_names is None else extra_names\n\n self.emit('')\n self.emit('')\n self.process_lookups(data, extra_names)\n self.process_data(data, data_tablenames)\n self.emit('')\n\ndef process_excel_to_xml(option, basename, timestamp):\n '''\n Reads Excel files and converts their content to a CH XML-file.\n Stores data in output_filename and returns the filename for a cleaned-up version of the XML\n '''\n meta_filename = jj(option['input_folder'], option['meta_filename'])\n data_filename = jj(option['input_folder'], option['data_filename'])\n output_filename = jj(option['output_folder'], '{}_{}.xml'.format(basename, timestamp))\n\n meta_data = MetaData().load(meta_filename)\n\n data = ValueData(meta_data).load(data_filename)\n\n with io.open(output_filename, 'w', encoding='utf8') as outstream:\n service = XmlProcessor(outstream)\n service.process(data, option['table_names'])\n\n tidy_output_filename = tidy_xml(output_filename)\n\n if tidy_output_filename != output_filename:\n os.remove(output_filename)\n\n return tidy_output_filename\n\n","sub_path":"import/parse_excel.py","file_name":"parse_excel.py","file_ext":"py","file_size_in_byte":12424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"606457351","text":"#!/usr/bin/env python2\nimport pygame\nimport game\n\ndef main():\n pygame.init()\n pygame.display.set_caption(\"Arcade CS Games 2016\")\n font = pygame.font.Font('res/font/ps2p.ttf', 32)\n border = pygame.display.set_mode((game.Game.SCREEN_WIDTH, game.Game.SCREEN_HEIGHT)) #, pygame.FULLSCREEN\n\n while True:\n app = game.Game(border, font)\n print(\"Start game\")\n app.run()\n print(\"Game stop\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"448756551","text":"import numpy as np\nimport gym\n\n#Solving nchain using value iteration. 
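The core update is U(s) <- max_a [ R(s,a) + gamma * sum_s' T(s'|s,a) U(s') ], iterated until the residual max|U - V| drops below the tolerance (editor's gloss, added); see 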
Algorithm 4.3 in DMU.\n\ndef main(iterations=5000):\n\n\tenv = gym.make(\"NChain-v0\")\n\tactions = np.array([0,1]) \t\t#0 is fwd, 1 is backwards\n\tstates = np.array([0,1,2,3,4])\n\n\t#Determine optimal policy through value iteration\n\tT,R = exact_T_R_nchain(states=states,actions=actions,\n\t\t\t\t\t\t slip_chance=.2,small_reward=2,large_reward=10)\n\n\tpolicy = value_iteration(T=T,R=R,gamma=.95,tolerance=.0001)\n\n\t#Run\n\trewards = np.zeros(iterations)\n\tfor iteration in range(0,iterations):\n\t\tprint(iteration)\n\t\tstate = env.reset()\t\t\t\n\t\twhile True:\n\t\t\taction = policy[state]\n\t\t\tnext_state, reward, done, _= env.step(action)\n\t\t\trewards[iteration] += reward\n\t\t\tstate = next_state\n\n\t\t\t#Each episode is 1000 game steps long.\n\t\t\tif done: \n\t\t\t\tbreak\n\n\tnp.savez('output/Optimal_output', iterations=iterations, rewards=rewards)\n\n\ndef exact_T_R_nchain(states,actions,slip_chance,small_reward,large_reward):\n\tT = np.zeros((len(states),len(actions),len(states)))\n\tR = np.zeros((len(states),len(actions)))\n\t#Exact tables of state transitions, expected reward for each state, action in nchain.\n\t#T(s'|s,a) == T[s,a,s'] and R(s,a) == R[s,a]. \n\n\t#Chances for resetting\n\tT[:,0,0] = slip_chance\t \n\tT[:,1,0] = 1-slip_chance \n\n\t#Chances for advancing (last state just stays in place)\n\tfor s in states:\n\t\tsp = min(states[-1],s+1)\n\t\tT[s,0,sp] = 1-slip_chance\n\t\tT[s,1,sp] = slip_chance\t \n\n\t#Rewards\n\tR_reset = np.ones(len(states))*small_reward\t\n\tR_advance = np.zeros(len(states))\n\tR_advance[-1] = large_reward\n\n\t#Expectation of rewards. \n\tR[:,0] = (1-slip_chance)*R_advance + slip_chance*R_reset\n\tR[:,1] = (1-slip_chance)*R_reset + slip_chance*R_advance\n\n\treturn T,R\n\ndef value_iteration(T,R,gamma,tolerance):\n\tU = np.zeros(R.shape[0])\n\twhile True:\n\t\t\n\t\t#The way I set up T to be T[s,a,sp] makes this workout\n\t\t#U gets multiplied elementwise in the correct dimension (over each sp slice),\n\t\t#Then sum over that dimension to get sum term.\n\n\t\t#Then we take the max along the second dimension (axis 1) corresponding to the actions.\n\n\t\tV = U.copy()\n\t\tU = np.max( R + gamma*np.sum(T*V,axis=2), axis = 1)\n\n\t\t#Check when to break\n\t\tresidual = max(abs(U-V))\n\t\tif residual\n#Group Name: \n#Class: \n#Date: <16 July 2021>\n#Version: <...>\n#########################################################################\n\n#########################################################################\n#IMPORT Pandas Library for Data Analysis\n#########################################################################\n#import pandas and matplotlib for data analysis\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#import sys to help exit program\nimport sys\n\n#########################################################################\n#IMPORT Pandas Library for Data Analysis\n#########################################################################\n\n\n#########################################################################\n#CLASS Branch - Data Analysis\n#load excel data (CSV format) to dataframe\n#########################################################################\nclass DataAnalysis:\n def __init__(self):\n\n #load excel data (CSV format) to dataframe - 'df'\n dataframe = pd.read_csv('MonthlyVisitors.csv')\n #show specific country dataframe\n sortCountry(dataframe)\n\n\n#########################################################################\n#CLASS Branch: End of 
Code\n#########################################################################\n\n\n#########################################################################\n#FUNCTION Branch - sortCountry\n#parses data and displays sorted result(s)\n#########################################################################\ndef sortCountry(df):\n\n #print number of rows in dataframe\n print(\"There are \" + str(len(df)) + \" data rows read. \\n\")\n\n #display dataframe (rows and columns)\n print(\"The following dataframe are read as follows: \\n\")\n print(df)\n\n #Replacing the Value of na to 0\n df = df.replace(to_replace=[\" na \", \"na\"], value =\"0\")\n\n #display European countries\n Eur = df.columns[20] + df.columns[21] + df.columns[22] + df.columns[23] + df.columns[24] + df.columns[25] + df.columns[26] + df.columns[27] + df.columns[28] + df.columns[29] + df.columns[30]\n print(\"\\nThese are the countries in Europe:\" + Eur + \". The Europe region from 1997 to 2007 was selected as shown below.\\n\")\n \n #display a sorted dataframe based on Europe\n SortedEur = (df[[\n 'Year', 'Month', ' United Kingdom ', ' Germany ', ' France ',\n ' Italy ', ' Netherlands ', ' Greece ', ' Belgium & Luxembourg ', ' Switzerland ',' Austria ', ' Scandinavia ', ' CIS & Eastern Europe '\n ]][228:360])\n\n print(SortedEur)\n\n #remove year & month from dataframe\n NewEur = SortedEur.drop(columns=[\"Year\", \"Month\"])\n\n #convert the contents into int\n NewEur = NewEur.astype(str).astype(int)\n \n #Total visitors for each country\n TotalEur = NewEur.sum()\n\n #Sort in order\n NewSortedEur = TotalEur.sort_values(ascending = False)\n\n #back to object\n NewSortedEur = NewSortedEur.reset_index()\n\n #adding columns\n NewSortedEur.columns = ['Countries', 'Visitors']\n\n #display top 3 countries in europe\n print(\"\\nThe top 3 countries in Europe that visited Singapore are ranked below.\\n\")\n Top3 = (NewSortedEur.head(3))\n print(Top3)\n\n#Plotting Pie Chart\n visitorss = [4752232, 1735040, 945953]\n Countries = [' United Kingdom ',' Germany ',' Scandinavia ']\n plt.pie(visitorss, labels = Countries , autopct = '%1.1f%%')\n plt.title('Insert Title')\n plt.axis('equal')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"pie-chart.png\", bbox_inches = 'tight')\n\n #Exit program\n exit = input(\"\\nTo end this program press X: \")\n if exit.lower() == \"x\":\n sys.exit(\"Closing Program...\")\n else:\n print(\"Invalid input\")\n \n\n return\n\n\n#########################################################################\n#FUNCTION Branch: End of Code\n#########################################################################\n\n#########################################################################\n#Main Branch\n#########################################################################\nif __name__ == '__main__':\n\n #Project Title\n print('######################################')\n print('# Data Analysis App - PYTHON Project #')\n print('######################################')\n\n #perform data analysis on specific excel (CSV) file\n DataAnalysis()\n\n#########################################################################\n#Main Branch: End of Code\n#########################################################################\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"116452869","text":"from sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\nfrom 
models import Mogujie\n\nengine = create_engine(\"mysql+pymysql://root:123456@127.0.0.1/spider?charset=utf8\", max_overflow=5)\nsession_maker = sessionmaker(bind=engine)\nsession = session_maker()\n\n\ndef save_db(result_list):\n for mogu_dict in result_list:\n mogu = Mogujie()\n mogu.tradeitemid = mogu_dict['tradeItemId']\n mogu.img = mogu_dict['img']\n mogu.itemtype = mogu_dict['itemType']\n mogu.clienturl = mogu_dict['clientUrl']\n mogu.link = mogu_dict['link']\n mogu.itemmarks = mogu_dict['itemMarks']\n mogu.acm = mogu_dict['acm']\n # mogu.title = mogu_dict['title']\n mogu.type = mogu_dict['type']\n mogu.orgprice = mogu_dict['orgPrice']\n mogu.hassimilarity = mogu_dict['hasSimilarity']\n mogu.cfav = mogu_dict['cfav']\n mogu.price = mogu_dict['price']\n mogu.similarityurl = mogu_dict['similarityUrl']\n\n session.add(mogu)\n session.commit()\n","sub_path":"mogujie_spider/sqlalchemy_helper.py","file_name":"sqlalchemy_helper.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"632896169","text":"from app import app, db\nfrom flask import render_template, request, redirect, url_for, flash\nfrom app.models import Post, User, Submit\nfrom flask_login import login_user, logout_user\nimport plotly\nfrom plotly import express as px\nimport plotly.io as pio\n\n\n\nimport pandas as pd\nimport numpy as np\nimport json\n\n\n\n\n@app.route('/')\ndef home():\n air = pd.read_csv(r'listings.csv')\n\n # pio.renderers.default = 'browser'\n fig = px.scatter_mapbox(air, lat=\"latitude\", lon=\"longitude\", hover_name=\"neighbourhood_cleansed\", hover_data=[\n \"room_type\", \"price\"], color=\"neighbourhood_cleansed\", zoom=11, height=400)\n\n fig.update_layout(mapbox_style=\"carto-darkmatter\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder) # serialise the figure itself; fig.show() returns None\n\n return render_template('index.html', graphJSON=graphJSON)\n\n@app.route('/contact', methods=['GET', 'POST'])\ndef contact():\n if request.method == 'POST':\n s = Submit()\n s.from_dict(request.form)\n db.session.add(s)\n db.session.commit()\n flash('Thank you for your submission!')\n return redirect(url_for('home'))\n return render_template('contact.html')\n\n@app.route('/blog')\ndef blog():\n \n context = {\n 'posts': [p.to_dict() for p in Post.query.all()]\n }\n return render_template('blog.html', **context)\n\n@app.route('/login', methods =['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n user = User.query.filter_by(email=request.form.get('email')).first()\n if user is None or user.check_password(request.form.get('password')) is False:\n print('Something is not right')\n flash('User name and email do not match')\n return redirect(url_for('login'))\n remember_me=True if request.form.get('checked') is not None else False\n login_user(user, remember=remember_me)\n flash('Welcome! 
You are logged in!')\n return redirect(url_for('home'))\n return render_template('login.html')\n\n\n@app.route('/register', methods=['GET','POST'])\ndef register():\n if request.method == 'POST':\n u = User()\n u.from_dict(request.form)\n u.save()\n flash('Success!')\n return redirect(url_for('login'))\n\n return render_template('register.html')\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"50108687","text":"'''\nCreated on 28 Mar 2011\n\n@author: Simon Bull\n'''\n\nimport time\n\ndef pruneByNeighbours(adjList, timeAllowed, startTime):\n \"\"\"\n\n @param adjList: The adjacency list of the current connected component being examined.\n @type adjList : dictionary\n @param timeAllowed: The number of seconds that the algorithm is allowed to run for.\n @type timeAllowed : float\n @param startTime: The time when the algorithm was started.\n @type startTime : float\n return @type: list, boolean\n return @use : The nodes that need removing from the graph in order to remove all edges, whether the time limit was exceeded.\n\n \"\"\"\n \n removeList = []\n \n while True:\n\n if time.clock() - startTime > timeAllowed:\n return removeList, True\n \n # Determine the number of neighbours for each node.\n neighbours = [len(adjList[k]) for k in adjList.keys()]\n \n # If there are no nodes with neighbours then exit.\n maxNeighbours = max(neighbours)\n if maxNeighbours == 0:\n return removeList, False\n \n # Get the IDs of the nodes with the maximum number of neighbours.\n nodesWithMaxNeighbours = [x for x in range(len(neighbours)) if neighbours[x] == maxNeighbours]\n if len(nodesWithMaxNeighbours) != 1:\n # If there is more than one node with the maximum number of neighbours, then determine which node to remove.\n extendedNeighbourhood = [adjList[x] + [x] for x in nodesWithMaxNeighbours]\n extendedNeighbourhood = [set([x for i in a for x in adjList[i]]) for a in extendedNeighbourhood]\n # Determine the size of each extended neighbourhood, and which nodes have the minimum size.\n sizes = [len(x) for x in extendedNeighbourhood]\n minSize = min(sizes)\n nodesWithMaxNeighbours = [nodesWithMaxNeighbours[x] for x in range(len(sizes)) if sizes[x] == minSize]\n \n toRemove = nodesWithMaxNeighbours[0] \n removeList.append(toRemove) \n # Update the list of neighbours for each node that toRemove is adjacent to.\n for i in adjList[toRemove]:\n adjList[i].remove(toRemove) \n # Update the adjacency list to reflect the removal of toRemove.\n adjList[toRemove] = []\n\n\ndef main(adj, names, timeAllowed):\n \"\"\"Use the BlastCuller heuristic method to calculate an approximation to the maximum independent set.\n\n Returns a list of the proteins to keep and a list of the proteins to cull. The list of proteins to keep only contains the\n names of the proteins in the protein similarity graph that should be kept. If there are any proteins that were not\n included in adj (for example proteins with no neighbours), then these will NOT be included in the list of proteins to keep.\n See the README for a more in depth description of this.\n \n @param adj: A sparsematrix representation of the protein similarity graph\n @type adj : sparsematrix\n @param names: A list of the names of the proteins in adj. 
Ordered such that the name of the protein represented by node i\n in adj is located at names[i].\n @type names : list\n @param timeAllowed: The maximum number of seconds the algorithm is allowed to run for.\n @type timeAllowed : float\n return @type: list, list, list, list, float, boolean\n return @use : Names of the proteins to cull, names of the proteins from the graph to keep, numerical IDs of the proteins to cull, numerical IDs of the protein from the graph to keep, time taken by the algorithm, whether the algorithm ran out of time or not.\n\n \"\"\"\n\n # Determine the connected components of adj\n subgraphs = adj.connectedcomponents()\n\n # Create an adjacency list for each of the connected components, and record it along with the node ID for each\n # of the nodes in the component.\n subgraphMatrices = []\n for i in subgraphs:\n subSet = sorted(i)\n subMat = adj.takesquare(subSet)\n subMat = subMat.adjList()\n subgraphMatrices.append((subMat, subSet))\n\n startTime = time.clock()\n\n # Determine the IDs of the nodes to keep by running the BlastCuller algorithm, and from this determine the names of\n # the proteins to keep and remove.\n removeNode = []\n nodesToKeep = []\n outOfTime = False\n for i in subgraphMatrices:\n subSetNodes = i[1]\n rem, outOfTime = pruneByNeighbours(i[0], timeAllowed, startTime)\n extendRemove = [subSetNodes[x] for x in range(len(subSetNodes)) if x in rem]\n removeNode.extend(extendRemove)\n extendKeep = [subSetNodes[x] for x in range(len(subSetNodes)) if x not in rem]\n nodesToKeep.extend(extendKeep)\n if outOfTime:\n break\n proteinsToCull = [names[x] for x in removeNode]\n proteinsToKeep = [names[x] for x in nodesToKeep]\n\n return proteinsToCull, proteinsToKeep, removeNode, nodesToKeep, time.clock()-startTime, outOfTime\n","sub_path":"ComparisonCode/CMNeighbourCull.py","file_name":"CMNeighbourCull.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"233135273","text":"\"\"\"\nhttps://www.geeksforgeeks.org/0-1-knapsack-problem-dp-10/\n\n0-1 Knapsack Problem | DP-10\n\nGiven weights and values of n items, \nput these items in a knapsack of capacity W \nto get the maximum total value in the knapsack. \nIn other words, given two integer arrays val[0..n-1] and wt[0..n-1] \nwhich represent values and weights associated with n items respectively. \nAlso given an integer W which represents knapsack capacity, \nfind out the maximum value subset of val[] such that \nsum of the weights of this subset is smaller than or equal to W. \n\nInput:\n weights = [10, 20, 30]\n values = [60, 100, 120]\n capacity = 50\n\nOutput: 220\n\n\nThe approach:\nrecursion with memoization and helper function\nlet j be last item you select in knapsack\nat each recursive call\n choose jth element and add to total value, decrement from capacity OR\n skip jth element\n\ndef knapsack(values,weights,capacity):\n memo = {}\n def compute(last, currCapacity):\n 1. Handle base cases\n A. if last in memo -> return memo[last]\n B. if capacity is 0 -> memo[last] = 0 -> return memo[last]\n C. if capacity < 0 -> return -float(\"inf\")\n 2. 
Recursive relationship:\n option1 = compute(j-1) + values[j] if currCapacity - weights[j] >=0\n option2 = compute(j-1)\n memo[last] = max(option1, option2)\n return memo[last]\n\n c=50,t=0\n c=40, v=60 c=50,t=0\n c=20, v=160 c=40,60 c=30,t=100 c=50,0\n c=-10,v=280 c=0, t = 220 c=30,t=100 \n return -inf c=10 , v=180 c=40,60\n\n\"\"\"\n\ndef knapsack(values,weights,capacity):\n memo = {}\n def getValue(last, currCapacity):\n key = str(last) + \"_\" + str(currCapacity)\n if key in memo:\n return memo[key]\n elif currCapacity <= 0:\n memo[key] = 0\n elif last == 0:\n memo[key] = values[last] if (weights[last] str:\n r\"\"\"Convert numeric indices to an excel-style address\n :param n: row number; a number given as a string is also accepted\n :param m: column number; as above, a str is also accepted\n :return:\n\n >>> Openpyxl.address(2, 3)\n 'C2'\n \"\"\"\n from openpyxl.utils.cell import get_column_letter\n return f'{get_column_letter(int(m))}{n}'\n\n @staticmethod\n def in_range(cell):\n \"\"\"Find the merged range that a given cell belongs to\n >> in_range(ws['C1'])\n A1:D3\n \"\"\"\n ws = cell.parent\n for rng in ws.merged_cells.ranges:\n if cell.coordinate in rng:\n break\n else: # if no merged range is found, return the cell itself\n rng = cell\n return rng\n\n @staticmethod\n def mcell(cell):\n \"\"\"Return the \"effective cell\": if the input lies in a merged range, this returns the top-left cell of that range.\n Only modifying the value of the top-left cell is feasible and meaningful.\n\n The m prefix stands for merge, since this is all about merged cells.\n \"\"\"\n from openpyxl.cell.cell import MergedCell\n if isinstance(cell, MergedCell):\n ws = cell.parent\n xy = Openpyxl.in_range(cell).top[0]\n return ws[Openpyxl.address(*xy)]\n else:\n return cell\n\n @staticmethod\n def celltype(cell):\n \"\"\"\n :param cell: a cell\n :return: the cell type\n 0: ordinary cell\n 1: a derived (non top-left) position of a merged cell\n 2: the top-left position of a merged cell\n\n TODO this function could probably be implemented better / made faster\n \"\"\"\n from openpyxl.cell.cell import MergedCell\n if isinstance(cell, MergedCell):\n return 1\n elif isinstance(cell.offset(1, 0), MergedCell) or isinstance(cell.offset(0, 1), MergedCell):\n # this only says the cell may be part of a merged range; to know for sure we still have to check\n rng = Openpyxl.in_range(cell)\n return 2 if hasattr(rng, 'size') else 0\n else:\n return 0\n\n @staticmethod\n def isnone(cell):\n \"\"\"An ordinary cell whose value is None\n (note that the derived cells of a merged range do not count as None)\n \"\"\"\n celltype = Openpyxl.celltype(cell)\n return celltype == 0 and cell.value is None\n\n @staticmethod\n def copy_cell_format(cell, new_cell):\n \"\"\" Copy the complete format of a cell; the physical positions of the old and new cells must be decided beforehand\n Reference: https://stackoverflow.com/questions/23332259/copy-cell-style-openpyxl\n \"\"\"\n from copy import copy\n if cell.has_style:\n new_cell.font = copy(cell.font) # font\n new_cell.border = copy(cell.border) # borders\n new_cell.fill = copy(cell.fill) # fill colour\n new_cell.number_format = copy(cell.number_format) # number format\n new_cell.protection = copy(cell.protection) # protection?\n new_cell.alignment = copy(cell.alignment) # alignment\n # new_cell.style = cell.style\n # if cell.comment:\n # this raises AttributeError...\n # vml = fromstring(self.workbook.vba_archive.read(ws.legacy_drawing))\n # AttributeError: 'NoneType' object has no attribute 'read'\n # new_cell.comment = copy(cell.comment)\n # even with keep_vba the write can be forced, but the file may still be broken when opened\n\n @staticmethod\n def copy_cell(cell, new_cell):\n \"\"\" Copy a cell wholesale: the full format plus the value\n \"\"\"\n new_cell.value = cell.value\n Openpyxl.copy_cell_format(cell, new_cell)\n\n @classmethod\n def down(cls, cell):\n \"\"\"Given a cell, move one step down.\n Unlike offset, if cell is a merged cell this skips over its own derived cells.\n \"\"\"\n if cls.celltype(cell): # merged cell\n rng = cls.in_range(cell)\n return cell.parent.cell(rng.max_row + 1, cell.column)\n else:\n return cell.offset(1, 0)\n\n @classmethod\n def right(cls, cell):\n if cls.celltype(cell):\n rng = cls.in_range(cell)\n return cell.parent.cell(cell.row, rng.max_col + 1)\n else:\n return cell.offset(0, 1)\n\n @classmethod\n def up(cls, cell):\n if cls.celltype(cell):\n rng = cls.in_range(cell)\n return cell.parent.cell(rng.min_row - 1, cell.column)\n else:\n return cell.offset(-1, 0)\n\n @classmethod\n def left(cls, cell):\n if 
cls.celltype(cell):\n rng = cls.in_range(cell)\n return cell.parent.cell(cell.row, rng.min_col - 1)\n else:\n return cell.offset(0, -1)\n\n @staticmethod\n def copy_worksheet(origin_ws, target_ws):\n \"\"\"Copy the contents of a sheet across workbooks.\n openpyxl's own Workbook.copy_worksheet cannot copy across workbooks, which is a trap.\n \"\"\"\n # 1 take the value of every cell\n for row in origin_ws:\n for cell in row:\n try:\n Openpyxl.copy_cell(cell, target_ws[cell.coordinate])\n except AttributeError:\n pass\n # 2 handle merged cells\n for rng in origin_ws.merged_cells.ranges:\n target_ws.merge_cells(rng.ref)\n # 3 copy the other sheet properties\n # what is read back from excel here is unreliable; e.g. D3 may wrongly come back as D103 because of where the window sat when the file was closed\n # dprint(origin_ws.freeze_panes)\n # target_ws.freeze_panes = origin_ws.freeze_panes\n\n\ndef product(*iterables, order=None, repeat=1):\n \"\"\" A richer product iterator that extends itertools.product with an order parameter\n :param order: suppose iterables holds n=3 iterators; the default is then orders=[1, 2, 3] (numbering starts at 1),\n i.e. the standard product, which resets and traverses each iterator in order.\n This extended interface lets you adjust the update order of every dimension.\n For example [-2, 1, 3] means dimension 2 runs in descending order, and the coordinate points are then ordered by dimensions 1 and 3.\n Note: you may pass just [-2]; the missing dimensions [1, 3] are filled in automatically.\n\n for x in product('ab', 'cd', 'ef', order=[3, -2, 1]):\n print(x)\n\n ['a', 'd', 'e']\n ['b', 'd', 'e']\n ['a', 'c', 'e']\n ['b', 'c', 'e']\n ['a', 'd', 'f']\n ['b', 'd', 'f']\n ['a', 'c', 'f']\n ['b', 'c', 'f']\n\n TODO numpy is powerful enough that an equivalent interface may already exist and save me from reinventing the wheel?\n \"\"\"\n import itertools, numpy\n\n # 1. the standard call\n if order is None:\n for x in itertools.product(*iterables, repeat=repeat):\n yield x\n return\n\n # 2. the call with an order argument\n # 2.1 pad order to its full length\n n = len(iterables)\n for i in range(1, n + 1):\n if not (i in order or -i in order):\n order.append(i)\n if len(order) != n: raise ValueError(f'bad order argument {order}')\n\n # 2.2 build the new group of iterators\n new_iterables = [(iterables[i - 1] if i > 0 else reversed(iterables[-i - 1])) for i in order]\n idx = numpy.argsort([abs(i) - 1 for i in order])\n for y in itertools.product(*new_iterables, repeat=repeat):\n yield [y[i] for i in idx]\n\n\nclass Worksheet(openpyxl.worksheet.worksheet.Worksheet):\n \"\"\" Extend the standard Worksheet with extra functionality\n >> wb = openpyxl.load_workbook(filename='高中数学知识树匹配终稿.xlsx', data_only=True)\n >> ws1 = Worksheet(wb['main'])\n >> ws2 = Worksheet(wb['导出'])\n \"\"\"\n\n def __init__(self, ws):\n self.__dict__ = ws.__dict__\n\n def _cells_by_row(self, min_col, min_row, max_col, max_row, values_only=False):\n \"\"\"openpyxl's version of this iterator has a bug when it meets merged cells,\n so it is redesigned here~~\n \"\"\"\n for row in range(min_row, max_row + 1):\n cells = (self.cell(row=row, column=column) for column in range(min_col, max_col + 1))\n if values_only:\n # yield tuple(cell.value for cell in cells) # original code\n yield tuple(getattr(cell, 'value', None) for cell in cells)\n else:\n yield tuple(cells)\n\n def search(self, pattern, min_row=None, max_row=None, min_col=None, max_col=None, order=None, direction=0):\n \"\"\"Find a cell whose value matches the regex pattern\n\n :param pattern: a regular expression; a re.compile object is also accepted.\n Every cell value is converted to str and then matched with search.\n Nested multi-level patterns such as ['模块一', '属性1'] are supported.\n :param direction: only used when pattern is a list.\n When pattern has several parts, the cells are located one inside the other;\n after each condition is resolved, the sub-range below that cell is taken by default (axis=0).\n Set it to 1 to take the range to the right instead,\n and the more unusual above and left are 2 and 3 respectively.\n :param order: default None, i.e. the behaviour of [1, 2]; see the product interface for the rules.\n\n >> wb = openpyxl.load_workbook(filename='2020寒假教材各地区数量统计最新2020.1.1.xlsx')\n >> ws = Worksheet(wb['预算总表'])\n >> ws.search('年段')\n \n \"\"\"\n # 1 clamp the bounds\n x1, x2 = max(min_row or 1, 1), min(max_row or self.max_row, self.max_row)\n y1, y2 = max(min_col or 1, 1), min(max_col or self.max_column, self.max_column)\n\n # 2 traverse\n 
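# (editor's note, added) a nested pattern like ['模块二', '属性1'] first locates '模块二', then\n # narrows the scan window below/beside it according to direction before matching '属性1'.\n 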
if isinstance(pattern, (list, tuple)):\n cel = None\n for p in pattern:\n cel = self.search(p, x1, x2, y1, y2, order)\n if cel:\n # l, u, r, d: the four bounds (left, up, right, down) of the found cell\n l, u, r, d = getattr(Openpyxl.in_range(cel), 'bounds', (cel.column, cel.row, cel.column, cel.row))\n if direction == 0:\n x1, y1, y2 = max(x1, d), max(y1, l), min(y2, r)\n elif direction == 1:\n x1, x2, y1 = max(x1, u), min(x2, d), max(y1, r)\n elif direction == 2:\n x2, y1, y2 = min(x2, d), max(y1, l), min(y2, r)\n elif direction == 3:\n x1, x2, y2 = max(x1, u), min(x2, d), min(y2, l)\n else:\n raise ValueError(f'bad direction value {direction}')\n else:\n return None\n return cel\n else:\n if isinstance(pattern, str): pattern = re.compile(pattern)\n for x, y in product(range(x1, x2 + 1), range(y1, y2 + 1), order=order):\n cell = self.cell(x, y)\n if Openpyxl.celltype(cell) == 1: continue # skip the derived positions of merged cells\n if pattern.search(str(cell.value)): return cell # return the first cell that satisfies the condition\n\n findcel = search\n\n def findrow(self, pattern, *args, **kwargs):\n cel = self.findcel(pattern, *args, **kwargs)\n return cel.row if cel else 0\n\n def findcol(self, pattern, *args, **kwargs):\n cel = self.findcel(pattern, *args, **kwargs)\n return cel.column if cel else 0\n\n def chrome(self):\n \"\"\"note: merged cells are dropped here\"\"\"\n chrome(pd.DataFrame(self.values))\n\n def select_columns(self, columns, column_name='searchkey'):\n r\"\"\"Fetch the values of the given attribute columns of the sheet; returns a dataframe\n\n :param columns: column headers are located with a re.search string match.\n A single column is allowed: 'attr1'; once the header is found, values are read downwards to the last non-empty one.\n Several columns are also allowed: ['attr1', 'attr2', 'attr3'].\n Several column headers are then combined for positioning, and the data is read from the largest starting row number\n (TODO stopping at the row of the last non-empty value is not implemented; openpyxl's own max_row is used for now, though it is sometimes too large).\n For merged cells, the value of the parent cell is used as the fill.\n :param column_name: the column names of the returned df:\n origin, the original header text\n searchkey, the lookup name used while searching\n \"\"\"\n if not isinstance(columns, (list, tuple)):\n columns = [columns]\n\n # 1 find every header position and fix the starting row\n cels, names, start_line = [], [], -1\n for search_name in columns:\n cel = self.findcel(search_name)\n if cel:\n cels.append(cel)\n if column_name == 'searchkey':\n names.append(str(search_name))\n elif column_name == 'origin':\n if isinstance(search_name, (list, tuple)) and len(search_name) > 1:\n names.append('/'.join(list(search_name[:-1]) + [str(cel.value)]))\n else:\n names.append(str(cel.value))\n else:\n raise ValueError(f'{column_name}')\n start_line = max(start_line, Openpyxl.down(cel).row)\n else:\n dprint(search_name) # the requested column was not found\n\n # 2 fetch the data of each column\n datas = {}\n for k, cel in enumerate(cels):\n if cel:\n col = cel.column\n li = []\n for i in range(start_line, self.max_row + 1):\n v = Openpyxl.mcell(self.cell(i, col)).value # mind how merged cells are read\n li.append(v)\n datas[names[k]] = li\n else:\n # if the column was not found, create an empty one\n datas[names[k]] = [None] * (self.max_row + 1 - start_line)\n df = pd.DataFrame(datas)\n\n # 3 drop all completely empty rows\n df.dropna(how='all', inplace=True)\n\n return df\n\n def copy_range(self, cell_range, rows=0, cols=0):\n \"\"\" Copy a range within the same sheet.\n Copy a cell range by the number of rows and/or columns:\n down if rows > 0 and up if rows < 0\n right if cols > 0 and left if cols < 0\n Existing cells will be overwritten.\n Formulae and references will not be updated.\n \"\"\"\n from openpyxl.worksheet.cell_range import CellRange\n from itertools import product\n # 1 preprocessing\n if isinstance(cell_range, str):\n cell_range = CellRange(cell_range)\n if not isinstance(cell_range, CellRange):\n raise ValueError(\"Only CellRange objects can be copied\")\n if not rows and not cols:\n return\n min_col, min_row, max_col, max_row = cell_range.bounds\n # 2 the copy order depends on the move direction: iterate so that the original values are copied, not values that were just written\n r = sorted(range(min_row, max_row + 1), reverse=rows > 0)\n 
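# e.g. when shifting down (rows > 0) the rows are walked from the bottom up, so every source cell is read before anything is written over it (editor's gloss, added)\n 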
c = sorted(range(min_col, max_col + 1), reverse=cols > 0)\n for row, column in product(r, c):\n Openpyxl.copy_cell(self.cell(row, column), self.cell(row + rows, column + cols))\n\n def reindex_columns(self, orders):\n \"\"\" Rearrange the column order of the sheet\n >> ws.reindex_columns('I,J,A,,,G,B,C,D,F,E,H,,,K'.split(','))\n\n TODO support moving merged cells as a whole?\n \"\"\"\n from openpyxl.utils.cell import column_index_from_string\n max_row, max_column = self.max_row, self.max_column\n for j, col in enumerate(orders, 1):\n if not col: continue\n self.copy_range(f'{col}1:{col}{max_row}', cols=max_column + j - column_index_from_string(col))\n self.delete_cols(1, max_column)\n\n\ndef adjust_sheets(wb, new_sheetnames):\n \"\"\" Readjust the sheets according to the list new_sheetnames:\n sheets in the list are kept, in the given order;\n sheets not in the list are deleted;\n sheet names the workbook does not already contain must not appear.\n \"\"\"\n for name in set(wb.sheetnames) - set(new_sheetnames):\n # it is best to delete a sheet through the standard remove interface;\n # otherwise the sheet appears to be deleted, but some redundant namespace information is left behind\n wb.remove(wb[name])\n wb._sheets = [wb[name] for name in new_sheetnames]\n return wb\n\n\ndef demo_openpyxl():\n # 1. create a new workbook\n from openpyxl import Workbook\n wb = Workbook()\n\n # take a worksheet\n ws = wb.active # wb['Sheet'] fetches a sheet of known name or index; excel is case-insensitive, but this index is case-sensitive\n\n # 1 two ways of indexing a cell, and .value reads the value\n ws['A2'] = '123'\n dprint(ws.cell(2, 1).value) # 123\n\n # 2 merged cells\n ws.merge_cells('A1:C2')\n dprint(ws['A1'].value) # None; merging clears the old content of A2\n\n # print(ws['A2'].value) # AttributeError: 'MergedCell' object has no attribute 'value'\n\n # ws.unmerge_cells('A1:A3') # ValueError: list.remove(x): x not in list; the complete merged range must be given, otherwise it raises\n ws['A1'].value = '模块一'\n ws['A3'].value = '属性1'\n ws['B3'].value = '属性2'\n ws['C3'].value = '属性3'\n\n ws.merge_cells('D1:E2')\n ws['D1'].value = '模块二'\n ws['D3'].value = '属性1'\n ws['E3'].value = '属性2'\n\n dprint(ws['A1'].offset(1, 0).coordinate) # A2\n dprint(Openpyxl.down(ws['A1']).coordinate) # A3\n\n # 3 set cell styles and formats\n from openpyxl.comments import Comment\n cell = ws['A3']\n cell.font = Font(name='Courier', size=36)\n cell.comment = Comment(text=\"A comment\", author=\"Author's Name\")\n from openpyxl.styles.colors import RED\n\n styles = [['Number formats', 'Comma', 'Comma [0]', 'Currency', 'Currency [0]', 'Percent'],\n ['Informative', 'Calculation', 'Total', 'Note', 'Warning Text', 'Explanatory Text'],\n ['Text styles', 'Title', 'Headline 1', 'Headline 2', 'Headline 3', 'Headline 4', 'Hyperlink',\n 'Followed Hyperlink', 'Linked Cell'],\n ['Comparisons', 'Input', 'Output', 'Check Cell', 'Good', 'Bad', 'Neutral'],\n ['Highlights', 'Accent1', '20 % - Accent1', '40 % - Accent1', '60 % - Accent1', 'Accent2', 'Accent3',\n 'Accent4', 'Accent5', 'Accent6', 'Pandas']]\n for i, name in enumerate(styles, start=4):\n ws.cell(i, 1, name[0])\n for j, v in enumerate(name[1:], start=2):\n ws.cell(i, j, v)\n ws.cell(i, j).style = v\n\n # 2. try out some of the features\n ws = Worksheet(ws)\n\n dprint(ws.search('模块二').coordinate) # D1\n dprint(ws.search(['模块二', '属性1']).coordinate) # D3\n\n dprint(ws.findcol(['模块一', '属性1'], direction=1)) # 0\n\n wb.save(\"demo_openpyxl.xlsx\")\n","sub_path":"pyxllib/util/excellib.py","file_name":"excellib.py","file_ext":"py","file_size_in_byte":19377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"141536049","text":"#!/usr/bin/python\nimport sys\nimport re\n\nprevious = None\ntotsum = 0\n\nfor line in sys.stdin: # read each line from standard input\n\tkey, value = line.split('\\t') # split into key and value on the \\t separator\n\tif key != previous: # if the key is different, then make this key the previous one and set the totsum to 
\n\t\tif previous is not None: # previous already holds a (different) key, so print it\n\t\t\tprint (previous + '\\t' + str(totsum))\n\t\tprevious = key\n\t\ttotsum = 0\n\ttotsum = totsum + int(value) # same key (or first line of a group): add this value to the running total\nprint (previous + '\\t' + str(totsum)) # after the loop, emit the last key and its total (the loop never prints the final group)\n","sub_path":"treducer.py","file_name":"treducer.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"495205226","text":"# -*- coding: utf-8 -*-\nimport logging\nimport configparser\nimport urllib3\n\nconfig = configparser.ConfigParser()\nconfig.read('etc/config.ini')\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=config['log']['log.file'],\n filemode='w')\nlogger = logging.getLogger(__file__)\n\nclass PttWrapper:\n \"\"\"\n A urllib wrapper to send requests to ptt\n \"\"\"\n def __init__(self):\n self.protocol = config['ptt']['ptt.protocol']\n self.url_prefix = config['ptt']['ptt.url.prefix']\n self.headers = {\n 'Content-type': 'Accept text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'User-Agent': 'ptt-rocks',\n 'Cookie': 'over18=1', }\n\n # oldest index: 1\n # newest index: '' (blank)\n def get_article_list(self, board, index):\n http = urllib3.PoolManager()\n try:\n req = http.request(\n 'GET',\n '{0}://{1}/{2}/index{3}.html'.format(\n self.protocol, \n self.url_prefix, \n board, \n index), \n headers=self.headers)\n return req.data\n except Exception as e: \n logger.error(str(e))\n return ''\n\n def get_article(self, board, article_id): \n http = urllib3.PoolManager()\n try:\n req = http.request(\n 'GET',\n '{0}://{1}/{2}/{3}.html'.format(\n self.protocol, \n self.url_prefix, \n board, \n article_id), \n headers=self.headers)\n return req.data\n except Exception as e: \n logger.error(str(e))\n return ''\n\n","sub_path":"web_crawler/utils/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"622440393","text":"\"\"\"\nCreated on May 2, 2013\n\nThis script contains all the useful sidefunctions for studying\nthe Maximum Weighted Spanning Tree problem in the Combinatorial\nIdentification Setting.\n\n@author: ngurnani\n\"\"\"\n\nimport os\nos.getcwd()\nos.chdir('/Users/ngurnani/Dropbox/Senior/Thesis/Code')\nfrom math import*\nfrom pandas import DataFrame as df\nimport networkx as nx\nimport time\nimport xlwt\nfrom scipy.stats import bernoulli\n\n\n\"\"\"\nSimple function to copy a list to an Excel spreadsheet\n\"\"\"\ndef copyToExcel(result):\n book = xlwt.Workbook()\n sheet1 = book.add_sheet('sheet1')\n \n for i,e in enumerate(result):\n sheet1.write(i,1,e)\n \n name = \"random.xls\"\n book.save(name)\n \n\"\"\" \nFunction calculates the sample Max Weighted Bipartite Matching\nNOT FINISHED YET - ngurnani (05/02/13) \n\"\"\" \ndef calcMaxBip(V,empirical_means):\n # A new complete bipartite graph is constructed here every time.\n G=nx.complete_bipartite_graph(V,V) #N.B. 
Indexing from 0 to K-1 \n \n edges=list(G.edges_iter())\n for (u,v) in edges:\n # assign negative weights so that MST calculates MaxST\n G.add_edge(u,v,weight=-(empirical_means[edges.index((u,v))])) \n \n T = nx.max_weight_matching(G)\n temp=T.edges()\n output=[0]*len(temp)\n for indx in range(len(output)):\n output[indx]=edges.index(temp[indx])\n \n return output # Indices are from 0 to K-1 \n \n\"\"\"\nFunction runs Kruskal to calculate sample Max Weighted Spanning Tree\nNOTE: BE CAREFUL AS TO HOW EMPIRICAL MEANS ARE SYSTEMATICALLY ASSIGNED\n\"\"\" \ndef calcMaxST(V,empirical_means):\n # Every time a new complete graph is constructed here.\n G=nx.complete_graph(V) #N.B. Indexing from 0 to K-1 \n \n edges=list(G.edges_iter())\n for (u,v) in edges:\n # assign negative weights so that MST calculates MaxST\n G.add_edge(u,v,weight=-(empirical_means[edges.index((u,v))])) \n \n T = nx.minimum_spanning_tree(G)\n temp=T.edges()\n output=[0]*len(temp)\n for indx in range(len(output)):\n output[indx]=edges.index(temp[indx])\n \n return output # Indices are from 0 to K-1 \n\n\"\"\"\nFunction orders the empirical means into two sets those belonging to T^ and ~T^ and returns ordered Arms\n\"\"\"\ndef orderT_Arm(sample_T,empirical_means):\n \n temp1 = [empirical_means[i] for i in sample_T]\n dict1 = dict(zip(sample_T,temp1)) \n order1 = list(sorted(dict1,key=dict1.__getitem__,reverse=True))\n\n indx_empirical_means=list(xrange(len(empirical_means)))\n [indx_empirical_means.remove(j) for j in sample_T]\n temp2= [empirical_means[i] for i in indx_empirical_means]\n dict2= dict(zip(indx_empirical_means,temp2))\n order2= list(sorted(dict2,key=dict2.__getitem__,reverse=True)) \n \n return order1 + order2\n\n\"\"\"\nFunction orders the empirical means into two sets those belonging to T^ and ~T^\n\"\"\"\ndef orderT(sample_T,empirical_means):\n temp1 = [empirical_means[i] for i in sample_T]\n order1 = sorted(temp1,reverse=True)\n\n indx_empirical_means=list(xrange(len(empirical_means)))\n [indx_empirical_means.remove(j) for j in sample_T] # CHECK THIS LINE STILL WORKS\n temp2= [empirical_means[i] for i in indx_empirical_means]\n order2 = sorted(temp2,reverse=True)\n \n return order1 + order2\n \n\"\"\"\nReturns a list of Bernoulli parameters corresponding to experiment inputted\n\"\"\"\ndef oracle_means(experiment):\n if experiment==1:\n return [0.5] + [0.4]*20 # V = 7, K = 21\n elif experiment==2:\n return [0.5] + [0.42]*5 + [0.38]*15 # V = 7, K = 21\n elif experiment==3:\n return [0.5, 0.49743427, 0.4930656,0.48125839, 0.449347, 0.3631] # V = 4, K = 6\n elif experiment==4:\n return [0.5,0.42,0.4,0.4,0.35,0.35] # V = 4, K = 6\n elif experiment==5:\n return [0.5] + [(0.5-(0.025*j)) for j in range(2,16)] # V = 6, K = 15\n elif experiment==6:\n return [0.5] + [0.45]*7 + [0.43]*14 + [0.38]*14 # V = 9, K = 36\n\n\"\"\"\nReturns a list of edges corresponding to experiment inputted\n\"\"\"\ndef oracle_set(experiment):\n if experiment==1:\n return [0,1,2,3,4,5] # V = 7, K = 21\n elif experiment==2:\n return [0,1,2,3,4,5] # V = 7, K = 21\n elif experiment==3:\n return [0,1,2] # V = 4, K = 6\n elif experiment==4:\n return [0,1,2] # V = 4, K = 6\n elif experiment==5:\n return [0,1,2,3,4] # V = 6, K = 15\n elif experiment==6:\n return [0,1,2,3,4,5,6,7] # V = 9, K = 36\n \n\"\"\"\nFunction calculates the value of log_K\n\"\"\"\ndef log_SAR(K):\n extra_sum=0\n for j in range(2,K+1):\n extra_sum = extra_sum + (1.0/j)\n log_K=0.5+extra_sum\n return log_K\n\n\"\"\"\nFunction calculates the number of rounds of SAR\n\"\"\"\ndef 
rounds_SAR(n,K,alpha):\n if alpha==0:\n return 0.0\n a = 1.0/log_SAR(K)\n b = float((n-K))/((K+1)-alpha)\n return ceil(a*b) \n \n\"\"\"\nCalculates the hardness measure (as defined in Bubeck et al. 2013) for the given gaps\n\"\"\"\ndef hardness(empirical_gaps):\n runsum = 0.0\n for val in empirical_gaps:\n if val!= 0.0:\n runsum = runsum + (1/(val*val)) \n return runsum\n ","sub_path":"MaxST_sidefunctions.py","file_name":"MaxST_sidefunctions.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"557430968","text":"from sys import exit\nL = int(input())\nn = int(input())\nx = sorted([int(n) for n in input().split()])\nmiddle = L/2\nd = 10**8\n\nfor e in x:\n tmp = abs(middle - e)\n if d > tmp:\n d = tmp\n minans = e\n else:\n break\nprint(min(L - minans, minans))\nprint(max(L - x[0], x[-1]))\n","sub_path":"ant/1/ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"433095048","text":"import random\n\nclass KIRebecca:\n\tname=\"Rebecca\"\n\tllimit=0\n\tulimit=1000\n\t\n\tdef sayHello(self):\n\t\tprint(\"Hello from Rebecca.\")\n\n\tdef minMax(self, min, max):\n\t\tself.llimit = min\n\t\tself.ulimit = max\n\t\t\n\tdef zahl(self):\n\n\t\ttry:\n\t\t\tnextValue = random.randint(self.llimit, self.ulimit)\n\t\t\treturn nextValue\n\t\texcept ValueError as valErr:\n\t\t\treturn self.llimit\n\t\t\n\tdef result(self,pickedNbr,result):\n\t\tprint(pickedNbr,result)\n\t\tif(result == 0):\n\t\t\tprint(\"Yay, I won! \" + str(pickedNbr) + \" was the number.\")\n\t\telif(result == -1):\n\t\t\tprint(\"Oh no, my number \" + str(pickedNbr) + \" was too small.\")\n\t\t\tself.llimit= pickedNbr + 1\n\t\t\tprint(\"The new lower bound is \" + str(self.llimit))\n\t\telif(result == 1):\n\t\t\tprint(\"Oh no, my number \" + str(pickedNbr) + \" was too big.\")\n\t\t\tself.ulimit= pickedNbr - 1\n\t\t\tprint(\"The new upper bound is \" + str(self.ulimit))\n\t\telse:\n\t\t\tprint(\"Something went wrong...\")","sub_path":"KIRebecca.py","file_name":"KIRebecca.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"201821753","text":"#!/usr/bin/env python3\n\nimport argparse\nimport errno\nimport hashlib\nimport os\nimport random\nimport shutil\nimport signal\nimport string\nimport stat\nimport subprocess\nimport sys\nimport traceback\nfrom contextlib import contextmanager, suppress\n\n\n# Student binary (FUSE driver)\nFUSE_BIN = './sfs'\n\n# SFS filesystem tools (create and inspect images)\nMKFS = './mkfs.sfs'\nFSCK = './fsck.sfs'\n\n# Maximum runtime per test in seconds.\nTIMEOUT = 30\n\n# Global state - set by one (or more) test and used later to subtract points\ng_compiler_warnings = None\n\ng_rand_seed = None\ng_debug_testerror = False\n\n\ndef fs_tests():\n return [\n TestGroup('Valid submission', 'compile', 1.0,\n Test('Make', check_compile),\n stop_if_fail=True,\n ),\n TestGroup('Compiler warnings', '', -1,\n Test('No warnings', check_warnings),\n ),\n TestGroup('Listing rootdir', 'ls', 0.5,\n Test('Files', test_list_root_files),\n Test('Directories', test_list_root_dirs),\n Test('Full', test_list_root_full),\n Test('Random', test_list_root_random),\n stop_if_fail=True,\n ),\n TestGroup('Reading file from rootdir', 'read', 1.5,\n Test('Small file',\n test_read_root('Hello 
world!\\n%s\\n' % randstr(8, 12))),\n Test('Large file', test_read_root(randstr(4096 * 3, 4096 * 4))),\n Test('Binary data', test_read_root('%s\\0%s' % (randbin(512, 1024),\n randbin(256, 512)))),\n Test('Reading non-existing file', test_read_noexist_root),\n Test('Reading from offset in large file', test_read_offset_root),\n ),\n TestGroup('Subdirecties', 'subdir', 1.0,\n Test('Listing 1 level', test_list_subdir_1),\n Test('Listing n levels', test_list_subdir_n),\n Test('Listing non-existing dir', test_list_subdir_noexist),\n Test('Read file 1 level', test_read_subdir_1),\n Test('Read file n levels', test_read_subdir_n),\n Test('Read non-existing file in subdir', test_read_subdir_noexist),\n ),\n TestGroup('Creating directories', 'mkdir', 1.0,\n Test('Create in root', test_mkdir_1),\n Test('Create nested', test_mkdir_n),\n Test('Create too long name', test_mkdir_toolong),\n ),\n TestGroup('Removing directories', 'rmdir', 1.0,\n Test('Remove from root', test_rmdir_root),\n Test('Remove tree', test_rmdir_tree),\n Test('Remove non-empty directory', test_rmdir_nonempty),\n ),\n TestGroup('Removing files', 'rm', 1.0,\n Test('Remove empty file', test_rm_empty),\n Test('Remove small file', test_rm_small),\n Test('Remove large file', test_rm_large),\n Test('Remove from subdir', test_rm_subdir),\n ),\n TestGroup('Creating files', 'create', 1.0,\n Test('Create in root', test_create_root),\n Test('Create in subdirs', test_create_subdir),\n Test('Create too long name', test_create_toolong),\n ),\n TestGroup('Truncating files', 'truncate', 1.5,\n Test('Grow a file', test_truncate_grow),\n Test('Shrink a file', test_truncate_shrink),\n ),\n TestGroup('Writing files', 'write', 2.0,\n Test('Simple', test_write_simple),\n Test('Multi-block', test_write_multiblock),\n Test('Subset', test_write_subset),\n Test('Offset', test_write_offset),\n ),\n ]\n\n\nclass TestError(Exception):\n pass\n\n\nclass Test():\n \"\"\"A single test case, with a name and a function to execute).\"\"\"\n def __init__(self, name, func, stop_group_on_fail=False,\n stop_all_on_fail=False):\n self.name, self.func = name, func\n self.stop_group_on_fail = stop_group_on_fail\n self.stop_all_on_fail = stop_all_on_fail\n\n\nclass TestGroup():\n \"\"\"Collection of test cases, which are together worth n points. 
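Negative\n points mark penalty groups (such as the compiler-warnings check). 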
A testgroup\n is usually a single point in the grade scheme, and individual test cases\n award an (equal) fraction of those points when passed.\"\"\"\n\n def __init__(self, fullname, codename, points, *tests, stop_if_fail=False):\n self.fullname = fullname\n self.codename = codename\n self.points = float(points)\n self.tests = tests\n self.stop_if_fail = stop_if_fail\n\n\n def run_tests(self, output):\n succeeded = 0\n for test in self.tests:\n output.write('\\t' + test.name, end=': ')\n try:\n test.func()\n except TestError as e:\n output.write('FAIL', color='red')\n output.write(e.args[0])\n if g_debug_testerror:\n output.write_traceback()\n output.write('Image used for test (if any) preserved, see '\n '_checker.img', bold=True)\n sys.exit(1)\n if test.stop_all_on_fail:\n self.stop_if_fail = True\n if self.stop_if_fail or test.stop_group_on_fail:\n break\n else:\n output.write('OK', color='green')\n succeeded += 1\n\n self.last_run_had_failing_tests = succeeded != len(self.tests)\n return succeeded\n\n\n def run(self, output):\n output.write(self.fullname, color='blue', bold=True, end='')\n if self.codename:\n output.write(' (%s)' % self.codename, color='gray', end='')\n output.write()\n\n succeeded = self.run_tests(output)\n\n perc = ((1. * succeeded) / len(self.tests))\n if self.points < 0:\n perc = 1 - perc\n points = round(self.points * perc, 2)\n\n if self.points > 0:\n output.write(' Passed %d/%d tests, %.2f/%.2f points'\n % (succeeded, len(self.tests), points, self.points))\n else:\n if perc > 0:\n output.write(' Failed, subtracting %.2f points' % abs(points))\n\n return points\n\n\ndef test_groups(groups, output):\n points = 0.0\n for group in groups:\n points += group.run(output)\n\n if group.stop_if_fail and group.last_run_had_failing_tests:\n break\n\n return points\n\n\ndef full_run(output):\n points = test_groups(fs_tests(), output)\n totalpoints = sum(g.points for g in fs_tests() if g.points > 0)\n\n output.write()\n output.write('Executed all tests, got %.2f/%.2f points in total' % (points,\n totalpoints))\n\n return points\n\n\ndef partial_run(tests, output):\n all_tests = fs_tests()\n testmap = {g.codename: g for g in all_tests if g.codename}\n\n points = 0.0\n for test in tests:\n if test not in testmap:\n output.write('Error: ', color='red', end='')\n output.write('Unknown test \"%s\". 
Valid options are: %s'\n % (test, ', '.join(testmap.keys())))\n break\n group = testmap[test]\n if group.codename and group.codename in tests:\n points += group.run(output)\n return points\n\n\nclass Output:\n def __init__(self, enable_color=True, outfile=sys.stdout):\n self.enable_color = enable_color\n self.outfile = outfile\n\n\n def write(self, text='', end='\\n', color=None, bold=False, underline=False,\n blink=False, hilight=False):\n\n if self.enable_color and any((color, bold, underline, blink, hilight)):\n text = self.colorize_shell(text, color=color, bold=bold,\n underline=underline, blink=blink, hilight=hilight)\n\n print(text, end=end, file=self.outfile)\n\n def write_traceback(self):\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = traceback.format_exception(exc_type, exc_value, exc_traceback)\n self.write(''.join(tb), end='')\n\n\n def colorize_shell(self, val, color=None, bold=False, underline=False,\n blink=False, hilight=False):\n C_RESET = '\\033[0m'\n C_BOLD = '\\033[1m'\n C_UNDERLINE = '\\033[4m'\n C_BLINK = '\\033[5m'\n C_HILIGHT = '\\033[7m'\n C_GRAY = '\\033[90m'\n C_RED = '\\033[91m'\n C_GREEN = '\\033[92m'\n C_YELLOW = '\\033[93m'\n C_BLUE = '\\033[94m'\n C_PINK = '\\033[95m'\n C_CYAN = '\\033[96m'\n\n codes = ''\n if bold: codes += C_BOLD\n if underline: codes += C_UNDERLINE\n if blink: codes += C_BLINK\n if hilight: codes += C_HILIGHT\n if color:\n codes += {'gray': C_GRAY,\n 'red': C_RED,\n 'green': C_GREEN,\n 'yellow': C_YELLOW,\n 'blue': C_BLUE,\n 'pink': C_PINK,\n 'cyan': C_CYAN}[color]\n\n return '%s%s%s' % (codes, val, C_RESET)\n\n\ndef get_printable(data, maxlen=None):\n # don't consider whitespace as printable\n printable_chars = string.ascii_letters + string.digits + string.punctuation\n if isinstance(data, bytes):\n printable_chars = printable_chars.encode('utf-8')\n if maxlen:\n data = data[:maxlen]\n if all(c in printable_chars for c in data):\n return data\n else:\n return repr(data)\n\n\ndef run_cmd(args, allow_err=False):\n proc = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n try:\n out, err = proc.communicate(timeout=TIMEOUT)\n except subprocess.TimeoutExpired:\n proc.kill()\n out, err = proc.communicate()\n err += 'Timeout of %d seconds expired for command \"%s\"' % (TIMEOUT,\n ' '.join(args))\n\n if proc.returncode and not allow_err:\n raise TestError('Command returned non-zero value.\\n' +\n 'Command: %s\\nReturn code: %d\\nstdout: %s\\nstderr: %s' %\n (' '.join(args), proc.returncode, out, err))\n\n if allow_err:\n return proc.returncode, out, err\n else:\n return out, err\n\n\ndef randstr(minlength, maxlength=None):\n length = random.randrange(minlength, maxlength or minlength + 1)\n return ''.join(random.choice(string.ascii_lowercase) for i in range(length))\n\n\ndef randbin(minlength, maxlength=None):\n length = random.randrange(minlength, maxlength or minlength + 1)\n return ''.join(chr(random.randrange(0, 256)) for i in range(length))\n\n\ndef randpath(depth=0, minpartlen=3, maxpartlen=6, is_dir=False, avoid=None):\n avoid = avoid or []\n if not isinstance(avoid, (list, tuple, dict)):\n avoid = [avoid]\n avoid = [fname.strip('/') for fname in avoid]\n\n if maxpartlen < minpartlen:\n maxpartlen = minpartlen\n\n while True:\n path = '/' + '/'.join(randstr(minpartlen, maxpartlen + 1)\n for _ in range(depth + 1))\n if is_dir:\n path = path + '/'\n if path.strip('/') not in avoid:\n return path\n\n\ndef generate_random_contents(num_files=5, num_dirs=3, max_depth=3, avoid=None,\n prefix_dir='/'):\n 
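# files are emitted as (path, contents) tuples, directories as bare path strings\n 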
conts = []\n\n for _ in range(num_files):\n depth = random.randrange(0, max_depth + 1)\n fname = os.path.join(prefix_dir, randpath(depth=depth).lstrip('/'))\n fconts = randstr(10, 100)\n conts.append((fname, fconts))\n\n for _ in range(num_dirs):\n depth = random.randrange(0, max_depth + 1)\n dname = os.path.join(prefix_dir,\n randpath(depth=depth, is_dir=True).lstrip('/'))\n conts.append(dname)\n\n return conts\n\n\ndef check_list_eq(ref_list, test_list, item_name='entry', container='image'):\n for entry in ref_list:\n if entry not in test_list:\n raise TestError('Expected {item_name} {entry} not found in '\n '{container} (got list {test_list})'.format(**locals()))\n\n for entry in test_list:\n if entry not in ref_list:\n raise TestError('Unexpected {item_name} {entry} found in '\n '{container} (expected: {ref_list})'.format(**locals()))\n\n\ndef is_in_dir(checkpath, dirname):\n if not dirname.endswith('/'): dirname = dirname + '/'\n if checkpath.endswith('/'): checkpath = checkpath[:-1]\n if not checkpath.startswith(dirname):\n return False\n remainder = checkpath[len(dirname):]\n if '/' in remainder:\n return False\n return True\n\n\ndef path_iter(fullpath, omit_file=False):\n is_dir = fullpath.endswith('/')\n *parts, last = fullpath.strip('/').split('/')\n path = '/'\n for part in parts:\n path = path + part + '/'\n yield path\n\n if is_dir or not omit_file:\n yield path + last + ('/' if is_dir else '')\n\n\n@contextmanager\ndef lowlevel_open(*args, **kwargs):\n fd = os.open(*args, **kwargs)\n try:\n yield fd\n finally:\n os.close(fd)\n\n\nclass Filesystem:\n def __init__(self, *spec, padding=True, avoid=None):\n self.image_path = '_checker.img'\n self.mountpoint = '/tmp/vu-os-sfsmount'\n self.files = {}\n self.dirs = []\n self.fuse_proc = None\n\n self.populate(spec)\n\n if padding:\n avoid = avoid or []\n if isinstance(avoid, (list, tuple, dict)):\n avoid = list(avoid)\n else:\n avoid = [avoid]\n avoid += list(self.dirs) + list(self.files.keys())\n padding_conts = generate_random_contents(avoid=avoid)\n self.populate(padding_conts)\n\n\n def __enter__(self):\n self.mkfs()\n self.unmount()\n self.mount()\n return self\n\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # If there was already an exception, just clean up asap, otherwise, do\n # some more sanity-checking first\n if exc_type:\n with suppress(TestError):\n self.unmount()\n if not g_debug_testerror:\n self.remove_img()\n else:\n try:\n self.fsck()\n finally:\n unmount_ok = self.unmount()\n self.remove_img()\n\n if not unmount_ok:\n raise TestError('Unmounting the FUSE filesystem failed, '\n 'probably indicating it has crashed previously.')\n\n\n def add_intermediate_dirs(self, full_path):\n for path in path_iter(full_path, omit_file=True):\n if path not in self.dirs:\n self.dirs.append(path)\n\n\n def populate(self, spec):\n for entry in spec:\n if isinstance(entry, (tuple, list)):\n name, contents = entry\n if name.endswith('/'):\n raise Exception('Cannot specify file contents for a'\n 'directory: \"{name}\" got contents \"{contents}\"'\n .format(**locals()))\n else:\n name, contents = entry, ''\n\n if not name.startswith('/'):\n name = '/%s' % name\n\n if name == '/':\n raise Exception('Cannot create root directory')\n\n self.add_intermediate_dirs(name)\n if not name.endswith('/'):\n self.files[name] = contents\n\n\n def unmount(self):\n did_unmount = False\n\n rv, out, err = run_cmd(['fusermount', '-u', self.mountpoint],\n allow_err=True)\n if not rv:\n did_unmount = True\n\n if os.path.isdir(self.mountpoint):\n 
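# also clean up the now-unused mountpoint directory\n 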
os.rmdir(self.mountpoint)\n\n return did_unmount\n\n\n def remove_img(self):\n os.remove(self.image_path)\n\n\n def dump(self):\n # Ideally this function would print files/dirs in order and, and add\n # other data similar to fsck.\n print('dirs', self.dirs)\n print('files', self.files)\n\n\n def mount(self):\n os.makedirs(self.mountpoint, exist_ok=True)\n\n # Runs fuse binary in background mode: voids stdout/stderr, exits when\n # unmounted (e.g., with fusermount)\n run_cmd([FUSE_BIN, '--background', '-i', self.image_path,\n self.mountpoint])\n\n\n def mkfs(self):\n mkfs_spec = []\n tmpfiles = []\n tmpfile_cnt = 0\n for d in self.dirs:\n mkfs_spec.append(d)\n for name, contents in self.files.items():\n if not contents:\n mkfs_spec.append(name)\n else:\n tmpfile = '_checker_tmpfile%d' % tmpfile_cnt\n with open(tmpfile, 'wb') as f:\n f.write(contents.encode('utf-8'))\n tmpfile_cnt += 1\n tmpfiles.append(tmpfile)\n mkfs_spec.append('%s:%s' % (name, tmpfile))\n\n try:\n run_cmd([MKFS, '--randomize', '--seed', str(g_rand_seed), '--quiet',\n self.image_path] + mkfs_spec)\n finally:\n for tmpfile in tmpfiles:\n os.remove(tmpfile)\n\n\n def fsck(self):\n out, _ = run_cmd([FSCK, '--list', '--md5', self.image_path])\n fsck_files, fsck_dirs = {}, []\n for line in out.splitlines():\n name = line.split()[-1]\n if name.endswith('/'):\n fsck_dirs.append(name)\n else:\n md5 = line.split()[0]\n size = int(line.split()[1], 16)\n fsck_files[name] = (md5, size)\n\n check_list_eq(self.dirs, fsck_dirs, 'directory')\n check_list_eq(self.files, fsck_files, 'file')\n\n for fname, fcontents in self.files.items():\n md5 = hashlib.md5(fcontents.encode('utf-8')).hexdigest()\n if md5 != fsck_files[fname][0]:\n fmtcontents = get_printable(fcontents, 100)\n expsize = len(fcontents)\n imgmd5 = fsck_files[fname][0]\n imgsize = fsck_files[fname][1]\n raise TestError(('Contents of {fname} do not match:\\n'\n 'expected hash: {md5} (filesize {expsize})\\n'\n 'hash in image: {imgmd5} (filesize {imgsize})\\n'\n '(start of) expected contents: {fmtcontents}')\n .format(**locals()))\n\n\n def get_host_path(self, path):\n if not path.startswith('/'):\n raise Exception('{path} is not image path: does not start with /'\n .format(**locals()))\n return os.path.join(self.mountpoint, path[1:])\n\n\n def get_image_path(self, path, is_dir=False, should_exist=False):\n if not path.startswith('/'):\n raise Exception('{path} is not image path: does not start with /'\n .format(**locals()))\n if path == '/':\n return path\n if is_dir and not path.endswith('/'):\n path = path + '/'\n if should_exist and path not in self.dirs and path not in self.files:\n raise TestError('get_image_path: {path} is not a known file or '\n 'directory'.format(**locals()))\n return path\n\n\n def checked(func):\n \"\"\"Decorator that wraps a function to do two things:\n 1) Call fsck before and after the function to ensure integrity of the\n filesystem.\n 2) Add an expect_error argument to the function, which ensures the\n function *will* raise that error. 
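For example,\n fs.check_rmdir(path, expect_error=errno.ENOTEMPTY) asserts that the\n call must fail with ENOTEMPTY. 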
If an integer is provided, it is\n assumed to be the errno of an OSError.\n \"\"\"\n def wrapper(self, *args, **kwargs):\n expect_error = kwargs.pop('expect_error', None)\n path = kwargs.get('path', '')\n self.fsck()\n ret = None\n try:\n ret = func(self, *args, **kwargs)\n except OSError as e:\n if not isinstance(expect_error, int):\n raise TestError('{func.__name__} {path} returned error '\n '{e.__class__.__name__} {e}'.format(**locals())) \\\n from e\n if e.errno != expect_error:\n experr = errno.errorcode[expect_error]\n errname = errno.errorcode[e.errno]\n raise TestError('{func.__name__} {path} returned wrong '\n 'error. Expected {experr}, got {errname} '\n '({e.strerror})'.format(**locals())) from e\n else:\n if expect_error:\n errname = errno.errorcode[expect_error]\n raise TestError('{func.__name__} {path} did not return an '\n 'error, should return {errname}'.format(**locals()))\n self.fsck()\n return ret\n return wrapper\n\n\n @checked\n def check_readdir(self, path):\n path = self.get_image_path(path, is_dir=True)\n hostpath = self.get_host_path(path)\n fuse_dirs, fuse_files = [], {}\n for entry in os.listdir(hostpath):\n entrypath = os.path.join(path, entry)\n s = os.stat(os.path.join(hostpath, entry))\n if stat.S_ISDIR(s.st_mode):\n fuse_dirs.append(entrypath + '/')\n elif stat.S_ISREG(s.st_mode):\n fuse_files[entrypath] = s.st_size\n else:\n raise TestError('readdir: {entrypath} is not a directory or '\n 'regular file according to FUSE (st_mode = {s.st_mode})'\n .format(**locals()))\n\n ref_dirs = []\n for dirname in self.dirs:\n if is_in_dir(dirname, path):\n ref_dirs.append(dirname)\n\n ref_files = {}\n for fname, fcontents in self.files.items():\n if is_in_dir(fname, path):\n ref_files[fname] = len(fcontents)\n\n check_list_eq(ref_dirs, fuse_dirs, 'directory', 'fuse readdir')\n check_list_eq(ref_files, fuse_files, 'file', 'fuse readdir')\n\n for fname, fsize in ref_files.items():\n fuse_size = fuse_files[fname]\n if fsize != fuse_size:\n raise TestError('File size of {fname} reported via FUSE is '\n 'incorrect. 
Expected size: {fsize}, reported size: '\n '{fuse_size}'.format(**locals()))\n\n\n @checked\n def check_read(self, path):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n with open(hostpath, 'rb') as f:\n fuse_contents = f.read()\n\n expected_contents = self.files[path].encode('utf-8')\n if expected_contents != fuse_contents:\n expfmt = get_printable(expected_contents)\n fusefmt = get_printable(fuse_contents)\n raise TestError('read: Data read from {path} through FUSE did not '\n 'match expected file contents.\\n'\n 'Expected: {expfmt}\\n'\n 'Data read: {fusefmt}\\n'.format(**locals()))\n\n\n @checked\n def check_pread(self, path, size, offset):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n expected_contents = self.files[path].encode('utf-8')\n expected_contents = expected_contents[offset:offset + size]\n\n with lowlevel_open(hostpath, os.O_RDONLY) as fd:\n fuse_contents = os.pread(fd, size, offset)\n\n if expected_contents != fuse_contents:\n expfmt = get_printable(expected_contents)\n fusefmt = get_printable(fuse_contents)\n raise TestError('pread: Data read from {path} at offset {offset}, '\n 'size {size}, through FUSE did not match expected file '\n 'contents.\\n'\n 'Expected: {expfmt}\\n'\n 'Data read: {fusefmt}\\n'.format(**locals()))\n\n\n @checked\n def check_mkdir(self, path):\n path = self.get_image_path(path, is_dir=True)\n hostpath = self.get_host_path(path)\n\n os.makedirs(hostpath, exist_ok=True)\n self.add_intermediate_dirs(path)\n\n\n @checked\n def check_rmdir(self, path):\n path = self.get_image_path(path, is_dir=True)\n hostpath = self.get_host_path(path)\n\n os.rmdir(hostpath)\n self.dirs.remove(path)\n\n\n @checked\n def check_rm(self, path):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n os.remove(hostpath)\n del self.files[path]\n\n\n @checked\n def check_create(self, path):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n with lowlevel_open(hostpath, os.O_WRONLY | os.O_CREAT) as fd:\n pass\n\n self.files[path] = ''\n\n\n @checked\n def check_truncate(self, path, size):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n os.truncate(hostpath, size)\n self.files[path] = self.files[path][:size] + \\\n '\\0' * (size - len(self.files[path]))\n\n\n @checked\n def check_write(self, path, data):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n with lowlevel_open(hostpath, os.O_WRONLY) as fd:\n os.write(fd, data.encode('utf-8'))\n\n self.files[path] = data + self.files[path][len(data):]\n\n\n @checked\n def check_pwrite(self, path, data, offset):\n path = self.get_image_path(path)\n hostpath = self.get_host_path(path)\n\n with lowlevel_open(hostpath, os.O_WRONLY) as fd:\n os.pwrite(fd, data.encode('utf-8'), offset)\n\n if len(self.files[path]) < offset:\n self.files[path] = self.files[path] + '\\0' * (offset\n - len(self.files[path]))\n self.files[path] = self.files[path][:offset] + data + \\\n self.files[path][len(data) + offset:]\n\n\n @checked\n def check_exists(self, path):\n if path not in self.dirs and path not in self.files:\n raise Exception('{path} is not a known file or directory'\n .format(**locals()))\n\n hostpath = self.get_host_path(path)\n try:\n os.stat(hostpath)\n except FileNotFoundError:\n raise TestError('Existing file {path} not found on filesystem '\n 'through FUSE'.format(**locals()))\n\n\n @checked\n def check_not_exists(self, path):\n if path in self.dirs or path in self.files:\n 
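# a test bug rather than a FUSE failure: the reference model says the path exists\n 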
raise Exception('{path} exists on filesystem'.format(**locals()))\n\n hostpath = self.get_host_path(path)\n try:\n os.stat(hostpath)\n except FileNotFoundError:\n pass\n else:\n raise TestError('Non-existing file {path} was found on filesystem '\n 'through FUSE'.format(**locals()))\n\n\ndef test_list_root_files():\n emptyfile = randpath()\n conts = generate_random_contents(num_dirs=0, max_depth=0, avoid=emptyfile)\n with Filesystem(emptyfile, *conts, padding=False) as fs:\n fs.check_readdir('/')\n\n\ndef test_list_root_dirs():\n conts = generate_random_contents(num_files=0)\n with Filesystem(\n '/averylongdirectorynamethatjustneverseemstoendandjustkeeps/',\n *conts,\n padding=False\n ) as fs:\n fs.check_readdir('/')\n\n\ndef test_list_root_full():\n conts = ('file' + str(i) + random.choice(('', '/')) for i in range(64))\n with Filesystem(*conts, padding=False) as fs:\n fs.check_readdir('/')\n\n\ndef test_list_root_random():\n with Filesystem() as fs:\n fs.check_readdir('/')\n\n\ndef test_read_root(what):\n def _inner():\n target_filename = randpath()\n with Filesystem((target_filename, what)) as fs:\n fs.check_read(target_filename)\n return _inner\n\n\ndef test_read_noexist_root():\n existing_file = randpath()\n nonexisting_file = randpath(avoid=existing_file)\n with Filesystem(existing_file, avoid=nonexisting_file) as fs:\n fs.check_exists(existing_file)\n fs.check_not_exists(nonexisting_file)\n\n\ndef test_read_offset_root():\n fname = randpath()\n fconts = randstr(6 * 4096, 8 * 4096)\n with Filesystem((fname, fconts)) as fs:\n size = random.randrange(512 + 1, 4096)\n off = random.randrange(3 * 4096, 5 * 4096)\n fs.check_pread(fname, size, off)\n\n\ndef test_list_subdir_1():\n subdir = randpath(is_dir=True)\n subdir_conts = generate_random_contents(prefix_dir=subdir)\n with Filesystem(*subdir_conts) as fs:\n fs.check_readdir(subdir)\n\n\ndef test_list_subdir_n():\n subdir = randpath(depth=random.randrange(5, 10), is_dir=True)\n subdir_conts = generate_random_contents(prefix_dir=subdir)\n with Filesystem(*subdir_conts) as fs:\n fs.check_readdir(subdir)\n\n\ndef test_list_subdir_noexist():\n subdir = randpath(is_dir=True)\n subdir_conts = generate_random_contents(prefix_dir=subdir)\n subdir_invalid = randpath(is_dir=True)\n with Filesystem(*subdir_conts, avoid=subdir_invalid) as fs:\n fs.check_readdir(subdir)\n fs.check_readdir(subdir_invalid, expect_error=errno.ENOENT)\n\n\ndef test_read_subdir_1():\n fname = randpath(depth=1)\n with Filesystem((fname, randstr(8, 12))) as fs:\n fs.check_read(fname)\n\n\ndef test_read_subdir_n():\n fname = randpath(depth=random.randrange(5, 10))\n with Filesystem((fname, randstr(8, 12))) as fs:\n fs.check_read(fname)\n\n\ndef test_read_subdir_noexist():\n validfile = randpath(depth=1)\n invalidfile = randpath(depth=1)\n with Filesystem((validfile, randstr(8, 12)), avoid=invalidfile) as fs:\n fs.check_read(validfile)\n fs.check_read(invalidfile, expect_error=errno.ENOENT)\n\n\ndef test_mkdir_1():\n dirname = randpath(is_dir=True)\n with Filesystem(avoid=dirname) as fs:\n fs.check_mkdir(dirname)\n\n\ndef test_mkdir_n():\n dirname = randpath(depth=random.randrange(5, 10), is_dir=True)\n with Filesystem(avoid=dirname) as fs:\n fs.check_mkdir(dirname)\n\n\ndef test_mkdir_toolong():\n validname = randpath(minpartlen=57, is_dir=True)\n validtree = randpath(depth=60, minpartlen=1, maxpartlen=1, is_dir=True)\n invalidname = randpath(minpartlen=58, is_dir=True)\n with Filesystem(avoid=(validname, validtree)) as fs:\n # First, check if mkdir works in the first 
place\n fs.check_mkdir(validname)\n\n # And whether the limit is imposed on each subdir, not the full path\n fs.check_mkdir(validtree)\n\n # Then, try to create the too long entry and see if we get error\n fs.check_mkdir(invalidname, expect_error=errno.ENAMETOOLONG)\n\n\ndef test_rmdir_root():\n target = randpath(is_dir=True)\n with Filesystem(target) as fs:\n fs.check_rmdir(target)\n\n\ndef test_rmdir_tree():\n target = randpath(depth=5, is_dir=True)\n with Filesystem(target) as fs:\n for path in list(path_iter(target))[::-1]:\n fs.check_rmdir(path)\n\n\ndef test_rmdir_nonempty():\n emptydir = randpath(is_dir=True)\n target = randpath(depth=1)\n with Filesystem(emptydir, target) as fs:\n # Test if rmdir works normally\n fs.check_rmdir(emptydir)\n\n # Try to remove the non-empty dir\n fs.check_rmdir(os.path.dirname(target), expect_error=errno.ENOTEMPTY)\n\n\ndef test_rm_empty():\n emptyfile = randpath()\n with Filesystem(emptyfile) as fs:\n fs.check_rm(emptyfile)\n\n\ndef test_rm_small():\n smallfile = randpath()\n fconts = randstr(32, 500)\n with Filesystem((smallfile, fconts)) as fs:\n fs.check_rm(smallfile)\n\n\ndef test_rm_large():\n largefile = randpath()\n fconts = randstr(4096, 8192)\n with Filesystem((largefile, fconts)) as fs:\n fs.check_rm(largefile)\n\n\ndef test_rm_subdir():\n subdir = randpath(is_dir=True)\n subdir_file = subdir + randpath()[1:]\n subdir_padding = generate_random_contents(prefix_dir=subdir,\n avoid=subdir_file)\n fconts = randstr(32, 500)\n with Filesystem((subdir_file, fconts), *subdir_padding) as fs:\n fs.check_rm(subdir_file)\n\n\ndef test_create_root():\n fname = randpath()\n with Filesystem(avoid=fname) as fs:\n fs.check_create(fname)\n\n\ndef test_create_subdir():\n subdir = randpath(depth=random.randrange(5, 10), is_dir=True)\n fname = subdir + randpath()[1:]\n with Filesystem(subdir) as fs:\n fs.check_create(fname)\n\n\ndef test_create_toolong():\n subdir = randpath(depth=60, minpartlen=1, maxpartlen=1, is_dir=True)\n validfile = subdir + randpath(minpartlen=57)[1:]\n invalidfile = subdir + randpath(minpartlen=58)[1:]\n with Filesystem(subdir, avoid=validfile) as fs:\n fs.check_create(validfile)\n fs.check_create(invalidfile, expect_error=errno.ENAMETOOLONG)\n\n\ndef test_truncate_grow():\n emptyfile = randpath()\n smallfile = randpath()\n mediumfile = randpath()\n alignedfile = randpath()\n\n with Filesystem((emptyfile, ''),\n (smallfile, randstr(400)),\n (mediumfile, randstr(512 * 3 + 300)),\n (alignedfile, randstr(512))) as fs:\n\n # Test truncating to same size\n fs.check_truncate(emptyfile, 0)\n fs.check_truncate(smallfile, 400)\n fs.check_truncate(mediumfile, 512 * 3 + 300)\n fs.check_truncate(alignedfile, 512)\n\n # Then grow each by at least 1 block\n fs.check_truncate(emptyfile, 800)\n fs.check_truncate(smallfile, 1100)\n fs.check_truncate(mediumfile, 512 * 5 + 200)\n fs.check_truncate(alignedfile, 512 * 3)\n\n\ndef test_truncate_shrink():\n emptyfile = randpath()\n smallfile = randpath()\n mediumfile = randpath()\n alignedfile = randpath()\n\n with Filesystem((emptyfile, ''),\n (smallfile, randstr(400)),\n (mediumfile, randstr(512 * 3 + 300)),\n (alignedfile, randstr(512 * 4))) as fs:\n\n # Test truncating to same size\n fs.check_truncate(emptyfile, 0)\n fs.check_truncate(smallfile, 400)\n fs.check_truncate(mediumfile, 512 * 3 + 300)\n fs.check_truncate(alignedfile, 512 * 4)\n\n # Shrink each file by a bit\n fs.check_truncate(smallfile, 222)\n fs.check_truncate(mediumfile, 512 * 1 + 100)\n fs.check_truncate(alignedfile, 512 * 3)\n\n # Shrink 
each file to 0\n fs.check_truncate(smallfile, 0)\n fs.check_truncate(mediumfile, 0)\n fs.check_truncate(alignedfile, 0)\n\n\ndef test_write_simple():\n emptyfile = randpath()\n with Filesystem(emptyfile) as fs:\n fs.check_write(emptyfile, randstr(4, 10))\n fs.check_write(emptyfile, randstr(100, 200))\n fs.check_write(emptyfile, randstr(512))\n\n\ndef test_write_multiblock():\n emptyfile = randpath()\n with Filesystem(emptyfile) as fs:\n fs.check_write(emptyfile, randstr(1100, 1400))\n fs.check_write(emptyfile, randstr(2700, 3000))\n\n\ndef test_write_subset():\n emptyfile = randpath()\n with Filesystem(emptyfile) as fs:\n fs.check_write(emptyfile, randstr(1600, 2000))\n fs.check_write(emptyfile, randstr(600, 900))\n\n\ndef test_write_offset():\n emptyfile = randpath()\n somefile = randpath()\n alignedfile = randpath()\n with Filesystem(emptyfile,\n (somefile, randstr(1600, 2000)),\n (alignedfile, randstr(512 * 3))) as fs:\n fs.check_pwrite(emptyfile, randstr(600, 800),\n random.randrange(600, 800))\n fs.check_pwrite(emptyfile, randstr(1600, 2000),\n random.randrange(1100, 1500))\n fs.check_pwrite(somefile, randstr(600, 700), random.randrange(600, 700))\n fs.check_pwrite(alignedfile, randstr(512), 512)\n fs.check_pwrite(alignedfile, randstr(512), 512 * 5)\n\n\ndef check_warnings():\n if g_compiler_warnings is not None:\n raise TestError('Got compiler warnings:\\n%s' % g_compiler_warnings)\n\n\ndef check_compile():\n run_cmd(['make', 'clean'])\n\n out, err = run_cmd(['make'])\n err = '\\n'.join([l for l in err.split('\\n') if not l.startswith('make:')])\n if 'warning' in err:\n global g_compiler_warnings\n g_compiler_warnings = err\n\n run_cmd([FUSE_BIN, '-h'])\n\n\ndef main():\n os.chdir(os.path.dirname(sys.argv[0]) or '.')\n\n parser = argparse.ArgumentParser(\n description='Run automated tests for the FS assignment, and output '\n 'a (tentative) grade.'\n )\n parser.add_argument(\n '--no-color',\n dest='color',\n action='store_const',\n const=False,\n help='disable colorized output',\n )\n parser.add_argument(\n '--force-color',\n dest='color',\n action='store_const',\n const=True,\n help='force colorized output when directing to file',\n )\n parser.add_argument(\n '--codegrade-out',\n action='store_true',\n help='output final result for codegrade',\n )\n parser.add_argument(\n '-o',\n '--out-file',\n type=argparse.FileType('w'),\n help='redirect output to this file (default: stdout)',\n )\n parser.add_argument(\n '-s',\n '--seed',\n type=int,\n help='seed to use for random values (default: random)',\n )\n parser.add_argument(\n '-d',\n '--debug-testerror',\n action='store_true',\n help='halt on the first test error, and preserve the test image',\n )\n parser.add_argument(\n nargs='*',\n dest='tests',\n help='which tests to run (default: run all tests). 
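Run one group via its '\n 'codename, e.g. \"read\" or \"mkdir\". 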
Test names are '\n 'displayed in parenthesis with each category.',\n )\n args = parser.parse_args()\n\n color = args.color if args.color is not None else args.out_file is None\n output = Output(enable_color=color, outfile=args.out_file)\n\n seed = args.seed if args.seed is not None else random.getrandbits(32)\n random.seed(seed)\n global g_rand_seed\n g_rand_seed = seed\n output.write('Using random seed %d (use --seed %d to repeat this run)'\n % (seed, seed))\n\n global g_debug_testerror\n g_debug_testerror = args.debug_testerror\n\n try:\n if args.tests:\n grade = partial_run(args.tests, output)\n else:\n grade = full_run(output)\n except Exception:\n output.write_traceback()\n\n\n if args.codegrade_out:\n fraction = min(max(1.0, grade), 10.0) / 10.\n print(fraction)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":38099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"609776780","text":"import math\nimport random\nimport sys\nimport time\nfrom collections import deque\nfrom copy import deepcopy\nfrom typing import Any, Callable, Deque, List, Tuple\n\n\ndef now():\n return time.time()\n\n\nclass TabuList:\n def __init__(self, max_size: int):\n self._list: Deque[Any] = deque([], max_size)\n\n def push(self, element: Any):\n self._list.append(element)\n\n def __str__(self):\n return str(list(self._list))\n\n def contains(self, element: Any):\n return element in self._list\n\n\ndef random_path(n: int) -> List[int]:\n result: List[int] = []\n for _ in range(n - 2):\n result.append(random.choice(list(set(range(1, n - 1)) - set(result))))\n return [0] + result + [0]\n\n\ndef greedy_path(costs: List[List[int]]) -> List[int]:\n path = [0]\n curr_city = 0\n while len(path) < len(costs):\n min_cost = math.inf\n min_index = 0\n\n for i, _ in enumerate(costs[curr_city]):\n if costs[curr_city][i] < min_cost and i not in path:\n min_cost = costs[curr_city][i]\n min_index = i\n\n path.append(min_index)\n curr_city = min_index\n return path + [0]\n\n\ndef tweak_path(path: List[int]) -> List[int]:\n if random.random() < 0.7:\n city1 = random.randrange(1, len(path) - 1)\n city2 = random.choice([c for c in range(1, len(path) - 1) if c != city1])\n path[city1], path[city2] = path[city2], path[city1]\n return path\n city1 = random.randrange(1, len(path) - 2)\n city2 = random.choice([c for c in range(1, len(path) - 1) if c > city1])\n return path[:city1] + list(reversed(path[city1:city2])) + path[city2:]\n\n\ndef path_cost(costs: List[List[int]]) -> Callable[[List[int]], int]:\n return lambda path: sum(costs[path[i]][path[i + 1]] for i in range(len(path) - 1))\n\n\ndef tabu_search(\n initial: List[int],\n tweak: Callable[[List[int]], List[int]],\n quality: Callable[[List[int]], int],\n tabu_size: int,\n num_of_tweaks: int,\n timeout: float,\n) -> Tuple[List[int], int]:\n s = initial\n best = s\n tabu = TabuList(tabu_size)\n tabu.push(s)\n\n start = now()\n while now() - start <= timeout:\n r = tweak(deepcopy(s))\n for _ in range(num_of_tweaks):\n if now() - start > timeout:\n break\n w = tweak(deepcopy(s))\n if not tabu.contains(w) and (tabu.contains(r) or quality(w) < quality(r)):\n r = w\n if not tabu.contains(r):\n s = r\n tabu.push(r)\n if quality(s) < quality(best):\n print(f\"new best! 
{quality(s)} - after {now() - start} s.\", file=sys.stderr)\n best = s\n return best, quality(best)\n\n\ndef main():\n t, n = map(int, input().split())\n costs = [[*map(int, input().split())] for i in range(n)]\n\n path, cost = tabu_search(\n initial=greedy_path(costs),\n tweak=tweak_path,\n quality=path_cost(costs),\n tabu_size=n * 10,\n num_of_tweaks=int(n ** 2 / 3),\n timeout=float(t),\n )\n\n print(*list(map(lambda city: city + 1, path)), file=sys.stderr)\n print(cost)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"l1/z2/tabu.py","file_name":"tabu.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"218808477","text":"import pandas as pd\r\n\r\n\r\ndef pct_to_first(yds_to_go, offense_yards):\r\n \"\"\"\r\n Percentage of progress towards first down on a play\r\n \"\"\"\r\n pct = float(offense_yards) / float(yds_to_go)\r\n return pct\r\n\r\n\r\ndef pct_of_avg_yds_remaining(avg_yds_remain, offense_yds):\r\n \"\"\"\r\n Percentage of avg yards per down needed for a first down that was allowed by the defense on a play\r\n Ex: 2nd & 6 = 2 yds needed per down (2nd, 3rd, and 4th down). A run for 1 yd would be 50% of the yards needed\r\n on that down.\r\n \"\"\"\r\n pct_of_yds_remaining = float(offense_yds) / float(avg_yds_remain)\r\n return pct_of_yds_remaining\r\n\r\n\r\ndef avg_yds_per_down_to_first(down, yds_to_go):\r\n \"\"\"\r\n AVG number of yards needed per down by the offense to get a first down\r\n Ex: 1st & 10 = 2.5yds per down\r\n \"\"\"\r\n downs_remaining = 5 - down\r\n avg_yds_remaining_per_down = float(yds_to_go) / float(downs_remaining)\r\n return avg_yds_remaining_per_down\r\n\r\n\r\ndef tfl_pct_chage(tfl, yds_to_go, tfl_yds):\r\n tfl_yds = tfl_yds * tfl # tfl = 1 if solo, 0.5 if assist\r\n new_yds_to_go = yds_to_go + tfl_yds\r\n\r\n pct_change = (new_yds_to_go - yds_to_go) / yds_to_go\r\n return pct_change\r\n\r\n\r\ndef contribute_in_redzone(player_contribution, start_yd):\r\n contribution_pt = 0\r\n if player_contribution > 0:\r\n if start_yd < 10:\r\n contribution_pt = 1\r\n elif start_yd < 20:\r\n contribution_pt = 0.5\r\n return contribution_pt\r\n\r\n\r\ndef pct_away_from_endzone(start_yd, tfl_yds, def_sideoffield):\r\n # Calculate distance from offense endzone\r\n if def_sideoffield == 'Oppo':\r\n start = start_yd\r\n if def_sideoffield == 'Own':\r\n start = 100 - start_yd\r\n pct_to_offense_endzone = tfl_yds / start\r\n return pct_to_offense_endzone\r\n\r\n\r\ndef pct_away_from_def_endzone(start_yd, offense_yds, def_sideoffield):\r\n # Calculate distance from offense endzone\r\n if def_sideoffield == 'Oppo':\r\n start = 100 - start_yd\r\n if def_sideoffield == 'Own':\r\n start = start_yd\r\n pct_to_offense_endzone = offense_yds / start\r\n return pct_to_offense_endzone\r\n\r\n\r\ndef yds_allow(start_yd, down, yds_to_go, touchdown, safety, player_safety, def_sideoffield, offense_yards,\r\n proximity_to_play, tfl, tfl_yds, contributions, f_fumble, rec_fumble, turnover, avgs):\r\n\r\n # AVG yards from ADI (Avg defensive impact)\r\n avg_yds = avgs['counted_yards'].iloc[0]\r\n\r\n # Can be positive or negative\r\n yds_allow_over_avg = offense_yards - avg_yds\r\n\r\n # AVG yards to go per down to get first\r\n avg_yds_per_down = avg_yds_per_down_to_first(down=down, yds_to_go=yds_to_go)\r\n\r\n # pct of ACG yards to go per down allowed by defense\r\n pct_of_avg_yds_per_down = pct_of_avg_yds_remaining(avg_yds_remain=avg_yds_per_down, offense_yds=offense_yards)\r\n\r\n # 
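Worked example (mirrors the docstrings above): on 2nd & 6 there are\r\n # 3 downs left, so avg_yds_per_down = 6/3 = 2.0, and a 1-yard gain\r\n # covers 50% of that per-down requirement.\r\n\r\n # 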
pct to first down\r\n pct_to_first_down = pct_to_first(yds_to_go=yds_to_go, offense_yards=offense_yards)\r\n\r\n redzone_contribution = contribute_in_redzone(player_contribution=contributions, start_yd=start_yd)\r\n\r\n # Add up points\r\n total_points = 0\r\n\r\n total_points += redzone_contribution\r\n\r\n if proximity_to_play is True:\r\n if touchdown == 1:\r\n total_points -= 6\r\n else:\r\n pass\r\n if safety == 1:\r\n total_points += 2 * player_safety # solo = 1, assist = 0.5\r\n else:\r\n pass\r\n\r\n if f_fumble == 1:\r\n if turnover == 1:\r\n total_points += 2\r\n if rec_fumble == 1:\r\n total_points += 1\r\n elif f_fumble == 0:\r\n if rec_fumble == 1:\r\n total_points += 1\r\n else:\r\n pass\r\n\r\n if tfl > 0:\r\n tmp_points = tfl_pct_chage(tfl=tfl,\r\n yds_to_go=yds_to_go,\r\n tfl_yds=tfl_yds)\r\n total_points += tmp_points\r\n\r\n # pct pushed away from defense endzone\r\n pct_away_endzone = pct_away_from_endzone(start_yd=start_yd, tfl_yds=tfl_yds, def_sideoffield=def_sideoffield)\r\n total_points += pct_away_endzone\r\n else:\r\n # If more than 100%, add all yds\r\n # Calc pct to first down\r\n tmp_yds_to_first = 0\r\n if pct_to_first_down > 1:\r\n tmp_yds_to_first -= 1\r\n elif 0 < pct_to_first_down < 1:\r\n tmp_yds_to_first -= pct_to_first_down\r\n\r\n # Calc pct to TD\r\n pct_towards_endzone = pct_away_from_def_endzone(start_yd=start_yd, offense_yds=offense_yards,\r\n def_sideoffield=def_sideoffield)\r\n\r\n subtotal = tmp_yds_to_first + pct_towards_endzone\r\n total_points -= subtotal\r\n else:\r\n # Potentially add some other metrics here to determine how far away from potentially being involved in the play\r\n total_points -= 0\r\n\r\n return total_points\r\n\r\n\r\ndef score_plays(df, avgs):\r\n df = df.copy()\r\n df['yds_allow_points'] = df[\r\n ['StartYard', 'Down', 'ToGo', 'FirstDown', 'Touchdown', 'Safety', 'PlayerSafety', 'DefSideOfField', 'OffensiveYardage',\r\n 'proximity_to_play', 'TackleForLoss', 'tfl_yards', 'offense_neg_yds', 'ForcedFumble', 'RecoveredFumble', 'Turnover']\r\n ].apply(\r\n lambda x: yds_allow(\r\n start_yd=x['StartYard'],\r\n down=x['Down'],\r\n yds_to_go=x['ToGo'],\r\n touchdown=x['Touchdown'],\r\n safety=x['Safety'],\r\n player_safety=x['PlayerSafety'],\r\n def_sideoffield=x['DefSideOfField'],\r\n offense_yards=x['OffensiveYardage'],\r\n proximity_to_play=x['proximity_to_play'],\r\n tfl=x['TackleForLoss'],\r\n tfl_yds=x['tfl_yards'],\r\n contributions=x['offense_neg_yds'],\r\n f_fumble=x['ForcedFumble'],\r\n rec_fumble=x['RecoveredFumble'],\r\n turnover=x['Turnover'],\r\n avgs=avgs),\r\n axis=1)\r\n\r\n return df\r\n\r\n\r\ndef main():\r\n df_avg = pd.read_csv('./data/03_primary/rush_avg_player_contribution.csv')\r\n df = pd.read_csv('./data/03_primary/rush_processed.csv')\r\n\r\n unique_positions = df['TechniqueNameLR'].unique()\r\n print(unique_positions)\r\n\r\n master_position_df = score_plays(df=df, avgs=df_avg)\r\n # print(master_position_df['yds_allow_points'].mean())\r\n # exit()\r\n pdiaa_dict = {}\r\n for position in unique_positions:\r\n df_pos = df.loc[df['TechniqueNameLR'] == position]\r\n df_pos = df_pos.copy()\r\n score_df = score_plays(df=df_pos, avgs=df_avg)\r\n # Positional Defensive Rush Impact Above Average\r\n pdiaa_avg = score_df['yds_allow_points'].mean()\r\n pdiaa_sum = score_df['yds_allow_points'].sum()\r\n pdiaa_count = score_df['yds_allow_points'].count()\r\n pdiaa_dict[position] = [pdiaa_avg, pdiaa_sum, pdiaa_count]\r\n\r\n # Convert dictionary to DataFrame\r\n pdiaa_df = pd.DataFrame.from_dict(pdiaa_dict, 
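# the dict keys (the positions) become the DataFrame's row index\r\n 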
orient='index')\r\n pdiaa_df = pdiaa_df.reset_index()\r\n pdiaa_df.columns = ['position', 'pdira_avg', 'pdira_sum', 'pdira_count']\r\n pdiaa_df = pdiaa_df.sort_values(by=['position'], ascending=True)\r\n pdiaa_df.to_csv('./data/04_graph_inputs/pdira.csv', index=False)\r\n print(pdiaa_df)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n# POSITIONAL\r\n# PDIRA -- positional defensive impact rush avg\r\n# PDIPA\r\n# PDIA\r\n\r\n# CUMULATIVE DEFENSE\r\n# DRIA\r\n# DPIA\r\n# DIA\r\n\r\n","sub_path":"Submissions/matt.karan93@gmail.com/code/score_rush_position.py","file_name":"score_rush_position.py","file_ext":"py","file_size_in_byte":7505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"306482547","text":"#!/usr/bin/python3\n'''\n * This file is part of the EK9000 device support module. It is subject to \n * the license terms in the LICENSE.txt file found in the top-level directory \n * of this distribution and at: \n * https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. \n * No part of the EK9000 device support module, including this file, may be \n * copied, modified, propagated, or distributed except according to the terms \n * contained in the LICENSE.txt file.\n'''\n\nimport os, sys, json\nimport CppTk\n\nout = \"\"\ninp = \"\"\nout_file = False\nin_file = False\nfor arg in sys.argv:\n if arg == '-o' and not (out_file or in_file):\n out_file = True\n elif arg == '-h' or arg == '--help':\n print(\"USAGE: tgen.py -o outputfile.h -i terminals.json\")\n sys.exit(0)\n elif arg == '-i':\n in_file = True\n elif out_file:\n out = arg\n out_file = False\n elif in_file:\n inp = arg\n in_file = False\n elif arg is not sys.argv[0]:\n print(\"Unknown option \\\"\" + arg + \"\\\"\")\n\nif not os.path.isfile(inp):\n print(\"Could not find input file, make sure it's a file and that it exists\")\n exit(1)\n\nheader = CppTk.Header(out)\n\nheader.add_block_comment(\"AUTOMATICALLY GENERATED FILE. 
DO NOT EDIT.\")\nheader.include_std(\"stdlib.h\")\nheader.include_std(\"stdint.h\")\n\nheader.begin_struct(\"STerminalInfoConst_t\")\nheader.add_variable(\"m_pString\", \"const char*\")\nheader.add_variable(\"m_nID\", \"uint32_t\")\nheader.add_variable(\"m_nOutputSize\", \"uint16_t\")\nheader.add_variable(\"m_nInputSize\", \"uint16_t\")\nheader.end_struct()\n\n_json = json.load(open(inp))\n\nterms = list()\ni = 0\nfor t in _json[\"terminals\"]:\n name = t[\"name\"]\n terms.append(\"&\" + name + \"_Info\")\n header.add_block_comment(name)\n header.add_define(name + \"_STRING\", \"\\\"\" + name + \"\\\"\")\n header.add_define(name + \"_ID\", name.replace(\"EL\", \"\"))\n header.add_define(name + \"_OUTPUT_SIZE\", str(t[\"pdo_out_size\"]))\n header.add_define(name + \"_INPUT_SIZE\", str(t[\"pdo_in_size\"]))\n header.add_variable(name + \"_Info\", \"static const STerminalInfoConst_t\", \"{\" +\n name + \"_STRING, \" + name + \"_ID, \" + name + \"_OUTPUT_SIZE, \" + name + \"_INPUT_SIZE}\")\n i += 1\n\nheader.add_block_comment(\"Array of terminal info structures\")\n\nheader.add_define(\"NUM_TERMINALS\", str(i))\n\nheader.add_array_variable(\"g_pTerminalInfos\", \"STerminalInfoConst_t*\", terms, const=True, static=True)\n","sub_path":"tgen.py","file_name":"tgen.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"60806079","text":"##############################################################################################\n## Description: displays error values and dcl\n## Values displayed: errors and dcl\n## Units: amps\n## Written for: BOLT Senior Design Team\n## Author: Henry Trease\n## Written: Spring 2018\n## Modified: Spring 2018\n##############################################################################################\n\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QLCDNumber, QLabel, QAction, QFrame\nfrom PyQt5.QtGui import QIcon, QPainter, QColor, QPen\nfrom PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal, Qt\n\nfrom args import Arg_Class\n\nclass Error(QWidget):\n def __init__(self, parent):\n\n super(Error, self).__init__(parent)\n\n self.arguments = Arg_Class()\n\n self.rpmCutValue = 0\n self.rpmCutValuePrev = 0\n self.cutFlag = 0\n \n self.DCLValue = 0\n self.errorCodePL = 0 # post low\n self.errorCodePH = 0 # post high\n self.errorCodeRL = 0 # run low\n self.errorCodeRH = 0 # run high\n \n self.DCLGauge = QLCDNumber(self)\n self.DCLGauge.display(str(self.DCLValue).zfill(1))\n self.DCLGauge.move(200,0)\n self.DCLGauge.resize(80,80)\n self.DCLGauge.setFrameShape(QFrame.NoFrame)\n self.DCLGauge.setSegmentStyle(QLCDNumber.Flat)\n \n self.DCLlabel = QLabel(self)\n self.DCLlabel.setText(\"DCL: \")\n self.DCLlabel.move(200,0)\n\n self.PLErrorGauge = QLCDNumber(self)\n self.PLErrorGauge.display(str(self.errorCodePL).zfill(1))\n self.PLErrorGauge.move(0,0)\n self.PLErrorGauge.resize(80,80)\n self.PLErrorGauge.setFrameShape(QFrame.NoFrame)\n self.PLErrorGauge.setSegmentStyle(QLCDNumber.Flat)\n\n self.PHErrorGauge = QLCDNumber(self)\n self.PHErrorGauge.display(str(self.errorCodePH).zfill(1))\n self.PHErrorGauge.move(20,0)\n self.PHErrorGauge.resize(80,80)\n self.PHErrorGauge.setFrameShape(QFrame.NoFrame)\n self.PHErrorGauge.setSegmentStyle(QLCDNumber.Flat)\n\n self.RLErrorGauge = QLCDNumber(self)\n self.RLErrorGauge.display(str(self.errorCodeRL).zfill(1))\n self.RLErrorGauge.move(40,0)\n self.RLErrorGauge.resize(80,80)\n self.RLErrorGauge.setFrameShape(QFrame.NoFrame)\n 
self.RLErrorGauge.setSegmentStyle(QLCDNumber.Flat)\n\n self.RHErrorGauge = QLCDNumber(self)\n self.RHErrorGauge.display(str(self.errorCodeRH).zfill(1))\n self.RHErrorGauge.move(60,0)\n self.RHErrorGauge.resize(80,80)\n self.RHErrorGauge.setFrameShape(QFrame.NoFrame)\n self.RHErrorGauge.setSegmentStyle(QLCDNumber.Flat)\n \n self.errorlabel = QLabel(self)\n self.errorlabel.setText(\"Error code: \")\n self.errorlabel.move(0,0)\n\n self.rpmCutGauge = QLCDNumber(self)\n self.rpmCutGauge.display(str(self.DCLValue).zfill(1))\n self.rpmCutGauge.move(300,0)\n self.rpmCutGauge.resize(100,100)\n self.rpmCutGauge.setFrameShape(QFrame.NoFrame)\n self.rpmCutGauge.setSegmentStyle(QLCDNumber.Flat)\n self.rpmCutGauge.hide()\n \n self.rpmCutLabel = QLabel(self)\n self.rpmCutLabel.setText(\"RPM Before Cut: \")\n self.rpmCutLabel.move(300,0)\n self.rpmCutLabel.hide()\n\n\n @pyqtSlot(float)\n def DCL_update(self, value):\n self.DCLGauge.display(value)\n\n @pyqtSlot(float)\n def RPMCut_update(self, value):\n self.rpmCutValue = value # store on the instance; a bare local would be discarded\n if value > 10 and self.cutFlag == 0:\n self.rpmCutGauge.hide()\n #self.rpmCutGauge.display(value)\n self.rpmCutValuePrev = value\n else:\n self.rpmCutGauge.display(self.rpmCutValuePrev)\n self.rpmCutGauge.show()\n self.rpmCutLabel.show()\n self.cutFlag = 1\n\n @pyqtSlot(int, int, int, int)\n def error_update(self, value1, value2, value3, value4):\n self.PLErrorGauge.display(value1)\n self.PHErrorGauge.display(value2)\n self.RLErrorGauge.display(value3)\n self.RHErrorGauge.display(value4)\n \n","sub_path":"errorGauge.py","file_name":"errorGauge.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"584590171","text":"import sys, os\nfrom AWS import AWS\n\nconn = AWS.get_ec2_connection()\nc = 'f'\ninstance = sys.argv[1]\n\nfor i in ['vol-67e00719','vol-49e00737','vol-3ee00740','vol-0ee00770','vol-f0e0078e']:\n conn.attach_volume(i, instance, '/dev/sd{0}'.format(c))\n c = chr(ord(c) + 1)\n","sub_path":"bin/add_volumes.py","file_name":"add_volumes.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"631880340","text":"import os\n\n\ndef downloadServer(conn):\n print(\"DOWNLOADING: \")\n fileNameEncode = conn.recv(4096)\n fileName = str(fileNameEncode.decode())\n print(fileName)\n if os.path.isfile(fileName):\n with open(fileName, \"r+b\") as file:\n try:\n data = file.read(4096)\n conn.send(data)\n print(\"FILE SENT SUCCESSFULLY\")\n except:\n print(\"COULD NOT SEND FILE\")\n else:\n print(\"FILE DOES NOT EXIST\")\n errorNoFile = \"FILE DOES NOT EXIST #OmIGPbRHuY3tWx7Df7UJA0bFILEDOESNOTEXISTXp9In5pRNxZ9gpt91\"\n conn.send(errorNoFile.encode())\n\n\ndef downloadClient(conn):\n FileNameDecode = input(\"FILENAME: \")\n conn.send(FileNameDecode.encode())\n with open(FileNameDecode, \"w+b\") as file:\n try:\n dataEncode = conn.recv(4096)\n data = dataEncode.decode()\n if data != \"FILE DOES NOT EXIST #OmIGPbRHuY3tWx7Df7UJA0bFILEDOESNOTEXISTXp9In5pRNxZ9gpt91\":\n file.write(dataEncode) # the file is opened in binary mode, so write the raw bytes\n print(\"FILE DOWNLOADED SUCCESSFULLY\")\n else:\n print(\"FILE DOES NOT EXIST\")\n except:\n print(\"FILE COULD NOT DOWNLOAD\")\n","sub_path":"dlul.py","file_name":"dlul.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"214856284","text":"#! 
/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport logging.config\nimport os\nimport sys\nimport time\nimport re\nfrom concurrent import futures\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\nimport json\nimport math\n\n# Add Generated folder to module path.\nPARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(os.path.join(PARENT_DIR, 'generated'))\n\nimport ServerSideExtension_pb2 as SSE\nimport grpc\nfrom ssedata import FunctionType\nfrom scripteval import ScriptEval\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\nclass ExtensionService(SSE.ConnectorServicer):\n \"\"\"\n A simple SSE-plugin created for the HelloWorld example.\n \"\"\"\n\n def __init__(self, funcdef_file):\n \"\"\"\n Class initializer.\n :param funcdef_file: a function definition JSON file\n \"\"\"\n self._function_definitions = funcdef_file\n self.ScriptEval = ScriptEval()\n os.makedirs('logs', exist_ok=True)\n log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../logger.config')\n logging.config.fileConfig(log_file)\n logging.info('Logging enabled')\n\n @property\n def function_definitions(self):\n \"\"\"\n :return: json file with function definitions\n \"\"\"\n return self._function_definitions\n\n @property\n def functions(self):\n \"\"\"\n :return: Mapping of function id and implementation\n \"\"\"\n return {\n 0: '_prophet',\n 1: '_prophetScript'\n }\n\n @staticmethod\n def _get_function_id(context):\n \"\"\"\n Retrieve function id from header.\n :param context: context\n :return: function id\n \"\"\"\n metadata = dict(context.invocation_metadata())\n header = SSE.FunctionRequestHeader()\n header.ParseFromString(metadata['qlik-functionrequestheader-bin'])\n\n return header.functionId\n\n \"\"\"\n Implementation of added functions.\n \"\"\"\n\n @staticmethod\n def _prophetScript(request, context):\n \"\"\"\n Mirrors the input and sends back the same data.\n :param request: iterable sequence of bundled rows\n :return: the same iterable sequence as received\n \"\"\"\n\n # instantiate a list for measure data\n dateStampList = []\n figuresList = []\n forecastPeriods = None\n forecastType = None\n m = None\n yhat = None\n changePoint = None\n minFloor = None\n maxCap = None\n \n for request_rows in request:\n \n # iterate over each request row (contains rows, duals, numData)\n\n # pull duals from each row, and the numData from duals\n for row in request_rows.rows:\n # the first numData contains the date stamps\n dateStamps = [d.numData for d in row.duals][0]\n pythonDate = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(dateStamps) - 2)\n dateStampList.append(pythonDate)\n\n # the second numData contains the figures\n figures = int([d.numData for d in row.duals][1])\n figuresList.append(figures)\n \n # this is redundant and is the same in every row\n if not forecastPeriods:\n forecastPeriods = int([d.numData for d in row.duals][2])\n if not forecastType:\n forecastType = [d.strData for d in row.duals][3]\n if not yhat:\n yhat = [d.strData for d in row.duals][6]\n if not changePoint:\n changePoint = int([d.numData for d in row.duals][7])\n if not minFloor:\n minFloor = int([d.numData for d in row.duals][8]) \n if not maxCap:\n maxCap = int([d.numData for d in row.duals][9]) \n\n # create data frame\n dataFrame = pd.DataFrame({'ds': dateStampList,'y': figuresList})\n print(dataFrame)\n if forecastType == 'hourly':\n # fit data to prophet\n m = 
Prophet(changepoint_prior_scale=changePoint)\n m.fit(dataFrame)\n \n #create future dataframe\n future = m.make_future_dataframe(periods=forecastPeriods, freq='H')\n \n if forecastType == 'daily':\n # fit data to prophet\n m = Prophet(changepoint_prior_scale=changePoint)\n m.fit(dataFrame)\n\n #create future dataframe\n future = m.make_future_dataframe(periods=forecastPeriods)\n \n if forecastType == 'monthly':\n # fit data to prophet\n \n m = Prophet(weekly_seasonality=False, changepoint_prior_scale=changePoint)\n m.add_seasonality(name='monthly', period=30.5, fourier_order=5)\n m.fit(dataFrame)\n \n #create future dataframe\n future = m.make_future_dataframe(periods=forecastPeriods, freq='MS')\n\n #create forecast and create a list\n if not m:\n # fit data to prophet\n \n m = Prophet(weekly_seasonality=False, changepoint_prior_scale=changePoint)\n m.add_seasonality(name='monthly', period=30.5, fourier_order=5)\n m.fit(dataFrame)\n\n forecast = m.predict(future) \n forecastList = forecast[yhat].values.tolist()\n dateList = pd.to_datetime(forecast['ds'].values.tolist())\n\n #convert forecast results to ints\n resultsList = []\n for val in forecastList:\n try:\n resultsList.append(int(val))\n except:\n resultsList.append(0) \n \n finalDateList = []\n for ds in dateList:\n try:\n finalDateList.append(str(ds))\n except:\n finalDateList.append(0) \n \n # Create an iterable of dual with the result\n dualsList = []\n dualsList.append([SSE.Dual(numData=d) for d in resultsList])\n dualsList.append([SSE.Dual(strData=d) for d in finalDateList])\n \n #create response rows\n response_rows = []\n for i in range(len(resultsList)):\n duals = [dualsList[z][i] for z in range(len(dualsList))]\n response_rows.append(SSE.Row(duals=iter(duals)))\n \n #set and send table header\n table = SSE.TableDescription(name='ProphetForecast')\n table.fields.add(dataType=SSE.NUMERIC)\n table.fields.add(dataType=SSE.STRING)\n md = (('qlik-tabledescription-bin', table.SerializeToString()),)\n context.send_initial_metadata(md)\n\n yield SSE.BundledRows(rows=response_rows)\n\n @staticmethod\n def _prophet(request, context):\n \"\"\"\n Mirrors the input and sends back the same data.\n :param request: iterable sequence of bundled rows\n :return: the same iterable sequence as received\n \"\"\"\n\n # instantiate a list for measure data\n dateStampList = []\n figuresList = []\n forecastPeriods = None\n outliers = None\n forecastType = None\n adjustments = None\n forecastReturnType = None\n changePoint = None\n fourierOrder = None\n m = None\n holidays = None\n \n for request_rows in request:\n # iterate over each request row (contains rows, duals, numData)\n # pull duals from each row, and the numData from duals\n for row in request_rows.rows:\n # this is redundant and is the same in every row\n if not adjustments:\n adjustments = [d.strData for d in row.duals][0]\n \n if not changePoint:\n tmpChangePoint = [d.numData for d in row.duals][1]\n if math.isnan(tmpChangePoint):\n changePoint = 0.05\n else:\n changePoint = tmpChangePoint \n \n # the first numData contains the date stamps\n dateStamp = [d.numData for d in row.duals][2]\n try: \n pythonDate = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(dateStamp) - 2)\n dateStampList.append(pythonDate)\n except ValueError:\n dateStampList.append(dateStamp)\n\n # the second numData contains the figures\n figures = int([d.numData for d in row.duals][3])\n figuresList.append(figures)\n\n if not forecastType:\n forecastType = [d.strData for d in row.duals][4] \n\n if not 
forecastPeriods:\n forecastPeriods = int([d.numData for d in row.duals][5])\n\n if not forecastReturnType:\n forecastReturnType = [d.strData for d in row.duals][6]\n\n if not fourierOrder:\n tmpFourierOrder = [d.numData for d in row.duals][7]\n if math.isnan(tmpFourierOrder):\n fourierOrder = 5\n else:\n fourierOrder = int(tmpFourierOrder) \n if not holidays:\n holidays = [d.strData for d in row.duals][8] \n\n if not outliers:\n outliers = [d.strData for d in row.duals][9]\n\n\n\n \n # create data frame\n dataFrame = pd.DataFrame({'ds': dateStampList,'y': figuresList})\n print(dataFrame)\n\n # Store the original indexes for re-ordering output later\n index = dataFrame.copy()\n\n # remove null values from df\n dataFrame = dataFrame.dropna()\n \n # Sort the Request Data Frame based on dates, as Qlik may send unordered data\n dataFrame = dataFrame.sort_values('ds')\n \n # drop extra periods from data frame\n dataFrame = dataFrame[:-forecastPeriods]\n maxDate = max(dataFrame['ds'])\n \n # remove outliers\n if len(outliers) > 2:\n outliersList = outliers.split(\",\")\n for outlier in outliersList:\n dataFrame.loc[dataFrame['ds'] == outlier,'y'] = None\n\n # create holidays\n if len(holidays) > 2:\n holidays_list = holidays.split(',')\n holidays_df = pd.DataFrame({\n 'holiday': 'holiday',\n 'ds': pd.to_datetime(holidays_list)\n }) \n \n if forecastType == 'hourly':\n # fit data to prophet\n if len(holidays) > 2:\n m = Prophet(changepoint_prior_scale=changePoint, holidays=holidays_df)\n else:\n m = Prophet(changepoint_prior_scale=changePoint) \n m.fit(dataFrame)\n \n #create future dataframe\n future = m.make_future_dataframe(periods=forecastPeriods, freq='H')\n \n if forecastType == 'daily':\n # fit data to prophet\n if len(holidays) > 2:\n m = Prophet(changepoint_prior_scale=changePoint, holidays=holidays_df)\n else:\n m = Prophet(changepoint_prior_scale=changePoint) \n m.add_seasonality(name='daily', period=1, fourier_order=fourierOrder)\n m.fit(dataFrame)\n\n #create future dataframe\n future = m.make_future_dataframe(periods=forecastPeriods)\n\n if forecastType == 'weekly':\n if len(holidays) > 2:\n m = Prophet(weekly_seasonality=True, changepoint_prior_scale=changePoint, holidays=holidays_df)\n else:\n m = Prophet(weekly_seasonality=True, changepoint_prior_scale=changePoint)\n m.add_seasonality(name='weekly', period=7, fourier_order=fourierOrder)\n m.fit(dataFrame) \n future = m.make_future_dataframe(periods=forecastPeriods, freq='W')\n \n if forecastType == 'monthly':\n # fit data to prophet\n if len(holidays) > 2:\n m = Prophet(weekly_seasonality=False, changepoint_prior_scale=changePoint, holidays=holidays_df)\n else:\n m = Prophet(weekly_seasonality=False, changepoint_prior_scale=changePoint) \n m.add_seasonality(name='yearly', period=365.25, fourier_order=fourierOrder)\n m.fit(dataFrame)\n \n #create future dataframe\n future = m.make_future_dataframe(periods=forecastPeriods, freq='MS')\n\n if not m:\n # fit data to prophet\n if len(holidays) > 2:\n m = Prophet(seasonality_mode='multiplicative', holidays=holidays_df)\n else:\n m = Prophet(seasonality_mode='multiplicative') \n m.fit(dataFrame)\n \n #create forecast\n forecast = m.predict(future)\n\n #loop through adjustments for each time period and change yhat\n try:\n adjJson = json.loads(adjustments)\n for i in range(len(forecast)):\n for item in adjJson:\n dt = datetime.strptime(item['firstField'], '%Y-%m-%d')\n if dt == forecast.at[i, 'ds']:\n adjustmentString = item[\"adjustment\"].replace(\"m\", \"000000\").replace(\"M\", 
\"000000\").replace(\"k\", \"0000\").replace(\"K\", \"0000\")\n if \"%\" in adjustmentString:\n adjustmentPercent = float(adjustmentString.replace(\"%\", \"\"))/100 + 1\n forecast.at[i, forecastReturnType] = float(forecast.at[i,forecastReturnType]) * adjustmentPercent\n else: \n forecast.at[i, forecastReturnType] = float(forecast.at[i,forecastReturnType]) + float(adjustmentString)\n except:\n print('No adjustments!') \n \n #drop index column from data frame\n #dataFrame.drop(columns=['index'], inplace=True)\n\n # keep only the needed columns from the forecast\n forecast = forecast[['ds', forecastReturnType]]\n print(forecast)\n\n # merge two dataframes\n index = index.merge(forecast, on='ds', how='left')\n print(index)\n index['y'] = index.apply(lambda row: row[forecastReturnType] if row['ds'] > maxDate else row['y'], axis=1)\n #forecast['result'] = forecast.apply(lambda row: row[forecastReturnType] if row['y'] == 0 else row['y'], axis=1)\n\n forecastList = index['y'].values.tolist()\n\n #convert forecast results to ints\n resultsList = []\n for i, val in enumerate(forecastList):\n try:\n resultsList.append(int(val))\n except:\n resultsList.append(0) \n\n # Create an iterable of dual with the result\n duals = iter([[SSE.Dual(numData=d)] for d in resultsList])\n\n # Yield the row data as bundled rows\n yield SSE.BundledRows(rows=[SSE.Row(duals=d) for d in duals])\n\n \n\n def GetCapabilities(self, request, context):\n \"\"\"\n Get capabilities.\n Note that either request or context is used in the implementation of this method, but still added as\n parameters. The reason is that gRPC always sends both when making a function call and therefore we must include\n them to avoid error messages regarding too many parameters provided from the client.\n :param request: the request, not used in this method.\n :param context: the context, not used in this method.\n :return: the capabilities.\n \"\"\"\n logging.info('GetCapabilities')\n # Create an instance of the Capabilities grpc message\n # Enable(or disable) script evaluation\n # Set values for pluginIdentifier and pluginVersion\n capabilities = SSE.Capabilities(allowScript=True,\n pluginIdentifier='Prophet',\n pluginVersion='v1.1.0')\n\n # If user defined functions supported, add the definitions to the message\n with open(self.function_definitions) as json_file:\n # Iterate over each function definition and add data to the capabilities grpc message\n for definition in json.load(json_file)['Functions']:\n function = capabilities.functions.add()\n function.name = definition['Name']\n function.functionId = definition['Id']\n function.functionType = definition['Type']\n function.returnType = definition['ReturnType']\n\n # Retrieve name and type of each parameter\n for param_name, param_type in sorted(definition['Params'].items()):\n function.params.add(name=param_name, dataType=param_type)\n\n logging.info('Adding to capabilities: {}({})'.format(function.name,[p.name for p in function.params]))\n\n return capabilities\n\n def ExecuteFunction(self, request_iterator, context):\n \"\"\"\n Execute function call.\n :param request_iterator: an iterable sequence of Row.\n :param context: the context.\n :return: an iterable sequence of Row.\n \"\"\"\n # Retrieve function id\n func_id = self._get_function_id(context)\n\n # Call corresponding function\n logging.info('ExecuteFunction (functionId: {})'.format(func_id))\n\n return getattr(self, self.functions[func_id])(request_iterator, context)\n\n def EvaluateScript(self, request, context):\n \"\"\"\n This 
plugin provides functionality only for script calls with no parameters and tensor script calls.\n :param request:\n :param context:\n :return:\n \"\"\"\n # Parse header for script request\n metadata = dict(context.invocation_metadata())\n header = SSE.ScriptRequestHeader()\n header.ParseFromString(metadata['qlik-scriptrequestheader-bin'])\n\n # Retrieve function type\n func_type = self.ScriptEval.get_func_type(header)\n\n # Verify function type\n if (func_type == FunctionType.Aggregation) or (func_type == FunctionType.Tensor):\n return self.ScriptEval.EvaluateScript(header, request, context, func_type)\n else:\n # This plugin does not support other function types than aggregation and tensor.\n # Make sure the error handling, including logging, works as intended in the client\n msg = 'Function type {} is not supported in this plugin.'.format(func_type.name)\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(msg)\n # Raise error on the plugin-side\n raise grpc.RpcError(grpc.StatusCode.UNIMPLEMENTED, msg)\n\n \"\"\"\n Implementation of the Server connecting to gRPC.\n \"\"\"\n\n def Serve(self, port, pem_dir):\n \"\"\"\n Sets up the gRPC Server with insecure connection on port\n :param port: port to listen on.\n :param pem_dir: Directory including certificates\n :return: None\n \"\"\"\n # Create gRPC server\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n SSE.add_ConnectorServicer_to_server(self, server)\n\n if pem_dir:\n # Secure connection\n with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:\n private_key = f.read()\n with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:\n cert_chain = f.read()\n with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:\n root_cert = f.read()\n credentials = grpc.ssl_server_credentials([(private_key, cert_chain)], root_cert, True)\n server.add_secure_port('[::]:{}'.format(port), credentials)\n logging.info('*** Running server in secure mode on port: {} ***'.format(port))\n else:\n # Insecure connection\n server.add_insecure_port('[::]:{}'.format(port))\n logging.info('*** Running server in insecure mode on port: {} ***'.format(port))\n\n # Start gRPC server\n server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', nargs='?', default='50066')\n parser.add_argument('--pem_dir', nargs='?')\n parser.add_argument('--definition_file', nargs='?', default='../functions.json')\n args = parser.parse_args()\n\n # need to locate the file when script is called from outside it's location dir.\n def_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.definition_file)\n\n calc = ExtensionService(def_file)\n calc.Serve(args.port, args.pem_dir)\n","sub_path":"code/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":21052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"155024782","text":"# M. P. 
Hayes UCECE\nimport numpy as np\nfrom ipywidgets import interact, interactive, fixed\nfrom matplotlib.pyplot import show\nfrom .lib.signal_plot import signal_plot2\nfrom .lib.utils import gauss\n\ndef gauss_scaled_demo1_plot(muX=0, sigmaX=1, a=1, autoscale=False):\n\n    if a == 0:\n        a = 1e-3\n\n    N = 401\n    x = np.linspace(-10, 10, N)\n\n    muY = a * muX\n    sigmaY = abs(a) * sigmaX\n\n    fX = gauss(x, muX, sigmaX)\n    fY = gauss(x, muY, sigmaY)\n\n    ylim = None\n    if not autoscale:\n        ylim = [0, 0.55]\n\n    signal_plot2(x, fX, x, fY, ylim=ylim)\n    show()\n\ndef gauss_scaled_demo1():\n    interact(gauss_scaled_demo1_plot, muX=(-5, 5), sigmaX=(0.01, 5, 0.01),\n             a = (0.0, 5, 0.1))\n","sub_path":"sensor-fusion/demos/gauss_scaled_demo1.py","file_name":"gauss_scaled_demo1.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"60605235","text":"import os\nfrom glob import glob\nfrom PIL import Image\n\n\ndef make_gif(source_dir, output):\n    \"\"\"\n    Make gif file from set of .jpeg images.\n    Args:\n        source_dir (str): path with .jpeg images\n        output (str): path to the output .gif file\n\n    Returns: None\n\n    \"\"\"\n    batch_sort = lambda s: int(s[s.index('-')+1:s.index('.')])\n    image_paths = sorted(glob(os.path.join(source_dir, \"*.jpg\")),\n                         key=batch_sort)\n    frames = []\n    for path in image_paths:\n        img = Image.open(path)\n        frames.append(img)\n    frames[0].save(output, format='GIF', append_images=frames[1:],\n                   save_all=True, duration=1*len(frames), loop=0)\n\n\nif __name__ == \"__main__\":\n    make_gif(source_dir=\"results/mnist/gen_output\",\n             output=\"results/mnist/gen_output/gen.gif\")","sub_path":"dcgan/utils/gifmaker.py","file_name":"gifmaker.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"126746201","text":"import json\n\n\ndef format_to_fit_ui(geo_parsed_response):\n    ui_formatted_response = list()\n    geo_parsed_response = json.loads(geo_parsed_response)\n\n    for location_data in geo_parsed_response['annotated_text']['location_data']:\n        try:\n            latd = location_data['latd']\n        except:\n            latd = 0\n\n        try:\n            long = location_data['long']\n        except:\n            long = 0\n\n        try:\n            location_name = location_data['entity_name']\n        except:\n            location_name = \"None\"\n\n        try:\n            number_of_migrants = geo_parsed_response['annotated_text']['numbers'][0]\n        except:\n            number_of_migrants = 0\n\n        ui_formatted_response.append({\"latitude\": latd, \"longitude\": long, \"locationName\": location_name, \"numberOfMigrants\": number_of_migrants})\n\n    return json.dumps(ui_formatted_response)\n","sub_path":"crisis_map_backend/services/geoparsing/format_to_fit_ui/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"131947667","text":"#!/usr/bin/python3\n\"\"\"places reviews\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, make_response, request\nfrom models import storage\nfrom models.review import Review\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\n\n\n@app_views.route('/places/<place_id>/reviews', methods=['GET'],\n                 strict_slashes=False)\ndef get_reviews(place_id):\n    \"\"\"Get all reviews for a place\"\"\"\n    place = storage.get(Place, place_id)\n    if place is None:\n        abort(404)\n    reviews = []\n    for review in place.reviews:
\n        reviews.append(review.to_dict())\n    return jsonify(reviews)\n\n\n@app_views.route('/reviews/<review_id>', methods=['GET'],\n                 strict_slashes=False)\ndef get_review(review_id):\n    \"\"\"Get information for the specified review\"\"\"\n    review = storage.get(Review, review_id)\n    if review is None:\n        abort(404)\n    return jsonify(review.to_dict())\n\n\n@app_views.route('/reviews/<review_id>', methods=['DELETE'],\n                 strict_slashes=False)\ndef delete_review(review_id):\n    \"\"\"Delete a review\"\"\"\n    review = storage.get(Review, review_id)\n    if review is None:\n        abort(404)\n    storage.delete(review)\n    storage.save()\n    return (jsonify({}))\n\n\n@app_views.route('/places/<place_id>/reviews', methods=['POST'],\n                 strict_slashes=False)\ndef post_review(place_id):\n    \"\"\"Create a review\"\"\"\n    place = storage.get(Place, place_id)\n    if place is None:\n        abort(404)\n    if not request.get_json():\n        return make_response(jsonify({'error': 'Not a JSON'}), 400)\n    jsonfile = request.get_json()\n    if 'user_id' not in jsonfile:\n        return make_response(jsonify({'error': 'Missing user_id'}), 400)\n    user = storage.get(User, jsonfile['user_id'])\n    if user is None:\n        abort(404)\n    if 'text' not in jsonfile:\n        return make_response(jsonify({'error': 'Missing text'}), 400)\n    jsonfile['place_id'] = place_id\n    review = Review(**jsonfile)\n    review.save()\n    return make_response(jsonify(review.to_dict()), 201)\n\n\n@app_views.route('/reviews/<review_id>', methods=['PUT'],\n                 strict_slashes=False)\ndef put_review(review_id):\n    \"\"\"Update a review\"\"\"\n    review = storage.get(Review, review_id)\n    if review is None:\n        abort(404)\n    if not request.get_json():\n        return make_response(jsonify({'error': 'Not a JSON'}), 400)\n    for attr, val in request.get_json().items():\n        if attr not in ['id', 'user_id', 'place_id',\n                        'created_at', 'updated_at']:\n            setattr(review, attr, val)\n    review.save()\n    return jsonify(review.to_dict())\n","sub_path":"api/v1/views/places_reviews.py","file_name":"places_reviews.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"69475739","text":"#!/usr/bin/env python\n\nimport os\n\nmyfilename = \"/opt/housing.data.txt\"\n\nfeatures_dict = {}\n\nfor col in range(0,14):\n    features_dict[col]=[]\n\n# if os.path.isfile(myfilename):\n#     print(\"yay, I have a file\")\n#     if sky == blue:\n#         print('yay the sky is blue')\n#     else:\n#         print ('boo, no files for me')\n\nwith open(myfilename, 'r') as file_handle:\n    for line in file_handle.readlines():\n        line_clean = line.replace('  ', ' ').replace('  ', ' ')\n        line_clean = line_clean.strip()\n        values = line_clean.split(' ')\n        #print(values)\n        list=[]\n        i=0\n        for value in values:\n            # for homework:\n            # identify what type of data each value is, and cast it\n            # to the appropriate type, then print the new, properly-typed\n            # list to the screen.\n            # I.e.
['0.04741', '0.00', '11.930', '0', '0.5730', '6.0300', '80.80', '2.5050', '1', '273.0', '21.00', '396.90', '7.88', '11.90']\n            # becomes: [0.04741, 0.0, 11.93, 0, 0.573, 6.03, 80.8, 2.505, 1, 273.0, 21.0, 396.90, 7.88, 11.90]\n            int_list = [3,8]\n            two_decimal_positions = [11,13]\n\n            if (i in int_list):\n                list.append(int(value))\n\n            elif (i in two_decimal_positions):\n                # here I tried several approaches to keep the trailing zero as in 11.90 without storing the value as a string, but none of them worked\n                #list.append(\"{0:.2f}\".format(float(value)))\n                #list.append('%.2f' % float(value))\n                #list.append(float(format(float(value), '.2f')))\n                list.append(round(float(value),2))\n\n            else: list.append(float(value))\n\n            i += 1\n\n        print (list)\n\n        # new list (here I used a dictionary) for each of the columns in the dataset\n        for col in range(0,14):\n            features_dict[col].append(list[col])\n\n    print (\"\\n**Print Features Columns Data**\\n\")\n    for col in range(0,14):\n        print (features_dict[col])\n\n    print('finished!')\n","sub_path":"our_python_parser.py","file_name":"our_python_parser.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"206859313","text":"import pandas as pd\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef decision():\n    \"\"\"\n    Predict Titanic passenger survival with a decision tree / random forest\n    :return: None\n    \"\"\"\n\n    # Fetch the data\n    titan = pd.read_csv(\"http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt\")\n    # print(titan.head(5))\n\n    # Process the data: select the features and the target\n    x = titan[[\"pclass\", \"age\", \"sex\"]]  # features\n    y = titan[[\"survived\"]]  # target\n\n    # Handle missing values\n    # x[\"age\"].fillna(x[\"age\"].mean(), inplace=True)\n\n    # Split the data\n    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)\n\n    # Feature engineering: one-hot encode the categorical features\n    # Use TfidfVectorizer only for text data (tokenization, term weighting); for plain attributes use DictVectorizer -- note the input must be converted to dicts first\n    # [{\"age\":11, \"pclass\": \"1st\", \"sex\":\"male\"},{},{}]\n    dict = DictVectorizer(sparse=False)\n    x_train = dict.fit_transform(x_train.to_dict(orient=\"records\"))\n    print(dict.get_feature_names())\n    x_test = dict.transform(x_test.to_dict(orient=\"records\"))\n    # print(x_train)\n\n    # Missing values\n    data = SimpleImputer(missing_values=np.nan, strategy='mean')\n    x_train = data.fit_transform(x_train)\n    x_test = data.transform(x_test)\n    # print(\"After imputing missing values:\\n\", x_train, x_test)\n    # print(x)\n\n    # Predict with a decision tree\n    # dec = DecisionTreeClassifier(max_depth=8)\n    # dec.fit(x_train, y_train)\n    #\n    # # Prediction accuracy\n    # gc = GridSearchCV(dec, param_grid={\"max_depth\": [5, 6],}, cv=2)\n    # gc.fit(x_train, y_train)\n    # print(\"Prediction accuracy:\", dec.score(x_test, y_test))\n    # print(\"Accuracy on the test set:\", gc.score(x_test, y_test))\n    # print(\"Best cross-validation score:\", gc.best_score_)\n    # print(\"Best model selected:\", gc.best_estimator_)\n    # print(\"Cross-validation results for each hyperparameter setting:\", gc.cv_results_)\n\n    # Save the decision tree structure locally in DOT format\n    # Export the decision tree structure\n    # export_graphviz(dec, out_file=\"./tree.dot\", feature_names=['age', 'pclass=1st', 'pclass=2nd', 'pclass=3rd', 'female', 'male'])\n\n    # Predict with a random forest (with hyperparameter tuning)\n    rf = RandomForestClassifier()\n    params = {\n        \"n_estimators\": [120, 200, 300, 500, 800, 1200],\n        \"max_depth\": [5, 8, 15, 25, 30]\n    }\n    print(y_train.values.flatten())\n    # Grid search with cross-validation\n    gc = GridSearchCV(rf, param_grid=params, cv=2)\n    gc.fit(x_train, y_train.values.flatten())\n    print(\"Selected best parameters:\", gc.best_params_)\n    print(\"Accuracy on the test set:\", gc.score(x_test, y_test))\n    print(\"Best cross-validation score:\", gc.best_score_)\n    print(\"*\" * 100)\n    # print(\"Best model selected:\", gc.best_estimator_)\n    # print(\"Cross-validation results for each hyperparameter setting:\", gc.cv_results_)\n\n    return None\n\n\nif __name__ == \"__main__\":\n    decision()\n","sub_path":"14_随机森林.py","file_name":"14_随机森林.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"563283570","text":"### Helpers for pairwise distance notebook\nfrom pathlib import Path\nimport numpy as np\nfrom scipy.stats import wasserstein_distance\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport seaborn as sns\n\n# To prevent bug in figure export as pdf:\nimport matplotlib as mpl\nmpl.rcParams['pdf.fonttype'] = 42\n\n#### NEW FUNCTIONS MARCH 2021\n\ndef norm_pairw_nn_df(df, cols_to_norm, cols, norm_to):\n    '''\n    Return normalized pairwise distance or NN results\n    (Fetched from PairwDist.PairwD or PairwDist.NN)\n\n    Parameter\n    ---------\n    df : Pandas dataframe with PairwDist results\n    cols_to_norm: list: Columns that should be kept and normalized\n    cols : list : Additional columns that should be kept but not normalized\n    norm_to : string: Column name that the others (cols) should be normalized to\n    '''\n    if isinstance(cols_to_norm, str):\n        cols_to_norm = [cols_to_norm]\n    if isinstance(cols, str):\n        cols = [cols]\n    if isinstance(norm_to, str):\n        norm_to = [norm_to]\n\n    print(f'Normalising to {norm_to}')\n\n    df_kept = df.copy()\n    df_kept = df_kept[set(cols_to_norm + cols + norm_to)]\n\n    # Normalize\n    for col in df_kept.columns.values:\n        if col in norm_to + cols:\n            continue\n        else:\n            df_kept[col] = df_kept[col] / df_kept[norm_to[0]]\n    df_kept[norm_to] = np.ones_like(df_kept[norm_to])\n\n    return df_kept\n\n\ndef norm_nn_df(df, norm_to='mean_nn_shuff_all'):\n    '''\n    Return normalized NN result (NN over n nearest neighbours)\n\n    Parameter\n    ---------\n    df : Pandas dataframe with PairwDist.NN() results\n    norm_to : string: Column name that \"mean_nn\" (data) should be normalized to\n              CAVE Only possible option atm is norm_to = mean_nn_shuff_all\n\n    Returns\n    -------\n    mean_nns : numpy array\n        Normalized mean NN distances over NN\n    mean_nn_shuff_refs : numpy array\n        Normalized mean NN distances of reference population over NN\n    '''\n    assert norm_to == 'mean_nn_shuff_all', f'Normalisation of NN to {norm_to} is not implemented yet'\n    print(f'Normalising mean_nn and mean_nn_shuff_ref to {norm_to}')\n\n    mean_nns = []\n    mean_nn_shuff_refs = []\n\n    # Normalize all to 'mean_nn_shuff_all'\n    for _, nn in df.iterrows():\n        mean_nns.append(np.array(nn['mean_nn']) / np.array(nn[norm_to]))\n        mean_nn_shuff_refs.append(np.array(nn['mean_nn_shuff_ref']) / np.array(nn[norm_to]))\n\n    mean_nns = np.stack(mean_nns)\n    mean_nn_shuff_refs = np.stack(mean_nn_shuff_refs)\n    return mean_nns, mean_nn_shuff_refs\n\n\ndef plot_pairw_nn_summary(pairw_df_norm,\n                          cols_to_norm,\n                          colors,\n                          xlabels=None,\n                          save_path=None,\n                          label=''\n                          ):\n    '''\n    Plot pairwise distance or NN summary (line + boxplots)\n    '''\n\n    sns.set(style='white',font_scale=1.4)\n    plt.rcParams['xtick.major.size'] = 10\n    plt.rcParams['xtick.major.width'] = 1\n    plt.rcParams['ytick.major.size'] = 10\n    plt.rcParams['ytick.major.width'] = 1\n    plt.rcParams['xtick.bottom'] = True\n    plt.rcParams['ytick.left'] = True\n\n    figure = plt.figure(constrained_layout=False,
figsize=(3,4))\n gs = figure.add_gridspec(nrows=1, ncols=9, wspace=0, hspace=1)\n ax_lines = figure.add_subplot(gs[:, :5])\n ax_box = figure.add_subplot(gs[:, 6:]) \n\n no_entries = len(pairw_df_norm)\n sqrt_no_entries = np.sqrt(no_entries)\n\n # LINE PLOT\n for no, row in pairw_df_norm.iterrows():\n ax_lines.plot(np.arange(len(cols_to_norm)),\n np.array([row[col] for col in cols_to_norm]),\n color=colors[no], alpha=.3)\n\n averages = np.array([np.nanmean(pairw_df_norm[col]) for col in cols_to_norm])\n sems = np.array([np.std(pairw_df_norm[col])/sqrt_no_entries for col in cols_to_norm])\n\n ax_lines.errorbar(np.arange(len(cols_to_norm)),\n averages,\n sems,\n color='k', lw=3, zorder=10, marker='.',alpha=.9\n )\n\n ax_lines.set_xticks(np.arange(len(cols_to_norm)))\n if xlabels is None:\n ax_lines.set_xticklabels([col for col in cols_to_norm], rotation=40,ha='right')\n else:\n ax_lines.set_xticklabels(xlabels, rotation=40, ha='right')\n ax_lines.axhline(y=1, ls=':', color='k')\n \n ax_lines.set_xlim([-.25, len(cols_to_norm)-.75])\n ax_lines.set_ylabel(f'Norm. {label} distance')\n ylim = ax_lines.get_ylim()\n \n #BOX PLOT\n ax_box.boxplot(pairw_df_norm[cols_to_norm], widths=.7, showmeans=True, meanprops={'marker':'+','markeredgecolor':'k'})\n ax_box.axhline(y=1, ls=':', color='k')\n ax_box.set_xticks(np.arange(len(cols_to_norm))+1)\n if xlabels is None:\n ax_box.set_xticklabels([col for col in cols_to_norm], rotation=40, ha='right')\n else:\n ax_box.set_xticklabels(xlabels, rotation=40, ha='right')\n ax_box.set_ylim(ylim[0], ylim[1])\n ax_box.get_yaxis().set_visible(False)\n \n sns.despine(left=True)\n\n if save_path is not None: \n save_path = Path(save_path)\n figure.savefig(save_path / f'dist_summary_{label}.pdf', dpi=300, bbox_inches='tight')\n\n #plt.show()\n\n\n\ndef plot_mean_nn_over_nn(mean_nns, mean_nn_shuff_refs, save_path=None):\n '''\n PairwDist.NN()\n Create plot of normalised mean NN distance over number\n of nearest neighbours (PairwDist.NN)\n \n \n '''\n sns.set(style='white',font_scale=1.5)\n plt.rcParams['xtick.major.size'] = 10\n plt.rcParams['xtick.major.width'] = 1\n plt.rcParams['ytick.major.size'] = 10\n plt.rcParams['ytick.major.width'] = 1\n plt.rcParams['xtick.bottom'] = True\n plt.rcParams['ytick.left'] = True\n\n\n\n figure = plt.figure(figsize=(3,4))\n ax = figure.add_subplot(111)\n\n ax.axhline(y=1, ls=':', color='k', lw=1)\n\n assert mean_nns.shape == mean_nn_shuff_refs.shape, 'Matrices do not have the same dimensions'\n\n # Plot mean / std\n n = mean_nns.shape[0]\n sqrt_n = np.sqrt(n)\n\n # Ref\n for row in np.arange(mean_nn_shuff_refs.shape[0]):\n ax.plot(np.arange(mean_nn_shuff_refs.shape[1]),\n mean_nn_shuff_refs[row,:],\n color='cornflowerblue', lw=1, alpha=.1)\n\n\n ax.errorbar(np.arange(mean_nn_shuff_refs.shape[1]),\n np.nanmean(mean_nn_shuff_refs, axis=0),\n np.nanstd(mean_nn_shuff_refs, axis=0)/sqrt_n,\n color='cornflowerblue', lw=2, zorder=10, marker='.',alpha=.9\n )\n\n # Data\n for row in np.arange(mean_nns.shape[0]):\n ax.plot(np.arange(mean_nns.shape[1]),\n mean_nns[row,:],\n color='k', lw=1, alpha=.1)\n\n\n ax.errorbar(np.arange(mean_nns.shape[1]),\n np.nanmean(mean_nns, axis=0),\n np.nanstd(mean_nns, axis=0)/sqrt_n,\n color='k', lw=2, zorder=10, marker='.',alpha=.9\n )\n\n ax.set_xticks(np.arange(mean_nns.shape[1]))\n ax.set_xticklabels(np.arange(mean_nns.shape[1])+1)\n ax.set_xlim([-.25, mean_nns.shape[1]-.75])\n\n sns.despine(left=True,bottom=True)\n\n if save_path is not None: \n save_path = Path(save_path)\n figure.savefig(save_path / 
f'mean_nn_over_nn.pdf', dpi=300, bbox_inches='tight')\n\n\n\n","sub_path":"helpers_topography/notebooks/pairw_distances.py","file_name":"pairw_distances.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"141060564","text":"#!/usr/bin/env python3\nimport json\nimport re\n\nfrom env import env\nfrom run_common import AWSCli\nfrom run_common import print_message\nfrom run_common import print_session\nfrom run_common import reset_template_dir\n\nargs = []\n\nif __name__ == \"__main__\":\n from run_common import parse_args\n\n args = parse_args()\n\n\ndef create_sms_log():\n aws_cli = AWSCli('ap-northeast-1')\n\n role_name = 'aws-sns-sms-log-role'\n policy_name = 'aws-sns-sms-log-policy'\n\n print_message(f'create role: {role_name}')\n\n role = aws_cli.get_iam_role(role_name)\n\n if not role:\n cmd = ['iam', 'create-role']\n cmd += ['--role-name', role_name]\n cmd += ['--assume-role-policy-document', f'file://aws_iam/{role_name}.json']\n role = aws_cli.run(cmd)\n\n cmd = ['iam', 'put-role-policy']\n cmd += ['--role-name', role_name]\n cmd += ['--policy-name', policy_name]\n cmd += ['--policy-document', f'file://aws_iam/{policy_name}.json']\n aws_cli.run(cmd)\n\n role_arn = role['Role']['Arn']\n\n print_message('start sms log')\n\n dd = {'attributes': {'DeliveryStatusSuccessSamplingRate': '100',\n 'DeliveryStatusIAMRole': role_arn}}\n cmd = ['sns', 'set-sms-attributes']\n cmd += ['--cli-input-json', json.dumps(dd)]\n aws_cli.run(cmd)\n\n\ndef run_create_cw_dashboard_elasticbeanstalk(name, settings):\n dashboard_region = settings['AWS_DEFAULT_REGION']\n aws_cli = AWSCli(dashboard_region)\n\n print_message(f'get elasticbeanstalk environment info: {name}')\n\n cmd = ['elasticbeanstalk', 'describe-environments']\n cmd += ['--no-include-deleted']\n result = aws_cli.run(cmd)\n\n env_list = list()\n for ee in result['Environments']:\n ename = ee['EnvironmentName']\n if ename.startswith(name):\n env_list.append(ee)\n\n env_instances_list = list()\n env_asg_list = list()\n env_elb_list = list()\n env_tg_list = list()\n\n for ee in env_list:\n cmd = ['elasticbeanstalk', 'describe-environment-resources']\n cmd += ['--environment-id', ee['EnvironmentId']]\n result = aws_cli.run(cmd)\n ee_res = result['EnvironmentResources']\n for instance in ee_res['Instances']:\n ii = dict()\n ii['Id'] = instance['Id']\n ii['EnvironmentName'] = ee_res['EnvironmentName']\n env_instances_list.append(ii)\n for asg in ee_res['AutoScalingGroups']:\n ii = dict()\n ii['Name'] = asg['Name']\n ii['EnvironmentName'] = ee_res['EnvironmentName']\n env_asg_list.append(ii)\n for elb in ee_res['LoadBalancers']:\n ii = dict()\n ii['Name'] = elb['Name']\n ii['EnvironmentName'] = ee_res['EnvironmentName']\n env_elb_list.append(ii)\n for elb in ee_res['LoadBalancers']:\n cmd = ['elbv2', 'describe-target-groups']\n cmd += ['--load-balancer-arn', elb['Name']]\n result = aws_cli.run(cmd, ignore_error=True)\n for tg in result.get('TargetGroups', list()):\n tt = re.match(r'^.+(targetgroup/.+)$', tg['TargetGroupArn'])\n ll = re.match(r'^.+loadbalancer/(.+)$', elb['Name'])\n ii = dict()\n ii['Name'] = tt[1]\n ii['LoadBalancer'] = ll[1]\n ii['EnvironmentName'] = ee_res['EnvironmentName']\n env_tg_list.append(ii)\n\n ################################################################################\n dashboard_name = '%s_%s' % (name, dashboard_region)\n print_message('create or update cloudwatch dashboard: %s' % dashboard_name)\n\n template_name = 
env['template']['NAME']\n filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)\n with open(filename_path, 'r') as ff:\n dashboard_body = json.load(ff)\n\n for dw in dashboard_body['widgets']:\n if not dw['properties'].get('metrics'):\n continue\n pm = dw['properties']['metrics']\n\n dimension_type = 'env'\n for dimension in pm[0]:\n if dimension == 'InstanceId':\n dimension_type = 'instance'\n elif dimension == 'AutoScalingGroupName':\n dimension_type = 'asg'\n elif dimension == 'LoadBalancerName':\n dimension_type = 'elb'\n elif dimension == 'TargetGroup':\n dimension_type = 'tg'\n\n template = json.dumps(pm[0])\n new_metrics_list = list()\n if dimension_type == 'asg':\n for ii in env_asg_list:\n new_metric = template.replace('AUTO_SCALING_GROUP_NAME', ii['Name'])\n new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n elif dimension_type == 'instance':\n for ii in env_instances_list:\n new_metric = template.replace('INSTANCE_ID', ii['Id'])\n new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n elif dimension_type == 'elb':\n for ii in env_elb_list:\n new_metric = template.replace('LOAD_BALANCER_NAME', ii['Name'])\n new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n elif dimension_type == 'tg':\n for ii in env_tg_list:\n new_metric = template.replace('TARGET_GROUP', ii['Name'])\n new_metric = new_metric.replace('LOAD_BALANCER', ii['LoadBalancer'])\n new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n else:\n for ii in env_list:\n new_metric = template.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n\n dw['properties']['metrics'] = new_metrics_list\n\n dashboard_body = json.dumps(dashboard_body)\n\n cmd = ['cloudwatch', 'put-dashboard']\n cmd += ['--dashboard-name', dashboard_name]\n cmd += ['--dashboard-body', dashboard_body]\n aws_cli.run(cmd)\n\n\ndef run_create_cw_dashboard_rds_aurora(name, settings):\n if not env.get('rds'):\n print_message('No RDS settings in config.json')\n return\n\n if env['rds'].get('ENGINE') != 'aurora':\n print_message('Only RDS Aurora supported')\n\n dashboard_region = settings['AWS_DEFAULT_REGION']\n aws_cli = AWSCli(dashboard_region)\n\n cluster_id = env['rds']['DB_CLUSTER_ID']\n instance_role_list = list()\n instance_role_list.append('WRITER')\n instance_role_list.append('READER')\n\n dashboard_name = '%s_%s' % (name, dashboard_region)\n print_message('create or update cloudwatch dashboard: %s' % dashboard_name)\n\n template_name = env['template']['NAME']\n\n filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)\n with open(filename_path, 'r') as ff:\n dashboard_body = json.load(ff)\n\n for dw in dashboard_body['widgets']:\n pm = dw['properties']['metrics']\n\n cluster_id_only = True\n for dimension in pm[0]:\n if dimension == 'Role':\n cluster_id_only = False\n\n template = json.dumps(pm[0])\n new_metrics_list = list()\n if cluster_id_only:\n new_metric = template.replace('DB_CLUSTER_IDENTIFIER', cluster_id)\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n else:\n for ir in instance_role_list:\n 
new_metric = template.replace('DB_CLUSTER_IDENTIFIER', cluster_id)\n new_metric = new_metric.replace('ROLE', ir)\n new_metric = json.loads(new_metric)\n new_metrics_list.append(new_metric)\n\n dw['properties']['metrics'] = new_metrics_list\n\n dashboard_body = json.dumps(dashboard_body)\n\n cmd = ['cloudwatch', 'put-dashboard']\n cmd += ['--dashboard-name', dashboard_name]\n cmd += ['--dashboard-body', dashboard_body]\n aws_cli.run(cmd)\n\n\ndef run_create_cw_dashboard_sqs_lambda_sms(name, settings):\n print_message('create sms log')\n create_sms_log()\n\n phase = env['common']['PHASE']\n dashboard_region = settings['AWS_DEFAULT_REGION']\n aws_cli = AWSCli(dashboard_region)\n\n dashboard_name = '%s_%s' % (name, dashboard_region)\n print_message('create or update cloudwatch dashboard: %s' % dashboard_name)\n\n template_name = env['template']['NAME']\n\n filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)\n with open(filename_path, 'r') as ff:\n dashboard_body = json.load(ff)\n\n for dw in dashboard_body['widgets']:\n pm = dw['properties']['metrics']\n\n current_index = 0\n\n for pp in pm:\n template = json.dumps(pp)\n template = template.replace('PHASE-', '%s-' % phase)\n pm[current_index] = json.loads(template)\n current_index += 1\n\n dw['properties']['metrics'] = pm\n\n title = dw['properties']['title']\n if title.startswith('SQS: PHASE-'):\n title = title.replace('SQS: PHASE-', 'SQS: %s-' % phase)\n dw['properties']['title'] = title\n\n dashboard_body = json.dumps(dashboard_body)\n\n cmd = ['cloudwatch', 'put-dashboard']\n cmd += ['--dashboard-name', dashboard_name]\n cmd += ['--dashboard-body', dashboard_body]\n aws_cli.run(cmd)\n\n\ndef run_create_cw_dashboard_alarm(name, settings):\n phase = env['common']['PHASE']\n alarm_region = settings['AWS_DEFAULT_REGION']\n aws_cli = AWSCli(alarm_region)\n\n dashboard_name = '%s_%s' % (name, alarm_region)\n\n widgets = list()\n cmd = ['cloudwatch', 'describe-alarms']\n cmd += ['--alarm-name-prefix', '%s-' % phase]\n rr = aws_cli.run(cmd)\n\n for (ii, aa) in enumerate(rr['MetricAlarms']):\n y = ii // 4 * 6\n x = ii % 4 * 6\n widgets.append({\n \"height\": 6,\n \"properties\": {\n \"title\": aa['AlarmName'],\n \"annotations\": {\n \"alarms\": [\n aa['AlarmArn']\n ]\n },\n \"view\": \"timeSeries\",\n \"stacked\": False\n },\n \"type\": \"metric\",\n \"width\": 6,\n \"x\": x,\n \"y\": y\n })\n\n cmd = ['cloudwatch', 'put-dashboard']\n cmd += ['--dashboard-name', dashboard_name]\n cmd += ['--dashboard-body', json.dumps({\n 'widgets': widgets\n })]\n aws_cli.run(cmd)\n\n\n################################################################################\n#\n# start\n#\n################################################################################\nprint_session('create cloudwatch dashboard')\n\nreset_template_dir()\n\ncw = env.get('cloudwatch', dict())\ntarget_cw_dashboard_name = None\nregion = None\ncheck_exists = False\n\nif len(args) > 1:\n target_cw_dashboard_name = args[1]\n\nif len(args) > 2:\n region = args[2]\n\nfor cw_dashboard_env in cw.get('DASHBOARDS', list()):\n if target_cw_dashboard_name and cw_dashboard_env['NAME'] != target_cw_dashboard_name:\n continue\n\n if region and cw_dashboard_env.get('AWS_DEFAULT_REGION') != region:\n continue\n\n if target_cw_dashboard_name:\n check_exists = True\n\n if cw_dashboard_env['TYPE'] == 'elasticbeanstalk':\n run_create_cw_dashboard_elasticbeanstalk(cw_dashboard_env['NAME'], cw_dashboard_env)\n elif cw_dashboard_env['TYPE'] == 'rds/aurora':\n 
run_create_cw_dashboard_rds_aurora(cw_dashboard_env['NAME'], cw_dashboard_env)\n    elif cw_dashboard_env['TYPE'] == 'sqs,lambda,sms':\n        run_create_cw_dashboard_sqs_lambda_sms(cw_dashboard_env['NAME'], cw_dashboard_env)\n    elif cw_dashboard_env['TYPE'] == 'alarm':\n        run_create_cw_dashboard_alarm(cw_dashboard_env['NAME'], cw_dashboard_env)\n    else:\n        print('\"%s\" is not supported' % cw_dashboard_env['TYPE'])\n        raise Exception()\n\nif not check_exists and target_cw_dashboard_name and not region:\n    print('\"%s\" does not exist in config.json' % target_cw_dashboard_name)\n\nif not check_exists and target_cw_dashboard_name and region:\n    print('\"%s, %s\" does not exist in config.json' % (target_cw_dashboard_name, region))\n","sub_path":"run_create_cloudwatch_dashboard.py","file_name":"run_create_cloudwatch_dashboard.py","file_ext":"py","file_size_in_byte":12585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"507245937","text":"#Grocery List App\r\nimport datetime\r\n\r\nprint(\"Welcome to our Grocery List App\")\r\n\r\nGlist = [\"Meat\",\"Cheese\"]\r\n\r\ntime = datetime.datetime.now()\r\nmonth = str(time.month)\r\nday = str(time.day)\r\nhour = str(time.hour)\r\nminute = str(time.minute)\r\n\r\nprint(time)\r\n\r\nprint(\"Your current list is \" + str(len(Glist))+ \" items long and includes: \", end = ' ')\r\nfor i in Glist:\r\n    if i == Glist[-1]:\r\n        print(i, end = ' ')\r\n    else:\r\n        print(i, end = ', ')\r\n\r\nGlist.append(input(\"\\nWhat else would you like to add? \").title())\r\n\r\nprint(\"Your current list is \" + str(len(Glist))+ \" items long and includes: \", end = ' ')\r\nfor i in Glist:\r\n    if i == Glist[-1]:\r\n        print(i, end = '\\n')\r\n    else:\r\n        print(i, end = ', ')\r\n\r\n\r\n# iterate over a copy so that removing items below does not skip elements\r\nfor i in Glist[:]:\r\n    if i == Glist[-2]:\r\n        print(\"The store didn't have \" + i)\r\n        Glist.append(input(\"What would you like instead? 
\").title())\r\n else:\r\n print(\"You bought \" + i)\r\n print(\"Removing \" + i + \" from the list\")\r\n Glist.remove(i)\r\n \r\nprint(\"Your current list is \" + str(len(Glist))+ \" items long and includes: \", end = ' ')\r\nfor i in Glist:\r\n if i == Glist[-1]:\r\n print(i, end = ' ')\r\n else:\r\n print(i, end = ', ') \r\n","sub_path":"Art_of_doing/Grocery List App.py","file_name":"Grocery List App.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"448621854","text":"from ticker_service import *\nfrom bins_runner import *\nfrom db_serv import *\n\ndb_service = get_db_serv(\"bithumb.db\")\n\nbins_trader = BINS_RUNNER(\"BTC\", db_service)\nbins_trader.daemon=True\nbins_trader.start()\n\n\ntick_btc_bithumb = Bithumb_Price_Service(\"BTC\", db_service, bins_trader)\n# tick_xrp_bithumb = Bithumb_Price_Service(\"BTC\")\ntick_btc_bithumb.daemon=True\ntick_btc_bithumb.start()\n\ni=1\nwhile i>0:\n sleep(1000)\n","sub_path":"go_btc.py","file_name":"go_btc.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"420486841","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport csv\r\nimport os.path\r\nimport os\r\nfrom openpyxl import Workbook\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.utils import coordinate_from_string, column_index_from_string, get_column_letter\r\nimport tempfile\r\n\r\nprompt = '--> '\r\n\r\ndef main():\r\n if len(sys.argv) > 1:\r\n interactiveMode = False\r\n\r\n script, srcDir, srcFile, destDir = sys.argv\r\n\r\n srcDirFile = os.path.normpath(os.path.join(srcDir, srcFile))\r\n\r\n dirfile_vals = {\r\n \"srcDirFile\": srcDirFile,\r\n \"srcDir\": srcDir,\r\n \"srcFile\": srcFile,\r\n \"destDir\": destDir\r\n }\r\n\r\n else:\r\n interactiveMode = True\r\n\r\n if os.name == 'nt':\r\n clearscreen = 'cls'\r\n else:\r\n clearscreen = 'clear'\r\n\r\n clear = lambda: os.system(clearscreen)\r\n clear()\r\n\r\n dirfile_vals = getDirFiles()\r\n\r\n hql_vals = readSource(dirfile_vals[\"srcDirFile\"])\r\n\r\n hqlDirFile = createDDLFile(dirfile_vals[\"destDir\"], hql_vals)\r\n #removeDOSChars(hqlDirFile)\r\n\r\n if interactiveMode:\r\n print(\"\\n\\n\")\r\n print(\"*** SUCCESS: DDL has been created ***\")\r\n print(\"\\n\\n\")\r\n\r\ndef getDirFiles():\r\n print(\"\\nPlease enter the data dictionary directory location: \")\r\n srcDir = input(prompt)\r\n\r\n print(\"\\nPlease enter the data dictionary filename: \")\r\n srcFile = input(prompt)\r\n\r\n print(\"\\nPlease enter the HQL destination directory location: \")\r\n destDir = input(prompt)\r\n\r\n if os.name == 'nt':\r\n srcDir = srcDir.replace(\"/c/\", \"c:/\")\r\n destDir = destDir.replace(\"/c/\", \"c:/\")\r\n\r\n srcDirFile = os.path.normpath(os.path.join(srcDir, srcFile))\r\n\r\n dirfile_vals = {\r\n \"srcDirFile\": srcDirFile,\r\n \"srcDir\": srcDir,\r\n \"srcFile\": srcFile,\r\n \"destDir\": destDir\r\n }\r\n\r\n return dirfile_vals\r\n\r\ndef readSource(inSrcDirFile):\r\n # Initialize vars\r\n colhdrs = 0\r\n cols = \"\"\r\n col_idx = 0\r\n part_idx = 0\r\n\r\n # Initialize HQL list\r\n hql_vals = {\r\n \"initial_comment\": \"-- Should be run via Hive CLI with the following eg: \",\r\n \"database\": \"\",\r\n \"dest_db\": \"analytics\",\r\n \"viewnm\": \"\",\r\n \"create_view\": \"\",\r\n \"col_nms\": \"\",\r\n \"cols\": \"\"\r\n }\r\n\r\n # Open the data dictionary XLSX\r\n wb = 
load_workbook(inSrcDirFile, data_only=True)\r\n ws = wb.get_sheet_by_name(name='Data Source')\r\n\r\n # Start reading the rows\r\n colhdrs = False\r\n db = \"\"\r\n col_def = \"\"\r\n col_idx = 0\r\n\r\n for row in ws.iter_rows():\r\n # Start reading the cells\r\n for cell in row:\r\n cellContent = str(cell.value)\r\n cellXY = coordinate_from_string(str(cell.coordinate))\r\n if cellContent == 'None':\r\n continue\r\n elif cellContent == 'Target Database':\r\n db = getNextCellValue(ws, cell, cellXY)\r\n hql_vals[\"database\"] = db\r\n elif cellContent == 'Table Name':\r\n tblnm = getNextCellValue(ws, cell, cellXY)\r\n hql_vals[\"viewnm\"] = tblnm\r\n hql_vals[\"create_view\"] = \"CREATE VIEW IF NOT EXISTS {0}.view_{1} AS \".format(hql_vals[\"dest_db\"], tblnm)\r\n elif cellContent == 'Column Name':\r\n colhdrs = True\r\n # The next iteration will start the column list\r\n continue\r\n\r\n src_column_nm_ltr = 'A'\r\n dest_column_nm_ltr = 'B'\r\n\r\n if colhdrs:\r\n currcol = column_index_from_string(cellXY[0])\r\n if currcol == 1:\r\n if (ws[src_column_nm_ltr + str(cell.row)].value\r\n and ws[src_column_nm_ltr + str(cell.row)].value.strip()):\r\n src_column_nm = ws[src_column_nm_ltr + str(cell.row)].value\r\n if src_column_nm == 'Partition':\r\n continue\r\n if src_column_nm.lower() in 'audit':\r\n continue\r\n else:\r\n src_column_nm = \"\"\r\n\r\n if (ws[dest_column_nm_ltr + str(cell.row)].value\r\n and ws[dest_column_nm_ltr + str(cell.row)].value.strip()):\r\n dest_column_nm = ws[dest_column_nm_ltr + str(cell.row)].value\r\n if dest_column_nm in 'audit':\r\n continue\r\n else:\r\n dest_column_nm = \"\"\r\n\r\n aliased_nm = dest_column_nm + ' AS ' + src_column_nm\r\n\r\n if aliased_nm.strip():\r\n col_def = aliased_nm\r\n else:\r\n col_def = \"\"\r\n\r\n if col_idx == 0:\r\n hql_vals[\"cols\"] = hql_vals[\"cols\"] + \"\\t \"\r\n elif col_idx > 0 and col_def.strip():\r\n hql_vals[\"cols\"] = hql_vals[\"cols\"] + \"\\n\\t,\"\r\n else:\r\n hql_vals[\"cols\"] = hql_vals[\"cols\"]\r\n\r\n hql_vals[\"cols\"] = hql_vals[\"cols\"] + col_def\r\n\r\n col_idx += 1\r\n else:\r\n continue\r\n wb = None\r\n\r\n return hql_vals\r\n\r\ndef getNextCellValue(inWS, inCell, inCellXY):\r\n currcol = column_index_from_string(inCellXY[0])\r\n newidx = (currcol + 1)\r\n\r\n newcol = get_column_letter(newidx)\r\n\r\n return inWS[str(newcol) + str(inCell.row)].value\r\n\r\ndef createDDLFile(inDestDir, inHQLVals):\r\n # Initialize HQL list\r\n # hql_vals = {\r\n # \"initial_comment\": \"-- Should be run via Hive CLI with the following eg: \",\r\n # \"database\": \"\",\r\n # \"dest_db\": \"analytics\",\r\n # \"viewnm\": \"\",\r\n # \"create_view\": \"\",\r\n # \"col_nms\": \"\",\r\n # \"cols\": \"\"\r\n # }\r\n\r\n # Use the view nm to create the filename\r\n destFileExt = '.hql'\r\n\r\n filenm = 'hive_create_' + inHQLVals[\"dest_db\"] + '_view_' + inHQLVals[\"viewnm\"] + destFileExt\r\n\r\n hqlDirFile = os.path.normpath(os.path.join(inDestDir, filenm))\r\n\r\n view_create = \"-- 'hive -f \" + filenm + \"'\"\r\n db_lc = inHQLVals[\"database\"].lower().replace(' ', '_')\r\n\r\n ddlFile = open(hqlDirFile, \"wt\")\r\n\r\n ddlFile.write(inHQLVals[\"initial_comment\"] + \"\\n\")\r\n ddlFile.write(view_create + \"\\n\\n\")\r\n ddlFile.write(inHQLVals[\"create_view\"] + \"\\n\")\r\n ddlFile.write(\"(\\n\")\r\n ddlFile.write(\"SELECT\")\r\n ddlFile.write(\"\\n\")\r\n ddlFile.write(inHQLVals[\"cols\"])\r\n ddlFile.write(\"\\n\")\r\n ddlFile.write(\"FROM\")\r\n ddlFile.write(\"\\n\\t\" + db_lc + \".\" + 
inHQLVals[\"viewnm\"])\r\n ddlFile.write(\"\\n)\\n\")\r\n\r\n ddlFile.close()\r\n\r\n return hqlDirFile\r\n\r\ndef removeDOSChars(inFile):\r\n filename = inFile\r\n text = open(filename, 'rb').read().replace('\\r\\n', '\\n')\r\n open(filename, 'wb').write(text)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"common/python/createViewDDLfromXLSX.py","file_name":"createViewDDLfromXLSX.py","file_ext":"py","file_size_in_byte":6957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"236899216","text":"from pymongo import MongoClient\nzips = MongoClient(\"127.0.0.1\").zips.zip\nsource = open(\"/home/washington/zips.csv\", \"r\")\nsource.readline()\nfor i in source:\n string = i.replace('\\n', '').replace('\\r', '').replace('\"','').split(',')\n zips.insert({\n \"_id\": string[0],\n \"zip\": string[1],\n \"city\": string[3],\n \"state\": string[4],\n \"location\": {\"lat\": string[6], \"long\": string[7]}\n })\n\n","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"235399498","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom neuralnetwork import *\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.models import load_model\nimport random\n\ndef scale_image(image,old_w,old_h,new_w,new_h):\n res = []\n x_ratio = old_w/new_w\n y_ratio = old_h/new_h\n for y in range(new_h):\n row = []\n for x in range(new_w):\n row.append(image[int(y*y_ratio)][int(x*x_ratio)])\n res.append(row)\n return res\n\nclass BBox:\n def __init__(self):\n self.minx = float(\"inf\")\n self.miny = float(\"inf\")\n self.maxx = float(\"-inf\")\n self.maxy = float(\"-inf\")\n \n def addPoint(self, point):\n if type(point) is QPoint:\n point = [point.x(),point.y()]\n self.minx = min(self.minx, point[0])\n self.miny = min(self.miny, point[1])\n self.maxx = max(self.maxx, point[0])\n self.maxy = max(self.maxy, point[1])\n \n \nclass MainWidget(QWidget):\n def __init__(self, parent=None):\n super(MainWidget, self).__init__(parent)\n \n self.initBrush()\n self.initPaintingCanvas()\n self.initPlotCanvas()\n self.initLayouts()\n \n self.items = []\n self.currItem = []\n self.currbbox = BBox()\n \n self.perceptron = Perceptron([784,30,10])\n self.perceptron.load(\"digit_recognition_coefs.txt\")\n \n self.convolution = load_model(\"digit_recognition.nn\")\n \n def initPlotCanvas(self):\n self.perceptronLabel = QLabel(\"\")\n self.perceptronLabel.setAlignment(Qt.AlignVCenter)\n self.perceptronLabel.setStyleSheet(\"font: 25pt Comic Sans MS\")\n self.convolutionLabel = QLabel(\"\")\n self.convolutionLabel.setAlignment(Qt.AlignVCenter)\n self.convolutionLabel.setStyleSheet(\"font: 25pt Comic Sans MS\")\n \n self.figure = plt.figure()\n self.plot = self.figure.add_subplot()\n self.plotCanvas = FigureCanvas(self.figure)\n \n def initPaintingCanvas(self):\n self.drawing = False\n self.paintingCanvas = QLabel()\n self.paintingCanvas.mousePressEvent = self.mousePressEvent\n self.paintingCanvas.mouseMoveEvent = self.mouseMoveEvent\n self.paintingCanvas.mouseReleaseEvent = self.mouseReleaseEvent\n \n self.image = QPixmap(800,600)\n self.image.fill(Qt.white)\n self.paintingCanvas.setPixmap(self.image)\n \n def mousePressEvent(self, event):\n if event.button() == Qt.LeftButton:\n 
self.drawing = True\n self.lastPoint = event.pos()\n self.currItem = [self.lastPoint]\n \n def mouseMoveEvent(self, event):\n if(event.buttons() & Qt.LeftButton) & self.drawing:\n painter = QPainter(self.paintingCanvas.pixmap())\n painter.setPen(QPen(self.brushColor, self.brushSize, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))\n painter.drawLine(self.lastPoint, event.pos())\n painter.end()\n self.lastPoint = event.pos()\n self.currItem.append(self.lastPoint)\n self.update()\n \n def mouseReleaseEvent(self, event):\n if event.button() == Qt.LeftButton:\n self.drawing = False\n bbox = BBox()\n for point in self.currItem:\n bbox.addPoint(point)\n bbox.minx -= 15\n bbox.miny -= 15\n bbox.maxx += 15\n bbox.maxy += 15\n file = str(len(self.items)) + \".png\"\n width = bbox.maxx - bbox.minx\n height = bbox.maxy - bbox.miny\n pixmap = QPixmap(width,height)\n pixmap.fill(Qt.white)\n for i in range(1,len(self.currItem)):\n painter = QPainter(pixmap)\n painter.setPen(QPen(self.brushColor, self.brushSize, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))\n painter.drawLine(QPoint(self.currItem[i].x() - bbox.minx,self.currItem[i].y() - bbox.miny), QPoint(self.currItem[i-1].x() - bbox.minx,self.currItem[i-1].y() - bbox.miny))\n painter.end()\n self.update()\n image = pixmap.toImage()\n bits = image.bits()\n bits.setsize(width*height*4)\n arr = np.frombuffer(bits, np.uint8).reshape((height, width,4))\n bitmask = []\n for row in arr:\n maskrow = []\n for pixel in row:\n maskrow.append(0.0 if pixel[0] > 0 or pixel[1] > 0 or pixel[2] > 0 else 1.0)\n bitmask.append(maskrow)\n \n new_width = 28\n new_height = 28\n \n bitmask = scale_image(bitmask,width,height,new_width,new_height)\n self.figure.canvas.flush_events()\n self.plot.imshow(bitmask,cmap='gray')\n self.figure.canvas.draw()\n #print(np.array(bitmask))\n input = np.reshape(np.array(bitmask),(1,784))\n perceptron_prediction = self.perceptron.predict(input)\n convolution_prediction = self.convolution.predict(np.reshape(bitmask,(1,28,28,1)))\n self.perceptronLabel.setText(\"Perceptron prediction: \"+str(np.argmax(perceptron_prediction)))\n self.convolutionLabel.setText(\"Convolution prediction: \"+str(np.argmax(convolution_prediction)))\n self.items.append(self.currItem)\n \n def undo(self):\n if len(self.items) > 0:\n self.items.pop()\n \n self.image = QPixmap(800,600)\n self.image.fill(Qt.white)\n self.paintingCanvas.setPixmap(self.image)\n for item in self.items:\n for i in range(1,len(item)):\n painter = QPainter(self.paintingCanvas.pixmap())\n painter.setPen(QPen(self.brushColor, self.brushSize, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))\n painter.drawLine(item[i], item[i-1])\n painter.end()\n \n \n def initBrush(self):\n self.brushSize = 2\n self.brushColor = Qt.black\n self.lastPoint = QPoint()\n \n \n def threePixel(self):\n self.brushSize = 3\n \n def fivePixel(self):\n self.brushSize = 5\n \n def sevenPixel(self):\n self.brushSize = 7\n \n def ninePixel(self):\n self.brushSize = 9\n \n \n def blackColor(self):\n self.brushColor = Qt.black\n \n def whiteColor(self):\n self.brushColor = Qt.white\n \n def redColor(self):\n self.brushColor = Qt.red\n \n def greenColor(self):\n self.brushColor = Qt.green\n \n def yellowColor(self):\n self.brushColor = Qt.yellow\n\n \n def initLayouts(self):\n self.mainLayout = QHBoxLayout()\n \n self.mainLayout.addWidget(self.paintingCanvas)\n \n self.plotCanvasLayout = QVBoxLayout()\n self.responseLayout = QVBoxLayout()\n self.responseLayout.addWidget(self.perceptronLabel)\n 
self.responseLayout.addWidget(self.convolutionLabel)\n self.plotCanvasLayout.addLayout(self.responseLayout)\n self.plotCanvasLayout.addWidget(self.plotCanvas)\n self.mainLayout.addLayout(self.plotCanvasLayout)\n \n self.setLayout(self.mainLayout)\n\nclass Window(QMainWindow):\n def __init__(self, parent=None):\n super(Window, self).__init__(parent)\n \n self.setWindowTitle(\"2+2\")\n self.setGeometry(100, 100, 800, 600)\n \n self.mainWidget = MainWidget()\n self.setCentralWidget(self.mainWidget)\n \n self.initMenuBar()\n \n \n def clear(self):\n self.mainWidget.image = QPixmap(800,600)\n self.mainWidget.image.fill(Qt.white)\n self.mainWidget.paintingCanvas.setPixmap(self.mainWidget.image)\n self.mainWidget.items = []\n self.update() \n \n \n def initMenuBar(self):\n mainMenu = self.menuBar()\n \n fileMenu = mainMenu.addMenu(\"File\")\n \n saveAction = QAction( \"Save\",self)\n saveAction.setShortcut(\"Ctrl+S\")\n fileMenu.addAction(saveAction)\n \n clearAction = QAction( \"Clear\", self)\n clearAction.setShortcut(\"Ctrl+C\")\n fileMenu.addAction(clearAction)\n clearAction.triggered.connect(self.clear)\n \n undoAction = QAction(\"Undo\", self)\n undoAction.setShortcut(\"Ctrl+Z\")\n fileMenu.addAction(undoAction)\n undoAction.triggered.connect(self.mainWidget.undo)\n \n brushSize = mainMenu.addMenu(\"Brush Size\")\n \n threepxAction = QAction( \"3px\", self)\n brushSize.addAction(threepxAction)\n threepxAction.triggered.connect(self.mainWidget.threePixel)\n \n fivepxAction = QAction( \"5px\", self)\n brushSize.addAction(fivepxAction)\n fivepxAction.triggered.connect(self.mainWidget.fivePixel)\n \n sevenpxAction = QAction(\"7px\", self)\n brushSize.addAction(sevenpxAction)\n sevenpxAction.triggered.connect(self.mainWidget.sevenPixel)\n \n ninepxAction = QAction(\"9px\", self)\n brushSize.addAction(ninepxAction)\n ninepxAction.triggered.connect(self.mainWidget.ninePixel) \n \n brushColor = mainMenu.addMenu(\"Brush Color\")\n \n blackAction = QAction( \"Black\", self)\n blackAction.setShortcut(\"Ctrl+B\")\n brushColor.addAction(blackAction)\n blackAction.triggered.connect(self.mainWidget.blackColor)\n \n whitekAction = QAction( \"White\", self)\n whitekAction.setShortcut(\"Ctrl+W\")\n brushColor.addAction(whitekAction)\n whitekAction.triggered.connect(self.mainWidget.whiteColor)\n \n redAction = QAction( \"Red\", self)\n redAction.setShortcut(\"Ctrl+R\")\n brushColor.addAction(redAction)\n redAction.triggered.connect(self.mainWidget.redColor)\n \n greenAction = QAction( \"Green\", self)\n greenAction.setShortcut(\"Ctrl+G\")\n brushColor.addAction(greenAction)\n greenAction.triggered.connect(self.mainWidget.greenColor)\n \n yellowAction = QAction( \"Yellow\", self)\n yellowAction.setShortcut(\"Ctrl+Y\")\n brushColor.addAction(yellowAction)\n yellowAction.triggered.connect(self.mainWidget.yellowColor)\n\n \n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n main = Window()\n main.show()\n\n sys.exit(app.exec_())","sub_path":"4thCourseS1/NeuralNetwork/2+2/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":10276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"360538401","text":"# Could use a tuple, a 2D array, or a dict\n# Sort first\nstring = input()\noverallList = []\ncouple = []\n\n\ndef indexDel(index):\n global string\n listTemp = list(string)\n del listTemp[index]\n string = ''.join(listTemp)\n\n\ncount = 0\nwhile len(string) > 0:\n character = string[0]\n couple.append(character)\n\n i = 0\n while i < len(string):\n if string[i] == character:\n count += 1\n indexDel(i)\n i -= 1\n i += 1\n couple.append(count)\n overallList.append(couple)\n count = 0\n couple = []\n\n\ndef 
sortKey(elem):\n return elem[1]\n\n\noverallList.sort(key=sortKey,reverse=True)\nfor elem in overallList:\n print(elem[0]*elem[1],end='')\nprint()","sub_path":"Code/CodeRecords/2531/60678/241211.py","file_name":"241211.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"396255706","text":"# Copyright 2020 Cortex Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport yaml\n\n\ndef remove_cli_config(cli_config_file_path, operator_endpoint):\n removed_env_names = []\n\n with open(cli_config_file_path, \"r\") as f:\n cli_config = yaml.safe_load(f)\n\n if cli_config is None:\n return\n\n prev_envs = cli_config.get(\"environments\", [])\n updated_envs = []\n\n for prev_env in prev_envs:\n if prev_env.get(\"operator_endpoint\", \"\").endswith(operator_endpoint):\n removed_env_names.append(prev_env[\"name\"])\n else:\n updated_envs.append(prev_env)\n\n if len(updated_envs) == len(prev_envs):\n return\n\n cli_config[\"environments\"] = updated_envs\n\n prev_default = cli_config.get(\"default_environment\")\n if prev_default in removed_env_names:\n cli_config[\"default_environment\"] = \"local\"\n\n with open(cli_config_file_path, \"w\") as f:\n yaml.dump(cli_config, f, default_flow_style=False)\n\n if len(removed_env_names) == 1:\n print(f\"✓ deleted the {removed_env_names[0]} environment configuration\")\n elif len(removed_env_names) == 2:\n print(\n f\"✓ deleted the {removed_env_names[0]} and {removed_env_names[1]} environment configurations\"\n )\n elif len(removed_env_names) > 2:\n print(\n f\"✓ deleted the {', '.join(removed_env_names[:-1])}, and {removed_env_names[-1]} environment configurations\"\n )\n\n if prev_default in removed_env_names:\n print(f\"✓ set the default environment to local\")\n\n\n# this is best effort, will not error in any circumstances\nif __name__ == \"__main__\":\n try:\n remove_cli_config(cli_config_file_path=sys.argv[1], operator_endpoint=sys.argv[2])\n except:\n pass\n","sub_path":"manager/remove_cli_config.py","file_name":"remove_cli_config.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"350327811","text":"def getPrefix(number, k):\n if k > getSize(number):\n return number\n return number // 10**(getSize(number) - k)\n\ndef getSize(d):\n size = 0\n while d >= 10**size:\n size += 1\n return size\n\ndef prefixMatched(number, d):\n return getPrefix(number, getSize(d)) == d\n\ndef sumOfOddPlace(number):\n total = 0\n for i in range(getSize(number), 0, -2):\n total += getPrefix(number, i) % 10\n return total\n\ndef getDigit(number):\n if getSize(number) <= 1:\n return number\n else:\n return number % 10 + number // 10\n\ndef sumOfDoubleEvenPalce(number):\n total = 0\n for i in range(1, getSize(number) + 1, 2):\n total += getDigit(2 * (getPrefix(number, i) % 10))\n return total\n\ndef isValid(number):\n if not 13 <= getSize(number) <= 16:\n return 
False\n if not (prefixMatched(number, 4) or prefixMatched(number, 5) or prefixMatched(number, 6) or prefixMatched(number, 37)):\n return False\n num1 = sumOfDoubleEvenPalce(number)\n num2 = sumOfOddPlace(number)\n return (num1 + num2) % 10 == 0\n\ndef main():\n cc = eval(input(\"Enter a credit card number: \"))\n if isValid(cc):\n print(\"That is a valid credit card number\")\n else:\n print(\"That is not a valid credit card number\")\n\nmain()","sub_path":"PythonProgramming/cp06/프로그래밍 연습문제(cp06)/6.29.py","file_name":"6.29.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"82954292","text":"# This file is part of pyunicorn.\n# Copyright (C) 2008--2023 Jonathan F. Donges and pyunicorn authors\n# URL: \n# License: BSD (3-clause)\n#\n# Please acknowledge and cite the use of this software and its authors\n# when results are used in publications or published elsewhere.\n#\n# You can use the following reference:\n# J.F. Donges, J. Heitzig, B. Beronov, M. Wiedermann, J. Runge, Q.-Y. Feng,\n# L. Tupikina, V. Stolbova, R.V. Donner, N. Marwan, H.A. Dijkstra,\n# and J. Kurths, \"Unified functional network and nonlinear time series analysis\n# for complex systems science: The pyunicorn package\"\n\n\"\"\"\nTests for the EventSeriesClimateNetwork class.\n\"\"\"\nimport numpy as np\n\nfrom pyunicorn.core.data import Data\nfrom pyunicorn.climate.eventseries_climatenetwork import\\\n EventSeriesClimateNetwork\n\n\ndef test_str(capsys):\n data = EventSeriesClimateNetwork.SmallTestData()\n print(EventSeriesClimateNetwork(data, method='ES',\n threshold_method='quantile',\n threshold_values=0.8, taumax=16,\n threshold_types='above'))\n out, err = capsys.readouterr()\n out_ref = \"Extracting network adjacency matrix by thresholding...\\n\" + \\\n \"Setting area weights according to type surface ...\\n\" + \\\n \"Setting area weights according to type surface ...\\n\" + \\\n \"EventSeriesClimateNetwork:\\n\" + \\\n \"EventSeries: 6 variables, 10 timesteps, taumax: 16.0, \" \\\n \"lag: 0.0\" + \\\n \"\\nClimateNetwork:\\n\" + \\\n \"GeoNetwork:\\n\" + \\\n \"SpatialNetwork:\\n\" + \\\n \"Network: directed, 6 nodes, 0 links, link density 0.000.\\n\" + \\\n \"Geographical boundaries:\\n\" + \\\n \" time lat lon\\n\" + \\\n \" min 0.0 0.00 2.50\\n\" + \\\n \" max 9.0 25.00 15.00\\n\" + \\\n \"Threshold: 0\\n\" + \\\n \"Local connections filtered out: False\\n\" + \\\n \"Type of event series measure to construct \" + \\\n \"the network: directedES\\n\"\n assert out == out_ref\n\n\ndef test_SmallTestData():\n res = Data.SmallTestData().observable()\n exp = np.array([[0., 1., 0., -1., -0., 1.],\n [0.309, 0.9511, -0.309, -0.9511, 0.309, 0.9511],\n [0.5878, 0.809, -0.5878, -0.809, 0.5878, 0.809],\n [0.809, 0.5878, -0.809, -0.5878, 0.809, 0.5878],\n [0.9511, 0.309, -0.9511, -0.309, 0.9511, 0.309],\n [1., 0., -1., -0., 1., 0.],\n [0.9511, -0.309, -0.9511, 0.309, 0.9511, -0.309],\n [0.809, -0.5878, -0.809, 0.5878, 0.809, -0.5878],\n [0.5878, -0.809, -0.5878, 0.809, 0.5878, -0.809],\n [0.309, -0.9511, -0.309, 0.9511, 0.309, -0.9511]])\n assert np.allclose(res, exp, atol=1e-04)\n","sub_path":"tests/test_climate/test_eventseries_climatenetwork.py","file_name":"test_eventseries_climatenetwork.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"250310199","text":"\"\"\"\nModule that implements pure-python equivalents of the functions 
in the\n_speedups extension module.\n\"\"\"\n\nfrom numpy import invert, isnan, array, transpose, zeros, compress\nimport operator\n\ndef array_combine(a, b, op=operator.and_, func=lambda x: x):\n \"\"\" Returns op(func(a), func(b)) if a and b are both not None;\n if one is None, then returns func() on the non-None array;\n if both are None, then returns None.\n \"\"\"\n if a is not None and b is not None:\n return op(func(a), func(b))\n elif a is not None:\n return func(a)\n elif b is not None:\n return func(b)\n else:\n return None\n\n\ndef scatterplot_gather_points(index, index_low, index_high,\n value, value_low, value_high,\n index_mask=None, index_sel=None, index_sel_mask=None,\n value_mask=None, value_sel=None, value_sel_mask=None):\n \"\"\"\n Takes index and value arrays, masks, and optional selection arrays,\n and returns the list of points and corresponding selection mask for\n those points.\n\n Parameters\n ----------\n index : float array (1D)\n Array of indexes of the points\n index_low : float or None\n The minimum acceptable value in the index array\n index_high : float or None\n The maximum acceptable value in the index array\n value : float array (1D)\n Array of values of the points\n value_low : float or None\n The minimum acceptable value in the value array\n value_high : float or None\n The maximum acceptable value in the value array\n\n Optional Parameters\n -------------------\n index_mask : bool or int array (1D)\n Mask array for the indexes\n index_sel : sequence of ints\n A list/tuple/array of indices of selected positions in the index array\n index_sel_mask : array of ints or bools\n An mask array with True values indicating which points are selected\n value_mask : bool or int array (1D)\n Mask array for the values\n value_sel : sequence of ints\n A list/tuple/array of indices of selected positions in the value array\n value_sel_mask : array of ints or bools\n An mask array with True values indicating which points are selected\n\n Returns\n -------\n points : float array (Nx2)\n The points that match all the masking criteria\n sel_mask : bool array (1D)\n Mask indicating which indices in **points** are selected\n \"\"\"\n\n index_range_mask = (index_low < index) & (index < index_high)\n value_range_mask = (value_low < value) & (value < value_high)\n\n nan_mask = array_combine(index_mask, value_mask,\n func = lambda x: invert(isnan(x)) & x)\n\n if nan_mask is not None:\n point_mask = nan_mask & index_range_mask & value_range_mask\n else:\n point_mask = index_range_mask & value_range_mask\n points = transpose(array((index, value)))\n\n # Handle the selection mask\n selection_mask = array_combine(index_sel_mask, value_sel_mask)\n\n if index_sel is None and value_sel is None:\n pass\n else:\n if index_sel is not None and value_sel is not None:\n mask2 = zeros(len(index), int)\n mask2[index_sel] = 1\n mask2[value_sel] &= 1\n elif index_sel is not None:\n mask2 = zeros(len(index), int)\n mask2[index_sel] = 1\n elif value_sel is not None:\n mask2 = zeros(len(index), int)\n mask2[value_sel] = 1\n if selection_mask is None:\n selection_mask = mask2\n else:\n selection_mask &= mask2\n\n points = compress(point_mask, points, axis=0)\n if selection_mask is not None:\n selections = compress(point_mask, selection_mask)\n else:\n selections = None\n return points, 
selections\n\n","sub_path":"chaco/_speedups_fallback.py","file_name":"_speedups_fallback.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"265977182","text":"#This file registeres the Host who has to attend a Client\nfrom tkinter import*\n\nclass Hosts:\n #Host Details\n hostname=\"\"\n hostmail=\"\"\n hostphone=\"\"\n \n #Registration of the host\n def register_host(self):\n Hosts.hostname=username.get()\n Hosts.hostmail=email.get()\n Hosts.hostphone=phone.get()\n print(\"Registered Successfully\")\n screen.destroy()\n from Guest import Guests\n guest = Guests()\n guest.guest_screen()\n \n #GUI screen for the host registration\n def main_screen(self):\n global screen\n screen = Tk();\n screen.geometry(\"300x290\")\n global username,email,phone\n username = StringVar()\n email = StringVar()\n phone = StringVar()\n screen.title(\"Host Info\")\n \n Label(text=\"Host Registration\",bg=\"Yellow\",width=\"15\",height=\"1\",font=(\"Calibri\",13)).pack()\n Label(text=\"\").pack()\n Label(text=\"Please enter details below\").pack()\n Label(text=\"\").pack()\n Label(text=\"Username \").pack()\n Entry(textvariable=username).pack()\n Label(text=\"Email \").pack()\n Entry(textvariable=email).pack()\n Label(text=\"phone \").pack()\n Entry(textvariable=phone).pack()\n Label(text=\"\").pack()\n button = Button(text=\"Register\",width=10,height=1,command=self.register_host)\n button.pack()\n screen.mainloop()\n\n","sub_path":"Host.py","file_name":"Host.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"165514750","text":"from contextlib import contextmanager\nfrom dataclasses import dataclass\n\nimport torch\n\nimport torch.utils._pytree as pytree\n\nfrom torch._C import _ExcludeDispatchKeyGuard, DispatchKey, DispatchKeySet\nfrom torch._dynamo.exc import CondOpArgsMismatchError\n\nfrom torch._functorch.eager_transforms import (\n _unwrap_all_tensors_from_functional,\n _wrap_all_tensors_to_functional,\n functionalize,\n)\nfrom torch._higher_order_ops.utils import autograd_not_implemented\nfrom torch._ops import HigherOrderOperator\nfrom torch._subclasses.fake_tensor import FakeTensorMode\nfrom torch.fx.experimental.proxy_tensor import (\n disable_proxy_modes_tracing,\n make_fx,\n ProxyTorchDispatchMode,\n track_tensor_tree,\n)\nfrom torch.fx.passes.shape_prop import _extract_tensor_metadata\nfrom torch.multiprocessing.reductions import StorageWeakRef\nfrom torch.utils._python_dispatch import (\n _get_current_dispatch_mode,\n _pop_mode_temporarily,\n)\n\n\n@contextmanager\ndef _set_compilation_env():\n _old_is_tracing = torch.fx._symbolic_trace._is_fx_tracing_flag\n try:\n # We need to turn off the is_fx_tracing_flag. Remove this flag check from dyanmo\n # once we are confident fx tracing works with dynamo.\n torch.fx._symbolic_trace._is_fx_tracing_flag = False\n yield\n finally:\n torch.fx._symbolic_trace._is_fx_tracing_flag = _old_is_tracing\n\n\n@dataclass\nclass UnsupportedAliasMutationException(RuntimeError):\n reason: str\n\n\ndef cond(pred, true_fn, false_fn, operands):\n r\"\"\"\n Conditionally applies ``true_fn`` or ``false_fn``.\n\n ``cond`` is structured control flow operator. 
That is, it is like a Python if-statement,\n but has limitations on ``true_fn``, ``false_fn``, and ``operands`` that enable it to be\n capturable using torch.compile and torch.export.\n\n Assuming the constraints on ``cond``'s arguments are met, ``cond`` is equivalent to the following::\n\n def cond(pred, true_branch, false_branch, operands):\n if pred:\n return true_branch(*operands)\n else:\n return false_branch(*operands)\n\n .. warning::\n cond is a prototype feature in PyTorch, included as a part of the torch.export release. The main limitations are that\n it may not work in eager-mode PyTorch and you may encounter various failure modes while using it.\n Please look forward to a more stable implementation in a future version of PyTorch.\n\n Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype\n\n Args:\n - `pred (Union[bool, torch.Tensor])`: A boolean expression or a tensor with one element,\n indicating which branch function to apply.\n\n - `true_fn (Callable)`: A callable function (a -> b) that is within the\n scope that is being traced.\n\n - `false_fn (Callable)`: A callable function (a -> b) that is within the\n scope that is being traced. The true branch and false branch must have\n consistent input and outputs, meaning the inputs have to be the same, and\n the outputs have to be the same type and shape.\n\n - `operands (Tuple[torch.Tensor])`: A tuple of inputs to the true/false\n branches.\n\n Example:\n\n def true_fn(x: torch.Tensor):\n return x.cos()\n def false_fn(x: torch.Tensor):\n return x.sin()\n return cond(x.shape[0] > 4, true_fn, false_fn, (x,))\n\n Restrictions:\n - The conditional statement (aka `pred`) must meet one of the following constraints:\n\n - It's a `torch.Tensor` with only one element, and torch.bool dtype\n\n - It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10`\n\n - The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints:\n\n - The function signature must match with operands.\n\n - The function must return a tensor with the same metadata, e.g. shape,\n dtype, etc.\n\n - The function cannot have in-place mutations on inputs or global variables. (Note: in-place tensor\n operations such as `add_` for intermediate results are allowed in a branch)\n\n .. warning::\n\n Temporal Limitations:\n\n - `cond` only supports **inference** right now. Autograd will be supported in the future.\n\n - The **operands** must be a **tuple of tensors**. Pytree of tensors will be supported in the future.\n\n - The **output** of branches must be a **single Tensor**. 
Pytree of tensors will be supported in the future.\n\n \"\"\"\n\n if torch._dynamo.is_compiling():\n return cond_op(pred, true_fn, false_fn, operands)\n\n def _validate_input(pred, true_fn, false_fn, operands):\n if not isinstance(pred, (bool, torch.Tensor)):\n raise RuntimeError(f\"Expected pred to be bool or tensor, but got {pred}.\")\n\n if isinstance(pred, torch.Tensor) and pred.numel() != 1:\n raise RuntimeError(\n f\"Expected pred to be bool or single-element tensor, but got {pred}.\"\n )\n\n if not callable(true_fn) or not callable(false_fn):\n raise RuntimeError(\"Expect both branches to be callable.\")\n\n if not isinstance(operands, (tuple, list)) or any(\n not isinstance(t, torch.Tensor) for t in operands\n ):\n raise RuntimeError(\n f\"Expect operands to be a tuple of Tensors, but got {operands}.\"\n )\n\n _validate_input(pred, true_fn, false_fn, operands)\n\n if not torch._dynamo.is_dynamo_supported():\n raise RuntimeError(\"torch.cond requires dynamo support.\")\n\n with _set_compilation_env():\n return torch.compile(cond_op, backend=\"eager\", fullgraph=True)(\n pred, true_fn, false_fn, operands\n )\n\n\n\"\"\"\nWe're going to define a `cond_op` operation.\nIn order to do this, we need implementations for each of the dispatch keys.\n\"\"\"\ncond_op = HigherOrderOperator(\"cond\")\n\n\ndef trace_cond(proxy_mode, func_overload, pred, true_fn, false_fn, operands):\n assert isinstance(\n operands, (list, tuple)\n ), \"Cond operands must be a list or tuple of tensors\"\n assert all(\n isinstance(o, torch.Tensor) for o in operands\n ), \"Cond operands must be a list of tensors\"\n\n pre_dispatch = getattr(proxy_mode, \"pre_dispatch\", False)\n with disable_proxy_modes_tracing():\n true_graph = make_fx(true_fn, pre_dispatch=pre_dispatch)(*operands)\n false_graph = make_fx(false_fn, pre_dispatch=pre_dispatch)(*operands)\n\n true_outs = []\n false_outs = []\n for node in true_graph.graph.nodes:\n if node.op == \"output\":\n true_outs.extend(node.args)\n\n for node in false_graph.graph.nodes:\n if node.op == \"output\":\n false_outs.extend(node.args)\n\n flat_true_outs, _ = pytree.tree_flatten(true_outs)\n flat_false_outs, _ = pytree.tree_flatten(false_outs)\n if len(flat_true_outs) != len(flat_false_outs):\n raise CondOpArgsMismatchError(\n f\"Expected to return same number of outputs but got:\"\n f\"\\n {true_fn.__name__} returns {len(flat_true_outs)} item(s)\"\n f\"\\n {false_fn.__name__} returns {len(flat_false_outs)} item(s)\"\n )\n\n for i in range(0, len(flat_true_outs)):\n true_out = flat_true_outs[i]\n false_out = flat_false_outs[i]\n if true_out.meta[\"tensor_meta\"] != false_out.meta[\"tensor_meta\"]:\n raise CondOpArgsMismatchError(\n f\"Expected each tensor to have same metadata but got:\"\n f\"\\n {true_fn.__name__} returns {true_out.meta['tensor_meta']}\"\n f\"\\n {false_fn.__name__} returns {false_out.meta['tensor_meta']}\"\n )\n\n # There are probably better ways - I know that create_arg has some self incrementing name\n # magic to it, but since we explicitly have to get the name for register_module,\n # I was not sure how to do that. 
This kinda simulates it.\n next_name = None\n i = 0\n while not next_name:\n candidate = f\"true_graph_{i}\"\n if hasattr(proxy_mode.tracer.root, candidate):\n i += 1\n else:\n next_name = candidate\n\n true_name = next_name\n false_name = f\"false_graph_{i}\"\n assert not hasattr(proxy_mode.tracer.root, false_name)\n\n proxy_mode.tracer.root.register_module(true_name, true_graph)\n proxy_mode.tracer.root.register_module(false_name, false_graph)\n\n args = (pred, true_graph, false_graph, operands)\n\n proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args)\n\n out_proxy = proxy_mode.tracer.create_proxy(\n \"call_function\", func_overload, proxy_args, {}, name=\"conditional\"\n )\n\n # At this point, we're *guaranteed* that whether an output came from the\n # true or false branch is indistinguishable. So, as this is just for tracing\n # purposes, choose the true branch.\n\n # TODO: Uhh.... it shouldn't matter, but changing this to true_fn results in\n # a FakeTensorMode error :\n # `Current active mode not registered`\n # TODO Sometimes the operands are not completely FakeTensor, something seems went wrong in\n # dynamo? Because of that it runs real computation sometimes and re-triggering downstream dispatch keys.\n out = false_fn(*operands)\n\n return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)\n\n\n@cond_op.py_impl(DispatchKey.CompositeExplicitAutograd)\ndef cond_op_dense(pred, true_fn, false_fn, operands):\n mode = _get_current_dispatch_mode()\n assert mode is None, \"Mode should never be enabled for CPU/CUDA key\"\n if pred:\n return true_fn(*operands)\n else:\n return false_fn(*operands)\n\n\ncond_op.py_impl(DispatchKey.Autograd)(\n autograd_not_implemented(cond_op, deferred_error=True)\n)\n\n\n@cond_op.py_impl(ProxyTorchDispatchMode)\ndef inner(pred, true_fn, false_fn, operands):\n # TODO Move this to proper utility function\n from torch._ops import mode_stack_per_key, temporarily_pop_mode\n\n # torch.cond expects ProxyTorchDispatchMode to **still** be on the stack\n # at the time that its proxy implementation is called.\n # However, the mode can live in one of two places, depending on\n # whether we're doing pre_dispatch tracing or normal tracing.\n pre_dispatch_modes = mode_stack_per_key().get(DispatchKey.PreDispatch, []) # type: ignore[attr-defined]\n if len(pre_dispatch_modes) > 0:\n with temporarily_pop_mode(pre_dispatch_modes) as mode:\n if mode.enable_tracing:\n return trace_cond(mode, cond_op, pred, true_fn, false_fn, operands)\n else:\n return cond_op(pred, true_fn, false_fn, operands)\n mode = _get_current_dispatch_mode()\n assert mode is not None, \"Mode should always be enabled for python fallback key\"\n with _pop_mode_temporarily() as mode:\n if mode.enable_tracing:\n return trace_cond(mode, cond_op, pred, true_fn, false_fn, operands)\n else:\n return cond_op(pred, true_fn, false_fn, operands)\n\n\n@cond_op.py_impl(FakeTensorMode)\ndef cond_fake_tensor_mode(pred, true_fn, false_fn, operands):\n true_outs = true_fn(*operands)\n flat_true_outs, _ = pytree.tree_flatten(true_outs)\n flat_false_outs, _ = pytree.tree_flatten(false_fn(*operands))\n if len(flat_true_outs) != len(flat_false_outs):\n raise RuntimeError(\"Unmatched number of outputs from cond() branches.\")\n\n for true_out, false_out in zip(flat_true_outs, flat_false_outs):\n true_meta = _extract_tensor_metadata(true_out)\n false_meta = _extract_tensor_metadata(false_out)\n if true_meta != false_meta:\n raise CondOpArgsMismatchError(\n f\"Expected each tensor to have same 
metadata but got:\"\n f\"\\n {true_fn.__name__} returns {true_meta}\"\n f\"\\n {false_fn.__name__} returns {false_meta}\"\n )\n return true_outs\n\n\ndef _has_potential_branch_input_mutation(branch, inputs):\n \"\"\"\n Dispatch-trace the branch with inputs and check if\n producing graph has mutable op on the input. This is\n bit restrictive as the branch must be traceable.\n \"\"\"\n try:\n gm = make_fx(branch)(*inputs)\n except UnsupportedAliasMutationException:\n # this can happen when nested cond_op is\n # functionalized\n return True\n except Exception as e:\n raise e\n\n def _detect_input_mutation(gm):\n input_nodes = set()\n for node in gm.graph.nodes:\n if node.op == \"placeholder\":\n input_nodes.add(node)\n if node.op == \"call_function\":\n target = node.target\n if (\n isinstance(target, torch._ops.OpOverload)\n and target._schema.is_mutable\n ):\n for arg in node.args:\n if arg in input_nodes:\n return True\n\n for _, module in gm.named_children():\n if isinstance(module, torch.fx.GraphModule):\n if _detect_input_mutation(module):\n return True\n\n return False\n\n return _detect_input_mutation(gm)\n\n\ndef _has_potential_branch_input_alias(branch, inputs):\n \"\"\"\n Dispatch-trace the branch with inputs and check if\n producing graph has output aliasing the branch input. This is\n bit restrictive as the branch must be traceable.\n \"\"\"\n try:\n gm = make_fx(branch)(*inputs)\n\n except UnsupportedAliasMutationException:\n # this can happen when nested cond_op is\n # functionalized\n return True\n except Exception as e:\n raise e\n\n def _detect_input_alias(gm):\n input_storages = set()\n for node in gm.graph.nodes:\n # We need to check existence of \"val\" because we reuse the logic here\n # for map operator, where num_mapped_args is a scalar\n # and doesn't have a \"val\" meta.\n if node.op == \"placeholder\" and \"val\" in node.meta:\n input_storages.add(StorageWeakRef(node.meta[\"val\"]._typed_storage()))\n if node.op == \"output\":\n\n def check_alias(out):\n if out is not None and \"val\" in out.meta:\n out_storage = StorageWeakRef(out.meta[\"val\"]._typed_storage())\n return out_storage in input_storages\n return False\n\n if any(pytree.tree_flatten(pytree.tree_map(check_alias, node.args))[0]):\n return True\n\n for _, module in gm.named_children():\n if isinstance(module, torch.fx.GraphModule) and _detect_input_alias(module):\n return True\n\n return False\n\n return _detect_input_alias(gm)\n\n\n@cond_op.py_impl(DispatchKey.Functionalize)\ndef cond_func(pred, true_fn, false_fn, inputs):\n reapply_views = torch._C._functionalization_reapply_views_tls()\n unwrapped_inputs = _unwrap_all_tensors_from_functional(\n inputs, reapply_views=reapply_views\n )\n unwrapped_pred = _unwrap_all_tensors_from_functional(\n pred, reapply_views=reapply_views\n )\n mode = \"mutations_and_views\" if reapply_views else \"mutations\"\n with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):\n functional_true = functionalize(true_fn, remove=mode)\n functional_false = functionalize(false_fn, remove=mode)\n for branch in [true_fn, false_fn]:\n if _has_potential_branch_input_mutation(branch, unwrapped_inputs):\n raise UnsupportedAliasMutationException(\n \"One of torch.cond branch \" \"might be modifying the input!\"\n )\n\n if _has_potential_branch_input_alias(branch, unwrapped_inputs):\n raise UnsupportedAliasMutationException(\n \"One of torch.cond branch \" \"might be aliasing the input!\"\n )\n\n cond_return = cond_op(\n unwrapped_pred, functional_true, functional_false, 
unwrapped_inputs\n )\n return _wrap_all_tensors_to_functional(cond_return, level=0)\n\n\n@cond_op.py_impl(torch._C._functorch.TransformType.Functionalize)\ndef cond_functionalize(interpreter, pred, true_fn, false_fn, inputs):\n \"\"\"\n Functionalization implementation for torch.cond. Currently:\n 1. We don't allow any input mutation inside the branches\n 2. Our check for above condition is not exhaustive\n \"\"\"\n reapply_views = interpreter.functionalize_add_back_views()\n mode = \"mutations_and_views\" if reapply_views else \"mutations\"\n # At this point, we will see functionalized tensors, so need to unwrap them first\n unwrapped_inputs = _unwrap_all_tensors_from_functional(\n inputs, reapply_views=reapply_views\n )\n unwrapped_pred = _unwrap_all_tensors_from_functional(\n pred, reapply_views=reapply_views\n )\n\n functional_true_fn = functionalize(true_fn, remove=mode)\n functional_false_fn = functionalize(false_fn, remove=mode)\n\n with interpreter.lower():\n for branch in [functional_true_fn, functional_false_fn]:\n if _has_potential_branch_input_mutation(branch, unwrapped_inputs):\n raise UnsupportedAliasMutationException(\n \"One of torch.cond branch \" \"might be modifying the input!\"\n )\n for branch in [true_fn, false_fn]:\n if _has_potential_branch_input_alias(branch, unwrapped_inputs):\n raise UnsupportedAliasMutationException(\n \"One of torch.cond branch \" \"might be aliasing the input!\"\n )\n\n cond_return = cond_op(\n unwrapped_pred, functional_true_fn, functional_false_fn, unwrapped_inputs\n )\n return _wrap_all_tensors_to_functional(cond_return, level=interpreter.level())\n\n\n# TODO(voz): Make this automatic for keys, this is very ugly atm\ncond_op.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined]\ncond_op.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined]\ncond_op.fallthrough(DispatchKey.ADInplaceOrView)\ncond_op.fallthrough(DispatchKey.BackendSelect)\ncond_op.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined]\ncond_op.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined]\n","sub_path":"torch/_higher_order_ops/cond.py","file_name":"cond.py","file_ext":"py","file_size_in_byte":18378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"284883776","text":"def calcular_ingresos(datos:list) -> dict:\n pre: str = \"prepagada\"\n sub: str = \"subsidiada\"\n total: int = 0 \n total_pre: int = 0\n total_sub: int = 0\n cont_pre: int = 0\n cont_sub: int = 0\n\n for item in datos:\n total = total + item[\"valor_a_pagar\"]\n if item[\"salud\"] == pre:\n total_pre = total_pre + item[\"valor_a_pagar\"] \n cont_pre = cont_pre + 1\n elif item[\"salud\"] == sub:\n total_sub = total_sub + item[\"valor_a_pagar\"]\n cont_sub = cont_sub + 1\n prom_pre: float = 0\n prom_sub: float = 0\n if cont_pre > 0:\n prom_pre= round((total_pre / cont_pre),1)\n if cont_sub > 0:\n prom_sub= round((total_sub / cont_sub),1)\n respuesta : dict = {\n \"total\" : total,\n \"promedio_salud_prepagada\" : prom_pre,\n \"promedio_salud_subsidiada\" : prom_sub\n }\n return respuesta\n \ndatos: list = [\n {\n \"salud\": \"prepagada\",\n \"valor_a_pagar\": 20000\n },\n {\n \"salud\": \"subsidiada\",\n \"valor_a_pagar\": 25000\n },\n {\n \"salud\": \"prepagada\",\n \"valor_a_pagar\": 32000\n },\n {\n \"salud\": \"prepagada\",\n \"valor_a_pagar\": 38000\n },\n {\n \"salud\": \"subsidiada\",\n \"valor_a_pagar\": 25000\n },\n {\n \"salud\": \"prepagada\",\n \"valor_a_pagar\": 33000\n },\n {\n 
\"salud\": \"subsidiada\",\n \"valor_a_pagar\": 28000\n }\n]\n \nprint(calcular_ingresos(datos))\n\n\n\n \n","sub_path":"ejercicios Python/semana cuatro/reto_3_lambda.py","file_name":"reto_3_lambda.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"30003077","text":"#!/usr/bin/env python\r\n# coding: shift_jis\r\n# Last Change: 2016/11/18 (Fri) 23:20:55.\r\n\r\nfrom PIL import Image\r\nfrom number import *\r\n\r\ndef change(img):\r\n rgb_img = img.convert('RGB')\r\n (w,h) = rgb_img.size\r\n image = Image.new('RGBA', (w, h))\r\n grey = [[0 for i in range(w)] for j in range(h)]\r\n for x in range(w):\r\n print(str(x))\r\n for y in range(h):\r\n (R, G, B) = rgb_img.getpixel((x,y))\r\n # grey[x][y] = number(max(R, G, B) / 256.0, 0)\r\n grey[x][y] = number((R + G + B) / 3 / 256.0, 0)\r\n for x in range(w - 1):\r\n print(str(x))\r\n for y in range(h - 1):\r\n # col = int(256 * mux_nu(xor(grey[x][y], grey[x + 1][y + 1]), xor(grey[x + 1][y], grey[x][y + 1]), 0.5).nu)\r\n # col = 256 - int(256 * edger(grey[x][y], grey[x + 1][y + 1], grey[x + 1][y], grey[x][y + 1]).nu)\r\n col = 256 - int(256 * edger2(grey[x][y], grey[x][y + 1]).nu)\r\n # col = 2 * col - 128\r\n # if col > 192:\r\n # col = 255\r\n # col = 2 * col\r\n image.putpixel((x, y),(col, col, col))\r\n return image\r\n\r\ndef gchange(img):\r\n rgb_img = img.convert('RGB')\r\n (w,h) = rgb_img.size\r\n small = 1\r\n image = Image.new('RGBA', ((int)(w/small), (int)(h/small)))\r\n grey = [[0 for i in range(w)] for j in range(h)]\r\n rate = 0.25\r\n for x in range((int)(w/small)):\r\n print(str(x) + '/' + str(w/small))\r\n for y in range((int)(h/small)):\r\n (R, G, B) = rgb_img.getpixel((small * x,small * y))\r\n # grey = number((R + G + B) / 3 / 256.0, 0)\r\n red = number(R / 256.0, 0)\r\n green = number(G / 256.0, 0)\r\n blue = number(B / 256.0, 0)\r\n # R = int(256 * (gammma(red, rate).nu - rate) / (1 - rate))\r\n # G = int(256 * (gammma(green, rate).nu - rate) / (1 - rate))\r\n # B = int(256 * (gammma(blue, rate).nu - rate) / (1 - rate))\r\n R = int(256 * gammma2(red, rate).nu)\r\n G = int(256 * gammma2(green, rate).nu)\r\n B = int(256 * gammma2(blue, rate).nu)\r\n image.putpixel((x, y),(R, G, B))\r\n return image\r\n","sub_path":"filter_py/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"19252843","text":"# -*- encoding: utf-8 -*-\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom apps.post.models import Post\n\ndef index(request):\n\treturn HttpResponseRedirect('/1')\n\t\ndef home(request, page='1'):\n\tis_home = True\n\tposts = Post.objects\n\tpaginator = Paginator(posts, 5)\n\ttry:\n\t\tposts = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tposts = paginator.page(1)\n\texcept EmptyPage:\n\t\tposts = paginator.page(paginator.num_pages)\n\treturn render_to_response('home.html', locals(), context_instance=RequestContext(request))","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"583348355","text":"import math\nimport numpy as np\nfrom sensor_msgs.msg import PointCloud\nfrom geometry_msgs.msg import 
Point32\n\n\"\"\"@package docstring\n\nModule for TF conversion\n\n\"\"\"\nclass TFcvt:\n def __init__(self):\n return\n\n\n\n \"\"\"Get 4*4 transform matrix from Quaternion (only rotation 3*3 area is changed)\"\"\"\n def rotMat_from_quat(self, x,y,z,w):\n r,p,y = self.euler_from_quaternion(x, y, z, w)\n return self.euler_to_rotMat(y,p,r)\n\n\n \"\"\"Get Euler RPY from quaternion\"\"\"\n def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n return roll_x, pitch_y, yaw_z # in radians\n\n \"\"\"Get 4*4 transform matrix from Euler RPY (only rotation 3*3 area is changed)\"\"\"\n def euler_to_rotMat(self, yaw, pitch, roll):\n Rz_yaw = np.array([\n [np.cos(yaw), -np.sin(yaw), 0],\n [np.sin(yaw), np.cos(yaw), 0],\n [ 0, 0, 1]])\n Ry_pitch = np.array([\n [ np.cos(pitch), 0, np.sin(pitch)],\n [ 0, 1, 0],\n [-np.sin(pitch), 0, np.cos(pitch)]])\n Rx_roll = np.array([\n [1, 0, 0],\n [0, np.cos(roll), -np.sin(roll)],\n [0, np.sin(roll), np.cos(roll)]])\n rotMat = np.dot(Rz_yaw, np.dot(Ry_pitch, Rx_roll))\n return rotMat\n\n \"\"\"Convert coord from image plane to camera frame \"\"\"\n def deprojection(self, x, y, depth):\n if depth==0:\n return None, None, None\n cor_x = float(depth)/1000\n scale = cor_x/1.88\n\n cor_y = float((x) - 640)*scale/-1000\n cor_z = float((y)-360)*scale/-1000\n return cor_x, cor_y, cor_z\n\n\n \"\"\"Transform xyz coord with 4*4 transform matrix \"\"\"\n def tfPoint(self, x, y, z, mat):\n vector4 = np.array([[x],[y],[z],[1]])\n result = np.dot(mat, vector4) # base -> point(index1)\n return result\n\n\n \"\"\"Publish point cloud data by TF config instance \"\"\"\n def pc_pub(self, TF_cfg):\n PC = PointCloud()\n PC.header.frame_id='base_link'\n idx = 0\n for y in range(TF_cfg.img_height):\n for x in range(TF_cfg.img_width):\n idx +=1\n if idx % 100 != 0:\n continue\n \n cor_x, cor_y, cor_z = self.deprojection(x, y, TF_cfg.img[y][x])\n if not cor_x:\n continue\n result_1 = self.tfPoint(cor_x, cor_y, cor_z,TF_cfg.TF_matrix)\n point = Point32()\n point.x=(result_1[0])\n point.y=(result_1[1])\n point.z=(result_1[2])\n PC.points.append(point)\n TF_cfg.PC_pub.publish(PC)\n return\n \n \"\"\"uv to robot base coordinates\"\"\"\n def uv2point(self, TF_cfg, u, v):\n cor_x, cor_y, cor_z = self.deprojection(u, v, TF_cfg.img[v][u])\n if not cor_x:\n return np.array([0,1,2,1])\n result = self.tfPoint(cor_x, cor_y, cor_z,TF_cfg.TF_matrix)\n return result\n ","sub_path":"src/TF_converter.py","file_name":"TF_converter.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"65526790","text":"\nfrom ophyd.areadetector import (AreaDetector, PixiradDetectorCam, ImagePlugin,\n TIFFPlugin, StatsPlugin, HDF5Plugin,\n ProcessPlugin, ROIPlugin, TransformPlugin,\n OverlayPlugin)\nfrom ophyd.areadetector.plugins import PluginBase\nfrom ophyd.areadetector.cam import AreaDetectorCam\nfrom ophyd.device import BlueskyInterface, Staged\nfrom ophyd.areadetector.trigger_mixins import SingleTrigger\nfrom ophyd.areadetector.filestore_mixins import (FileStoreIterativeWrite,\n FileStoreHDF5IterativeWrite,\n FileStoreTIFFSquashing,\n FileStoreTIFF)\nfrom ophyd import Signal, EpicsSignal, EpicsSignalRO\nfrom ophyd.status import 
SubscriptionStatus, DeviceStatus\nfrom ophyd.sim import NullStatus # TODO: remove after complete/collect are defined\nfrom ophyd import Component as Cpt, set_and_wait\nfrom bluesky import __version__ as bluesky_version\n\nfrom pathlib import PurePath\n#from hxntools.detectors.xspress3 import (XspressTrigger, Xspress3Detector,\n# Xspress3Channel, Xspress3FileStore, logger)\nfrom nslsii.detectors.xspress3 import (XspressTrigger, Xspress3Detector,\n Xspress3Channel, Xspress3FileStore, logger)\n\nimport numpy\nimport pandas as pd\nimport itertools, os\nimport time as ttime\nfrom collections import deque, OrderedDict\nfrom itertools import product\n\nimport matplotlib.pyplot as plt\nfrom IPython import get_ipython\nuser_ns = get_ipython().user_ns\n\nfrom BMM.functions import error_msg, warning_msg, go_msg, url_msg, bold_msg, verbosebold_msg, list_msg, disconnected_msg, info_msg, whisper\nfrom BMM.functions import now\nfrom BMM.metadata import mirror_state\n\nfrom databroker.assets.handlers import HandlerBase, Xspress3HDF5Handler, XS3_XRF_DATA_KEY\n\nimport configparser\n\n\n################################################################################\n# Notes:\n#\n# Before every count or scan, must explicitly set the number of points in the\n# measurement:\n# xs.total_points.put(5) \n#\n# This means that Xspress3 will require its own count plan\n# also that a linescan or xafs scan must set total_points up front\n\n\n\n# class BMMXspress3HDF5Handler(Xspress3HDF5Handler):\n# def __call__(self, *args, frame=None, **kwargs):\n# self._get_dataset()\n# shape = self.dataset.shape\n# if len(shape) != 3:\n# raise RuntimeError(f'The ndim of the dataset is not 3, but {len(shape)}')\n# num_channels = shape[1]\n# print(num_channels)\n# chanrois = [f'CHAN{c}ROI{r}' for c, r in product([1, 2, 3, 4], [1, 2, 3, 4])]\n# attrsdf = pd.DataFrame.from_dict(\n# {chanroi: self._file['/entry/instrument/detector/']['NDAttributes'][chanroi] for chanroi in chanrois}\n# )\n# ##print(attrsdf)\n# df = pd.DataFrame(data=self._dataset[frame, :, :].T,\n# columns=[f'ch_{n+1}' for n in range(num_channels)])\n# #return pd.concat([df]+[attrsdf])\n# return df\n\n# db = user_ns['db']\n# db.reg.register_handler(BMMXspress3HDF5Handler.HANDLER_NAME,\n# BMMXspress3HDF5Handler, overwrite=True) \n\nclass Xspress3FileStoreFlyable(Xspress3FileStore):\n def warmup(self):\n \"\"\"\n A convenience method for 'priming' the plugin.\n The plugin has to 'see' one acquisition before it is ready to capture.\n This sets the array size, etc.\n NOTE : this comes from:\n https://github.com/NSLS-II/ophyd/blob/master/ophyd/areadetector/plugins.py\n We had to replace \"cam\" with \"settings\" here.\n Also modified the stage sigs.\n \"\"\"\n print(\"warming up the hdf5 plugin...\")\n set_and_wait(self.enable, 1)\n sigs = OrderedDict([(self.parent.settings.array_callbacks, 1),\n (self.parent.settings.trigger_mode, 'Internal'),\n # just in case the acquisition time is set very long...\n (self.parent.settings.acquire_time, 1),\n # (self.capture, 1),\n (self.parent.settings.acquire, 1)])\n\n original_vals = {sig: sig.get() for sig in sigs}\n\n # Remove the hdf5.capture item here to avoid an error as it should reset back to 0 itself\n # del original_vals[self.capture]\n\n for sig, val in sigs.items():\n ttime.sleep(0.1) # abundance of caution\n set_and_wait(sig, val)\n\n ttime.sleep(2) # wait for acquisition\n\n for sig, val in reversed(list(original_vals.items())):\n ttime.sleep(0.1)\n set_and_wait(sig, val)\n print(\"done\")\n\n def unstage(self):\n \"\"\"A 
custom unstage method is needed to avoid these messages:\n\n Still capturing data .... waiting.\n Still capturing data .... waiting.\n Still capturing data .... waiting.\n Still capturing data .... giving up.\n \"\"\"\n set_and_wait(self.capture, 0)\n return super().unstage()\n\nclass BMMXspress3Channel(Xspress3Channel):\n extra_rois_enabled = Cpt(EpicsSignal, 'PluginControlValExtraROI')\n\n \nclass BMMXspress3Detector(XspressTrigger, Xspress3Detector):\n roi_data = Cpt(PluginBase, 'ROIDATA:')\n channel1 = Cpt(BMMXspress3Channel, 'C1_', channel_num=1, read_attrs=['rois'])\n channel2 = Cpt(BMMXspress3Channel, 'C2_', channel_num=2, read_attrs=['rois'])\n channel3 = Cpt(BMMXspress3Channel, 'C3_', channel_num=3, read_attrs=['rois'])\n channel4 = Cpt(BMMXspress3Channel, 'C4_', channel_num=4, read_attrs=['rois'])\n # Currently only using four channels. Uncomment these to enable more channels:\n # channel5 = C(Xspress3Channel, 'C5_', channel_num=5)\n # channel6 = C(Xspress3Channel, 'C6_', channel_num=6)\n # channel7 = C(Xspress3Channel, 'C7_', channel_num=7)\n # channel8 = C(Xspress3Channel, 'C8_', channel_num=8)\n #create_dir = Cpt(EpicsSignal, 'HDF5:FileCreateDir')\n\n # mca1_sum = Cpt(EpicsSignal, 'ARRSUM1:ArrayData')\n # mca2_sum = Cpt(EpicsSignal, 'ARRSUM2:ArrayData')\n # mca3_sum = Cpt(EpicsSignal, 'ARRSUM3:ArrayData')\n # mca4_sum = Cpt(EpicsSignal, 'ARRSUM4:ArrayData')\n \n mca1 = Cpt(EpicsSignal, 'ARR1:ArrayData')\n mca2 = Cpt(EpicsSignal, 'ARR2:ArrayData')\n mca3 = Cpt(EpicsSignal, 'ARR3:ArrayData')\n mca4 = Cpt(EpicsSignal, 'ARR4:ArrayData')\n \n hdf5 = Cpt(Xspress3FileStoreFlyable, 'HDF5:',\n read_path_template='/xspress3/BMM/', # path to data folder, as mounted on client (i.e. ws1) \n root='/xspress3/', # path to root, as mounted on client (i.e. ws1)\n write_path_template='/home/xspress3/data/BMM', # full path on IOC server (i.e. xf06bm-ioc-xspress3)\n )\n\n def __init__(self, prefix, *, configuration_attrs=None, read_attrs=None,\n **kwargs):\n if configuration_attrs is None:\n configuration_attrs = ['external_trig', 'total_points',\n 'spectra_per_point', 'settings',\n 'rewindable']\n if read_attrs is None:\n read_attrs = ['channel1', 'channel2', 'channel3', 'channel4', 'hdf5']\n super().__init__(prefix, configuration_attrs=configuration_attrs,\n read_attrs=read_attrs, **kwargs)\n\n self.set_channels_for_hdf5()\n\n self._asset_docs_cache = deque()\n self._datum_counter = None\n \n self.slots = ['Ti', 'V', 'Cr', 'Mn',\n 'Fe', 'Co', 'Ni', 'Cu',\n 'Zn', 'As', 'Pt', 'Pb',\n None, None, None, 'OCR']\n self.restart()\n # self.settings.num_images.put(1) # number of frames\n # self.settings.trigger_mode.put(1) # trigger mode internal\n # self.settings.ctrl_dtc.put(1) # dead time corrections enabled\n # self.set_channels_for_hdf5()\n # self.set_rois()\n\n def trigger(self):\n if self._staged != Staged.yes:\n raise RuntimeError(\"not staged\")\n\n import epics\n #t = '{:%H:%M:%S.%f}'.format(datetime.datetime.now())\n #print('tr1 {} '.format(t))\n self._status = DeviceStatus(self)\n #self.settings.erase.put(1) # this was \n self._acquisition_signal.put(1, wait=False)\n trigger_time = ttime.time()\n #t = '{:%H:%M:%S.%f}'.format(datetime.datetime.now())\n #print('tr2 {} '.format(t))\n\n for sn in self.read_attrs:\n if sn.startswith('channel') and '.' 
not in sn:\n ch = getattr(self, sn)\n self.dispatch(ch.name, trigger_time)\n #t = '{:%H:%M:%S.%f}'.format(datetime.datetime.now())\n #print('tr3 {} '.format(t))\n\n self._abs_trigger_count += 1\n return self._status\n \n def restart(self):\n for n in range(1,5):\n this = getattr(self, f'channel{n}')\n this.vis_enabled.put(1)\n this.extra_rois_enabled.put(1)\n #XF:06BM-ES{Xsp:1}:C1_PluginControlValExtraROI\n self.settings.num_images.put(1) # number of frames\n self.settings.trigger_mode.put(1) # trigger mode internal\n self.settings.ctrl_dtc.put(1) # dead time corrections enabled\n self.set_rois()\n \n def _acquire_changed(self, value=None, old_value=None, **kwargs):\n super()._acquire_changed(value=value, old_value=old_value, **kwargs)\n status = self._status\n if status is not None and status.done:\n # Clear the state to be ready for the next round.\n self._status = None\n \n def stop(self):\n ret = super().stop()\n self.hdf5.stop()\n return ret\n\n def stage(self):\n if self.spectra_per_point.get() != 1:\n raise NotImplementedError(\n \"multi spectra per point not supported yet\")\n ret = super().stage()\n self._datum_counter = itertools.count()\n return ret\n\n def unstage(self):\n self.settings.trigger_mode.put(0) # 'Software'\n super().unstage()\n self._datum_counter = None\n\n def set_channels_for_hdf5(self, channels=range(1,5)):\n \"\"\"\n Configure which channels' data should be saved in the resulted hdf5 file.\n\n Parameters\n ----------\n channels: tuple, optional\n the channels to save the data for\n \"\"\"\n # The number of channel\n for n in channels:\n getattr(self, f'channel{n}').rois.read_attrs = ['roi{:02}'.format(j) for j in range(1,17)]\n self.hdf5.num_extra_dims.put(0)\n self.settings.num_channels.put(len(channels))\n\n\n def set_roi_channel(self, channel=1, index=16, name='OCR', low=1, high=4095):\n ch = getattr(self, f'channel{channel}')\n rs = ch.rois\n this = getattr(rs, 'roi{:02}'.format(index))\n this.value.name = name\n this.bin_low.put(low)\n this.bin_high.put(high)\n \n def set_rois(self):\n config = configparser.ConfigParser()\n startup_dir = get_ipython().profile_dir.startup_dir\n config.read_file(open(os.path.join(startup_dir, 'rois.ini')))\n for i, el in enumerate(self.slots):\n if el is None:\n continue\n bounds = config.get('rois', el).split(' ')\n for ch in range(1,5):\n self.set_roi_channel(channel=ch, index=i+1, name=f'{el.capitalize()}{ch}', low=bounds[0], high=bounds[1])\n\n def roi_details(self):\n BMMuser = user_ns['BMMuser']\n print(' ROI Elem low high')\n print('==========================')\n template = ' %3d %-4s %4d %4d'\n for i, el in enumerate(self.slots):\n rs = self.channel1.rois\n this = getattr(rs, 'roi{:02}'.format(i+1))\n if el is None:\n print(template % (i+1, 'None', this.bin_low.value, this.bin_high.value))\n elif el == BMMuser.element:\n print(go_msg(template % (i+1, el.capitalize(), this.bin_low.value, this.bin_high.value)))\n else:\n print(template % (i+1, el.capitalize(), this.bin_low.value, this.bin_high.value))\n \n def measure_roi(self):\n BMMuser = user_ns['BMMuser']\n for i in range(16):\n for n in range(1,5):\n ch = getattr(self, f'channel{n}')\n this = getattr(ch.rois, 'roi{:02}'.format(i+1))\n if self.slots[i] == BMMuser.element:\n this.value.kind = 'hinted'\n setattr(BMMuser, f'xs{n}', this.value.name)\n setattr(BMMuser, f'xschannel{n}', this.value)\n else:\n this.value.kind = 'omitted'\n \n\n def show_rois(self):\n BMMuser = user_ns['BMMuser']\n text = 'Xspress3 ROIs:\\n'\n text += bold_msg(' 1 2 3 4 5 6 7 8\\n')\n text 
+= ' '\n for i in range(8):\n if self.slots[i] == BMMuser.element:\n text += go_msg('%4.4s' % self.slots[i]) + ' '\n else:\n text += '%4.4s' % self.slots[i] + ' '\n text += '\\n'\n text += bold_msg(' 9 10 11 12 13 14 15 16\\n')\n text += ' '\n for i in range(8, 16):\n if self.slots[i] == BMMuser.element:\n text += go_msg('%4.4s' % self.slots[i]) + ' '\n else:\n text += '%4.4s' % self.slots[i] + ' '\n text += '\\n'\n return(text)\n \n def plot(self, add=False, only=None):\n dcm = user_ns['dcm']\n plt.cla()\n plt.xlabel('Energy (eV)')\n plt.ylabel('counts')\n plt.title('XRF Spectrum')\n plt.grid(which='major', axis='both')\n plt.xlim(2500, round(dcm.energy.position, -2)+500)\n e = numpy.arange(0, len(self.mca1.value)) * 10\n if only is not None and only in (1, 2, 3, 4):\n this = getattr(self, f'mca{only}')\n plt.plot(e, this.value)\n elif add is True:\n plt.plot(e, self.mca1.value+self.mca2.value+self.mca3.value+self.mca4.value)\n else:\n plt.plot(e, self.mca1.value)\n plt.plot(e, self.mca2.value)\n plt.plot(e, self.mca3.value)\n plt.plot(e, self.mca4.value)\n\n\n def to_xdi(self, filename=None):\n\n dcm, BMMuser, ring = user_ns['dcm'], user_ns['BMMuser'], user_ns['ring']\n\n column_list = ['MCA1', 'MCA2', 'MCA3', 'MCA4']\n #template = \" %.3f %.6f %.6f %.6f %.6f\\n\"\n m2state, m3state = mirror_state()\n\n handle = open(filename, 'w')\n handle.write('# XDI/1.0 BlueSky/%s\\n' % bluesky_version)\n #handle.write('# Scan.uid: %s\\n' % dataframe['start']['uid'])\n #handle.write('# Scan.transient_id: %d\\n' % dataframe['start']['scan_id'])\n handle.write('# Beamline.name: BMM (06BM) -- Beamline for Materials Measurement')\n handle.write('# Beamline.xray_source: NSLS-II three-pole wiggler\\n')\n handle.write('# Beamline.collimation: paraboloid mirror, 5 nm Rh on 30 nm Pt\\n')\n handle.write('# Beamline.focusing: %s\\n' % m2state)\n handle.write('# Beamline.harmonic_rejection: %s\\n' % m3state)\n handle.write('# Beamline.energy: %.3f\\n' % dcm.energy.position)\n handle.write('# Detector.fluorescence: SII Vortex ME4 (4-element silicon drift)\\n')\n handle.write('# Scan.end_time: %s\\n' % now())\n handle.write('# Scan.dwell_time: %.2f\\n' % self.settings.acquire_time.value)\n handle.write('# Facility.name: NSLS-II\\n')\n handle.write('# Facility.current: %.1f mA\\n' % ring.current.value)\n handle.write('# Facility.mode: %s\\n' % ring.mode.value)\n handle.write('# Facility.cycle: %s\\n' % BMMuser.cycle)\n handle.write('# Facility.GUP: %d\\n' % BMMuser.gup)\n handle.write('# Facility.SAF: %d\\n' % BMMuser.saf)\n handle.write('# Column.1: energy (eV)\\n')\n handle.write('# Column.2: MCA1 (counts)\\n')\n handle.write('# Column.3: MCA2 (counts)\\n')\n handle.write('# Column.4: MCA3 (counts)\\n')\n handle.write('# Column.5: MCA4 (counts)\\n')\n handle.write('# ==========================================================\\n')\n handle.write('# energy ')\n\n ## data table\n e=numpy.arange(0, len(self.mca1.value)) * 10\n a=numpy.vstack([self.mca1.value, self.mca2.value, self.mca3.value, self.mca4.value])\n b=pd.DataFrame(a.transpose(), index=e, columns=column_list)\n handle.write(b.to_csv(sep=' '))\n\n handle.flush()\n handle.close()\n print(bold_msg('wrote XRF spectra to %s' % filename))\n \n","sub_path":"startup/BMM/xspress3.py","file_name":"xspress3.py","file_ext":"py","file_size_in_byte":17037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"566460230","text":"\"\"\"\nTests for ProductResource api.\n\n\"\"\"\n\nfrom tests import case\n\n\n\nclass 
ProductVersionResourceTest(case.api.ApiTestCase):\n\n @property\n def factory(self):\n \"\"\"The model factory for this object.\"\"\"\n return self.F.ProductVersionFactory\n\n\n @property\n def resource_name(self):\n return \"productversion\"\n\n\n def test_productversion_list(self):\n \"\"\"Get a list of existing productversions\"\"\"\n\n pv = self.F.ProductVersionFactory.create(\n version=\"3.2\",\n codename=\"enigma\"\n )\n\n res = self.get_list()\n\n act_meta = res.json[\"meta\"]\n exp_meta = {\n \"limit\" : 20,\n \"next\" : None,\n \"offset\" : 0,\n \"previous\" : None,\n \"total_count\" : 1,\n }\n\n self.assertEquals(act_meta, exp_meta)\n\n act_objects = res.json[\"objects\"]\n exp_objects = []\n\n exp_objects.append({\n u\"codename\": unicode(pv.codename),\n u\"id\": unicode(pv.id),\n u\"product\": unicode(self.get_detail_url(\"product\",pv.product.id)),\n u\"resource_uri\": unicode(self.get_detail_url(\"productversion\",pv.id)),\n u\"version\": u\"3.2\",\n })\n\n self.maxDiff = None\n self.assertEqual(exp_objects, act_objects)\n","sub_path":"tests/model/core/api/test_productversion_resource.py","file_name":"test_productversion_resource.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"350584155","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login', views.login_user, name='login_user'),\n url(r'^create', views.create_account, name='create'),\n url(r'^home', views.home, name='home'),\n url(r'^apply', views.apply, name='apply'),\n url(r'^cancel', views.cancel, name='cancel'),\n url(r'^export', views.export_consent, name='export'),\n url(r'^logout/$', views.user_logout, name='logout'),\n]\n","sub_path":"tnp/consent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"177540475","text":"import caffe\nimport os\nfrom util import *\nimport numpy as np\nfrom io import BytesIO\nfrom config import *\nfrom collections import OrderedDict\n\nnet = caffe.Net(PROTO_PATH, MODEL_PATH, caffe.TEST)\nresizeInputDim(net, 1)\n# for debug\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nf = open(os.path.join(CUR_DIR, 'synset_words.txt'))\nnames = f.read().splitlines()\n\ndef preprocessForShow(fp):\n img = resizeAndCropImg(fp, [RESIZE_HEIGHT, RESIZE_WIDTH], [CROP_HEIGHT, CROP_WIDTH])\n f = BytesIO()\n img.save(f, format='jpeg')\n return f.getvalue()\n\ndef preprocessForFeed(fp):\n return preprocessImg(fp, [RESIZE_HEIGHT, RESIZE_WIDTH], [CROP_HEIGHT, CROP_WIDTH], CHANNEL_ORDER, MEAN)\n\ndef preprocessForFeedWithNoise(fp, x, y):\n return preprocessImgWithNoise(fp, [RESIZE_HEIGHT, RESIZE_WIDTH], [CROP_HEIGHT, CROP_WIDTH], CHANNEL_ORDER, x, y, MEAN)\n\ndef feed(data, channels=None):\n net.blobs['data'].data[0] = data\n net.forward()\n tag = np.argmax(net.blobs[net.outputs[0]].data[0])\n name = names[tag]\n if not channels is None:\n for blob in channels.keys():\n for item in channels[blob.encode('utf8')]:\n channel_id = item[0]\n blob_data = net.blobs[blob].data[0]\n temp = np.max(blob_data, axis=1)\n channel_max = np.max(temp, axis=1)\n item[1] = float(channel_max[channel_id])\n return name\n\ndef getActivatedNeurons():\n activated_neurons = OrderedDict()\n for blob in config.evaluated_blobs:\n blob_data = net.blobs[blob].data[0]\n temp = np.max(blob_data, axis=1)\n channel_max = np.max(temp, 
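One sharpening note on the urls.py record above: every pattern except r'^logout/$' is anchored only at the start, so r'^login' also resolves any longer path beginning with that prefix (e.g. /loginfoo). A plain-re check makes the difference visible:

import re

print(bool(re.match(r'^login', 'loginfoo')))    # True  - prefix also matches
print(bool(re.match(r'^login$', 'loginfoo')))   # False - terminated pattern
print(bool(re.match(r'^logout/$', 'logout/')))  # True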
axis=1)\n channel_index = np.argsort(channel_max)[::-1] # front element points to larger activation\n\n num_channels = channel_max.shape[0]\n wanted = min(num_channels, max(int(num_channels * CHANNEL_RESERVE_RATIO), 10))\n\n channel_activation_pair_list = []\n\n for i in range(0, wanted):\n channel_id = channel_index[i]\n activation = float(channel_max[channel_id])\n channel_activation_pair_list.append((channel_id, activation))\n activated_neurons[blob] = channel_activation_pair_list\n\n return activated_neurons\n\ndef getLink(down, i, up, j):\n layers = findLayersBetween(down, up)\n kernels = []\n for layer in layers:\n if net.params.has_key(layer):\n kernels.append(net.params[layer])\n\ndef getLinkFromKernel(kernels, i, j):\n pass\n\ndef findLayersBetween(downBlob, upBlob):\n layerNames = net.bottom_names.keys()\n for i in range(len(layerNames)):\n layerName = layerNames[i]\n bottomName = net.bottom_names[layerName]\n topName = net.top_names[layerName]\n if bottomName == downBlob and topName != downBlob:\n down = i\n if bottomName != upBlob and topName == upBlob:\n up = i\n layers = []\n for i in range(down, up + 1):\n layers.append(layerNames[i])\n return layers\n","sub_path":"src/website/net/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"521980703","text":"# Copyright 2016 Red Hat, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
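getActivatedNeurons above reduces each blob to per-channel maxima, then argsorts in descending order and keeps roughly the top CHANNEL_RESERVE_RATIO fraction (at least 10 channels). The same idiom on a stand-in activation blob, with 0.1 as a guessed ratio:

import numpy as np

blob = np.random.rand(64, 7, 7)                  # stand-in (C, H, W) activations
channel_max = blob.max(axis=(1, 2))              # one scalar per channel
order = np.argsort(channel_max)[::-1]            # largest activation first
wanted = min(64, max(int(64 * 0.1), 10))         # ratio 0.1 is an assumption
top = [(int(c), float(channel_max[c])) for c in order[:wanted]]
print(len(top), top[0])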
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nfrom ansible.constants import get_config, load_config_file\n\nDEFAULT_ARA_DIR = os.path.expanduser('~/.ara')\nDEFAULT_DATABASE_PATH = os.path.join(DEFAULT_ARA_DIR, 'ansible.sqlite')\nDEFAULT_DATABASE = 'sqlite:///{}'.format(DEFAULT_DATABASE_PATH)\nDEFAULT_ARA_LOGFILE = os.path.join(DEFAULT_ARA_DIR, 'ara.log')\nDEFAULT_ARA_LOG_LEVEL = 'INFO'\nDEFAULT_ARA_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nDEFAULT_ARA_SQL_DEBUG = False\nDEFAULT_ARA_PATH_MAX = 30\n\nconfig, path = load_config_file()\n\nARA_DIR = get_config(\n config, 'ara', 'dir', 'ARA_DIR',\n DEFAULT_ARA_DIR)\nARA_LOG_FILE = get_config(\n config, 'ara', 'logfile', 'ARA_LOG_FILE',\n DEFAULT_ARA_LOGFILE)\nARA_LOG_LEVEL = get_config(\n config, 'ara', 'loglevel', 'ARA_LOG_LEVEL',\n DEFAULT_ARA_LOG_LEVEL).upper()\nARA_LOG_FORMAT = get_config(\n config, 'ara', 'logformat', 'ARA_LOG_FORMAT',\n DEFAULT_ARA_LOG_FORMAT)\nARA_PATH_MAX = get_config(\n config, 'ara', 'path_max', 'ARA_PATH_MAX',\n DEFAULT_ARA_PATH_MAX)\nARA_ENABLE_DEBUG_VIEW = get_config(\n config, 'ara', 'enable_debug_view', 'ARA_ENABLE_DEBUG_VIEW',\n False)\nARA_AUTOCREATE_DATABASE = get_config(\n config, 'ara', 'autocreate_database', 'ARA_AUTOCREATE_DATABASE',\n True)\n\n\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nSQLALCHEMY_DATABASE_URI = get_config(config, 'ara', 'database',\n 'ARA_DATABASE',\n DEFAULT_DATABASE)\nSQLALCHEMY_ECHO = get_config(config, 'ara', 'sqldebug',\n 'ARA_SQL_DEBUG',\n DEFAULT_ARA_SQL_DEBUG)\n","sub_path":"ara/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"491818351","text":"##\nfrom soup import *\nimport matplotlib as mpl\nfrom routines import psy, psy_fit, chro, cache_trial_timing_bins, load_data, get_choice, rc_regress, learning_curve, heatmap, boxplotkwargs, plot_multiple_psy, fixdif, num2side, future\n\nfancy_fig = 'regression' # psy_bysub, rates, fixdif, future, psychometric_animation, difficulty, chro, bylast, psy, regression, heatmap\nfontsize = 'x-large'\nrc_bins = 0.600\n\n## load data\ndata_path = '/Users/ben/data/puffs/cohort_7/'\ndata_file = data_path + 'data_compressed.h5'\ntrials_all,trials_timing = load_data(data_file, resync=False, cohort=7, nrigs=1)\nrc_regress(trials_all, trials_timing, bin_width=rc_bins, data_path=data_path) # because will subsequently split trials so messes with caching\n\n## filters\nsince = pd.to_datetime('2017-02-01')\nfilters = (trials_all.subj!=0)\\\n & (trials_all.outcome<2)\\\n & (trials_all.level>=6)\\\n & (trials_all.rule==0)\\\n & (trials_all.session >= since)\\\n #& (trials_all.manipulation==0)\\\n #& (~(trials_all.difficulty==0))\n #& (trials_all.distractors>3)\n #& (trials_all.rule==0)\\\n #& (trials_all.condition==0)\\\n #& (trials_all.session.isin(trials.session.unique()[-12:]))\\\n #& (trials_all.difficulty==2)\\\n\ntrials = trials_all[filters]\n\n## analysis\nps_p,ps_e,ps_n = psy(trials, y='perf', at_least_ntrials=10)\nps_abs_p,ps_abs_e,ps_abs_n = psy(trials, y='perf', x='sumpuffs')\nps_absv_p,ps_absv_e,ps_absv_n = psy(trials, y='val', x='sumpuffs')\nps_ind_names,ps_ind,ps_acr = psy(trials, y='perf', bysub=True)\nps_bydif = [(psy(trials[trials.difficulty==ud], y='perf'),ud) for ud in trials.difficulty.unique()]\nps_bylast_side = [(psy(trials[trials.last_side==uls], valid_only=True),uls) for uls in trials.last_side.unique() if 
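Every setting above funnels through ansible's get_config with an ini key, an environment variable name, and a default. The exact precedence lives inside ansible.constants and is not shown here; a hedged stand-in that consults the ini first and the environment second:

import os

def lookup(config, section, key, env_var, default):
    # config is a configparser.ConfigParser or None; the ini-before-env
    # ordering is an assumption, not ansible's documented behaviour.
    if config is not None and config.has_option(section, key):
        return config.get(section, key)
    return os.environ.get(env_var, default)

print(lookup(None, 'ara', 'dir', 'ARA_DIR', os.path.expanduser('~/.ara')))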
not np.isnan(uls)]\nps_bylast_choice = [(psy(trials[trials.last_choice==uls], valid_only=True),uls) for uls in trials.last_choice.unique() if not np.isnan(uls)]\nps_bylast_choiceside = [(psy(t, valid_only=True),lab) for lab,t in trials.groupby(['last_choice','last_side'])]\ntl = trials[trials.last_outcome<2]\nps_bylast_mix = [(psy(t,valid_only=True),lab) for lab,t in tl.groupby(['last_outcome','last_choice'])]\nps_by_distractors_p, ps_by_distractors_e, ps_by_distractors_n = psy(trials, x='distractors')\nrc_rand_w = rc_regress(trials, trials_timing, choice='boot', bin_width=rc_bins, data_path=data_path)\nrc_w = rc_regress(trials, trials_timing, choice='real', bin_width=rc_bins, data_path=data_path)\nhm = heatmap(trials, at_least_ntrials=3)\nfix_bytotal = fixdif(trials, 'total')\n\n## PLOTS\n# setup axes\nfig, axs = pl.subplots(2,3, figsize=(12,7)); axs = axs.ravel()\nfig2, axs2 = pl.subplots(2,3, figsize=(12,7)); axs2 = axs2.ravel()\n\n# FIGURE 1\n# psychometrics pool\nps_p.plot(ax=axs[0], fmt='o', yerr=ps_e)\n#ps_p.plot(ax=axs[0], fmt='-', yerr=ps_e, alpha=0.4)\npsy_fit(ps_p, ax=axs[0], alpha=0.5)\n# psychometrics by difficulty\nplot_multiple_psy(*zip(*ps_bydif), ax=axs[1])\naxs[1].set_title('By Difficulty')\n# reverse correlation\nrc_w.T.plot(kind='box', ax=axs[3], color='black', rot=90)\nrc_rand_w.T.plot(kind='box', ax=axs[3], color='gray', rot=90, **boxplotkwargs)\n# heatmap\naxs[5].imshow(hm, cmap=pl.cm.coolwarm, interpolation='nearest', origin='lower')\naxs[5].set_ylabel('#L')\naxs[5].set_xlabel('#R')\naxs[5].set_aspect('equal', 'datalim')\n[pretty(ax=ax) for ax in axs]\npl.tight_layout()\n\n# FIGURE 2\n# psychometrics by last side\nps_p.plot(ax=axs2[0], fmt='o', yerr=ps_e, color='gray', label='all')\npsy_fit(ps_p, ax=axs2[0], alpha=0.5, color='gray')\nplot_multiple_psy(*zip(*ps_bylast_side), ax=axs2[0])\naxs2[0].set_title('By Last Correct Side')\n# psy by last choice\nps_p.plot(ax=axs2[1], fmt='o', yerr=ps_e, color='gray', label='all', title='only valid')\npsy_fit(ps_p, ax=axs2[1], alpha=0.5, color='gray')\nplot_multiple_psy(*zip(*ps_bylast_choice), ax=axs2[1])\naxs2[1].set_title('By Last Choice')\n# psychometrics by subj\nplot_multiple_psy(ps_ind, ps_ind_names, ax=axs2[2])\n# detailed logistic rev cor\nfor column,color in zip(rc_w.columns, pl.cm.viridis(np.linspace(0,1,rc_w.shape[1]))):\n rc_w[column].plot(ax=axs2[3], color=color, rot=90, marker='o', label=column)\n rc_rand_w[column].plot(ax=axs2[3], color='gray', linestyle='--', rot=90, marker='o', label=column)\n# fixed difference analyses\nfix_bytotal.plot(ax=axs2[4], style='o')\naxs2[4].set_xlabel('Total # of puffs')\naxs2[4].set_ylabel('Performance relative to mean for R-L groups')\n# aesthetics\n[pretty(ax=ax) for ax in axs2]\npl.tight_layout()\n\n## Fancier Figures\nif fancy_fig == 'heatmap':\n\n fig = pl.figure(num='heat', figsize=(4.2,3.78))\n ax = pl.gca()\n mapp = ax.imshow(hm, cmap=pl.cm.coolwarm, interpolation='nearest', origin='lower')\n ax.tick_params(axis='both', which='both', bottom='on', top='off', left='on', right='off', labelsize=fontsize)\n ax.set_xticks([1,10,20])\n ax.set_yticks([1,10,20])\n\n cb = pl.colorbar(mapp, fraction=0.046, pad=0.04)\n cb.set_ticks([0,np.nanmax(hm)])\n #cb.set_ticklabels(['100% left','100% right'])\n cb.set_ticklabels([])\n #cb.ax.tick_params(labelsize=fontsize)\n #ax.set_ylabel('# L puffs', fontsize=40)\n #ax.set_xlabel('# R puffs', fontsize=40)\n\n despine(ax=ax)\n pl.tight_layout()\n\nif fancy_fig == 'regression':\n fontsize = 'xx-large'\n\n fig = pl.figure(num='regr', 
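The psy/psy_fit/rc_regress helpers above are imported from the project's routines module and are not reproduced in this file. As a point of reference only, a psychometric curve of the kind plotted here reduces to a groupby-mean of choice over signed evidence; a toy version with a simulated observer:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
evidence = rng.integers(-10, 11, 2000)               # toy #R - #L values
p_right = 1 / (1 + np.exp(-0.4 * evidence))          # simulated observer
choice = rng.random(2000) < p_right
df = pd.DataFrame({'evidence': evidence, 'choice': choice})
curve = df.groupby('evidence')['choice'].mean()      # fraction rightward
print(curve.loc[[-10, 0, 10]].round(2).tolist())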
figsize=(4.95,3.78))\n ax = fig.add_subplot(111)\n\n ax.plot(rc_w.index, rc_w.values, color='darkcyan', linewidth=5, label='Data')\n ax.plot(rc_rand_w.index, rc_rand_w.values, linestyle='--', color='gray', linewidth=3, label='Shuffled')\n\n ax.set_yticks(np.arange(0, 0.31,0.15))\n ax.set_xticks(np.arange(0,4,1.))\n ax.set_xlim([0,3.7])\n ax.set_ylim(-.06,.3)\n ax.tick_params(labelsize=fontsize)\n ax.set_xlabel('Time in trial (sec)', fontsize=fontsize)\n ax.set_ylabel('Regression weight', fontsize=fontsize)\n\n pl.legend(loc='right')\n\n pretty(ax=ax)\n\n pl.tight_layout()\n\nif fancy_fig == 'psy':\n fontsize = 18\n\n fig = pl.figure('psy', figsize=(3.9,3.78))\n ax = fig.add_subplot(111)\n ps_p.plot(ax=ax, fmt='o', yerr=ps_e, color='k', markeredgewidth=0, ecolor='gray')\n ax.tick_params(labelsize=fontsize)\n ax.set_xticks([-10,0,10])\n ax.set_yticks([0,.5,1])\n ax.set_ylim([-.05, 1.05])\n ax.set_xlim(ps_p.index.min()-1, ps_p.index.max()+1)\n\n ax.set_xlabel('#R - #L', fontsize=fontsize)\n ax.set_ylabel('Fraction rightward decisions', fontsize=fontsize)\n\n psy_fit(ps_p, ax=ax, color='gray', alpha=.5)\n\n pretty(ax=ax)\n pl.tight_layout()\n\nif fancy_fig == 'bylast':\n pp = dict(markersize=2, markeredgewidth=0)\n\n fig,axs = pl.subplots(1,3, num='psy_bylast', figsize=(8.5,3.2))\n\n ps_p.plot(ax=axs[0], fmt='o', yerr=ps_e, color='gray', label='All', **pp)\n psy_fit(ps_p, ax=axs[0], alpha=0.5, color='gray', **pp)\n pss,labs = zip(*ps_bylast_side)\n plot_multiple_psy(pss, num2side(labs), ax=axs[0], **pp)\n axs[0].set_title('By Last Correct Side')\n\n ps_p.plot(ax=axs[1], fmt='o', yerr=ps_e, color='gray', label='All', **pp)\n psy_fit(ps_p, ax=axs[1], alpha=0.5, color='gray')\n pss,labs = zip(*ps_bylast_choice)\n plot_multiple_psy(pss, num2side(labs), ax=axs[1], **pp)\n axs[1].set_title('By Last Choice')\n \n ps_p.plot(ax=axs[2], fmt='o', yerr=ps_e, color='gray', label='All', **pp)\n psy_fit(ps_p, ax=axs[2], alpha=0.5, color='gray')\n pss,labs = zip(*ps_bylast_mix)\n plot_multiple_psy(pss, num2side(labs), ax=axs[2], **pp)\n axs[2].set_title('By Last Outcome+Choice')\n\n #axs[0].set_title('')\n #axs[1].set_title('')\n axs[0].set_xticks([-20,0,20])\n axs[1].set_xticks([-20,0,20])\n axs[2].set_xticks([-20,0,20])\n axs[0].set_yticks([0,.5,1])\n axs[1].set_yticks([])\n axs[2].set_yticks([])\n\n axs[0].tick_params(labelsize=fontsize)\n axs[1].tick_params(labelsize=fontsize)\n axs[2].tick_params(labelsize=fontsize)\n\n axs[1].get_legend().remove()\n\n handles, labels = axs[0].get_legend_handles_labels()\n handles = [h[0] for h in handles]\n axs[0].legend(handles,labels,fontsize=fontsize, columnspacing=0, labelspacing=0, borderpad=0, handletextpad=0, borderaxespad=0, loc='upper left', markerscale=3)\n\n pretty(ax=axs[0])\n pretty(ax=axs[1])\n pretty(ax=axs[2])\n pl.tight_layout()\n\nif fancy_fig == 'chro':\n fig = pl.figure('chro', figsize=(6,3.78))\n ax = fig.add_subplot(111)\n\n # redo chro analysis for by-subject values\n chi,_across = chro(trials, bysub=True, nbins=None, with_delay=False)\n ch_p_bs = []\n for subj,p,e in chi:\n omean = trials[trials.subj==subj].outcome.mean()\n p -= omean\n ch_p_bs.append(p)\n ch_e_bs = pd.DataFrame(ch_p_bs).T.sem(axis=1)\n ch_p_bs = pd.DataFrame(ch_p_bs).T.mean(axis=1)\n\n ch_p_bs.plot(ax=ax, fmt='o', yerr=ch_e_bs, color=(.9,.59,.4), markersize=14, markeredgewidth=0)\n ax.tick_params(labelsize=fontsize)\n ax.set_xlim(ch_p_bs.index.min()-.5, ch_p_bs.index.max()+.5)\n ax.set_xlabel('')\n ax.set_xticks(np.round(ch_p_bs.index))\n ax.set_yticks([-.08, -.04, 0, .04])\n 
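psy_fit, likewise from routines, overlays a fitted curve on the raw points. A plausible two-parameter logistic fit with scipy is sketched below; the (bias, slope) parameterization is an assumption, not their implementation:

import numpy as np
from scipy.optimize import curve_fit

def logistic(x, bias, slope):
    return 1.0 / (1.0 + np.exp(-slope * (x - bias)))

x = np.arange(-10, 11).astype(float)
y = logistic(x, 1.0, 0.4) + np.random.default_rng(1).normal(0, 0.02, x.size)
(bias, slope), _ = curve_fit(logistic, x, y, p0=(0.0, 0.1))
print(round(bias, 1), round(slope, 1))   # close to 1.0 and 0.4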
#ticf(y=2, ax=ax)\n pretty(ax=ax)\n pl.tight_layout()\n\nif fancy_fig == 'difficulty':\n fig = pl.figure('difficulty', figsize=(9,7))\n ax = fig.add_subplot(111)\n t = trials[trials.outcome<2]\n ud = t.difficulty.unique()\n p = np.array([t[t.difficulty==d].outcome.mean() for d in ud])\n ud[ud==0] = np.inf\n p = p[np.argsort(ud)]\n ud = np.sort(ud)\n ax.plot(p,'ko-')\n ax.set_xticks(np.arange(len(ud))) \n ax.set_xticklabels([str(i) for i in ud])\n ax.tick_params(labelsize=fontsize)\n ax.set_ylim([-.05, 1.05])\n pretty(ax=ax)\n\n fig = pl.figure('diffpsy')\n ax = fig.add_subplot(111)\n plot_multiple_psy(*zip(*ps_bydif), ax=ax)\n\nif fancy_fig == 'psychometric_animation':\n fig = pl.figure('psychometric animation')\n ax = fig.add_subplot(111)\n a = trials.session.unique()\n bufsize = 5\n cols = pl.cm.Greys(np.linspace(0.1,1,bufsize))\n buf = []\n for i in range(1,len(a)):\n t = trials[trials.session.isin(a[:i])]\n p,e,n = psy(t)\n ax = p.plot(marker='o', ax=ax, color='k')\n linelist = list(ax.get_lines())\n for l in linelist[:-bufsize]:\n l.remove()\n for c,l in zip(cols,linelist):\n l.set_color(c)\n pl.ylim([0,1])\n pl.pause(0.001)\n\nif fancy_fig == 'future':\n ns = np.arange(1,6,1)\n (fcor,ecor), (fincor,eincor) = future(trials, ns=ns)\n\n fig = pl.figure('future', figsize=(5.8,2.8))\n ax = fig.add_subplot(111)\n ax.errorbar(ns, fcor*100, yerr=ecor*100, fmt='o-', label='Correct', markeredgewidth=0, markersize=10) \n ax.errorbar(ns, fincor*100, yerr=eincor*100, fmt='o-', label='Error', markeredgewidth=0, markersize=10) \n ax.set_yticks([0,10,20])\n ax.set_xticks(ns)\n ax.hlines(0, ns.min(), ns.max(), linestyle='--', color='gray', alpha=.5)\n ax.legend(loc='best', fontsize=fontsize)\n handles_, labels = ax.get_legend_handles_labels()\n handles = [h[0] for h in handles_]\n ax.legend(handles, labels, loc='best', fontsize=fontsize)\n \n #ax.set_xlabel('Trials in the future')\n #ax.set_ylabel('Bias (%)')\n ax.tick_params(labelsize=fontsize)\n pretty(ax=ax)\n pl.tight_layout()\n\nif fancy_fig == 'fixdif':\n fig,axs = pl.subplots(1,2, gridspec_kw=dict(hspace=0.2, wspace=0.1, left=0.07, bottom=0.15, right=0.95), num='fixdif', figsize=(8,2.8)); axs=axs.ravel()\n (fix_bytotal*100).plot(ax=axs[0], style='o', markeredgewidth=0, markersize=10)\n #axs[0].set_xlabel('Total # of puffs', fontsize=fontsize)\n #axs[0].set_ylabel('∆ Performance (%)', fontsize=fontsize)\n axs[0].set_xlabel('')\n axs[0].set_ylim([-12,12])\n axs[0].set_xlim([0,25.5])\n axs[0].tick_params(labelsize=fontsize)\n (100*fix_bydur).plot(ax=axs[1], style='o', markeredgewidth=0, markersize=10)\n axs[1].set_ylim([-12,12])\n axs[1].set_xlim([.75,4.5])\n #axs[1].set_xlabel('Trial Duration (sec)', fontsize=fontsize)\n #axs[1].set_ylabel('∆ Performance (%)', fontsize=fontsize)\n axs[1].set_xlabel('')\n axs[1].tick_params(labelsize=fontsize)\n ticf(x=2, ax=axs[1])\n axs[1].set_yticks([])\n [pretty(ax=a) for a in axs]\n #pl.tight_layout()\n\nif fancy_fig == 'rates':\n res = {s:[] for s in trials.subj.unique()}\n for subj,sg in trials.groupby('subj'):\n for sesh,shg in sg.groupby('session'):\n dur = shg.end.max() - shg.start.min() #seconds\n res[subj].append(shg.outcome.mean())\n\nif fancy_fig == 'psy_bysub':\n fig,axs = pl.subplots(2,2, num='bysub', gridspec_kw=dict(left=0.14, bottom=0.12,top=0.98,right=0.99,hspace=0.1, wspace=0.1), figsize=(4.2,3.78)); axs=axs.ravel()\n for (p,e,n),ax in zip(ps_ind,axs):\n p.plot(ax=ax, fmt='o', yerr=e, markeredgewidth=0, color='gray', alpha=0.5)\n psy_fit(p, color='k', ax=ax, linewidth=5, alpha=0.8)\n 
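The chro block just above stacks one mean-subtracted curve per subject and reduces across subjects with pandas; .T puts subjects in columns so mean and sem run across them. Isolated on toy curves:

import pandas as pd

curves = [pd.Series([0.10, 0.20, 0.30]),    # one per-subject curve each
          pd.Series([0.20, 0.20, 0.40])]
stacked = pd.DataFrame(curves).T            # rows: bins, columns: subjects
print(stacked.mean(axis=1).tolist())        # [0.15, 0.2, 0.35]
print(stacked.sem(axis=1).round(3).tolist())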
ax.set_ylim([-0.1,1.1])\n ax.set_xlim([-25,28])\n ax.set_yticks([0,0.5,1])\n ax.set_xticks([-20,0,20])\n ax.tick_params(labelsize=fontsize)\n pretty(ax=ax)\n axs[1].set_yticks([])\n axs[3].set_yticks([])\n axs[0].set_xticks([])\n axs[1].set_xticks([])\n\n##\n","sub_path":"analysis/c7/bi_main.py","file_name":"bi_main.py","file_ext":"py","file_size_in_byte":13109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"527245937","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport sqlite3\nimport sys\n\nfrom datetime import datetime\nfrom gzip import GzipFile\nfrom logging import StreamHandler\nfrom threading import Lock\n\nfrom logging.handlers import RotatingFileHandler\n\nfrom twicorder.config import Config\nfrom twicorder.constants import (\n COMPRESSED_EXTENSIONS,\n DEFAULT_APP_DATA_CONNECTION_TIMEOUT,\n REGULAR_EXTENSIONS,\n TW_TIME_FORMAT,\n)\nfrom twicorder.project_manager import ProjectManager\n\n\nclass TwiLogger:\n\n _logger = None\n\n @classmethod\n def setup(cls):\n cls._logger = logging.getLogger('Twicorder')\n file_handler = RotatingFileHandler(\n ProjectManager.logs,\n maxBytes=1024**2 * 10,\n backupCount=5\n )\n formatter = logging.Formatter(\n '%(asctime)s: [%(levelname)s] %(message)s'\n )\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.WARNING)\n cls._logger.addHandler(file_handler)\n\n stream_handler = StreamHandler(sys.stdout)\n stream_handler.setLevel(logging.DEBUG)\n cls._logger.addHandler(stream_handler)\n\n cls._logger.setLevel(logging.DEBUG)\n\n def __new__(cls, *args, **kwargs):\n if not cls._logger:\n cls.setup()\n return cls._logger\n\n\ndef auto_commit(func):\n def func_wrapper(self, *args, **kwargs):\n with self._conn:\n func(self, *args, **kwargs)\n return func_wrapper\n\n\nclass AppData:\n \"\"\"\n Class for reading and writing AppData to be used between sessions.\n \"\"\"\n\n _config = Config.get()\n _timeout = (\n _config.get('appdata_connection_timeout') or\n DEFAULT_APP_DATA_CONNECTION_TIMEOUT\n )\n _con = sqlite3.connect(\n ProjectManager.app_data,\n check_same_thread=False,\n timeout=float(_timeout)\n )\n _lock = Lock()\n\n def __del__(self):\n self._con.close()\n\n @classmethod\n def _make_query_table(cls, name):\n with cls._lock, cls._con as con:\n con.execute(\n f'''\n CREATE TABLE IF NOT EXISTS [{name}] (\n tweet_id INTEGER PRIMARY KEY,\n timestamp INTEGER NOT NULL\n )\n '''\n )\n\n @classmethod\n def _make_last_id_table(cls):\n with cls._lock, cls._con as con:\n con.execute(\n '''\n CREATE TABLE IF NOT EXISTS queries_last_id (\n query_hash TEXT PRIMARY KEY,\n tweet_id INTEGER NOT NULL\n )\n '''\n )\n\n @classmethod\n def add_query_tweet(cls, query_name, tweet_id, timestamp):\n cls._make_query_table(query_name)\n with cls._lock, cls._con as con:\n con.execute(\n f'''\n INSERT OR REPLACE INTO {query_name} VALUES (\n ?, ?\n )\n ''',\n (tweet_id, timestamp)\n )\n\n @classmethod\n def add_query_tweets(cls, query_name, tweets):\n cls._make_query_table(query_name)\n with cls._lock, cls._con as con:\n con.executemany(\n f'''\n INSERT OR REPLACE INTO {query_name} VALUES (\n ?, ?\n )\n ''',\n tweets\n )\n\n @classmethod\n def get_query_tweets(cls, query_name):\n cls._make_query_table(query_name)\n with cls._lock, cls._con as con:\n cursor = con.cursor()\n cursor.execute(\n f'''\n SELECT DISTINCT\n tweet_id, timestamp\n FROM\n {query_name}\n '''\n )\n return cursor.fetchall()\n\n @classmethod\n def set_last_query_id(cls, query_hash, tweet_id):\n 
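TwiLogger above is a standard two-handler arrangement: warnings and above to a rotating file, everything to stdout. The same wiring in runnable form, with a throwaway path standing in for ProjectManager.logs:

import logging, sys, tempfile, os
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('demo')
logfile = os.path.join(tempfile.mkdtemp(), 'demo.log')
fh = RotatingFileHandler(logfile, maxBytes=1024**2 * 10, backupCount=5)
fh.setFormatter(logging.Formatter('%(asctime)s: [%(levelname)s] %(message)s'))
fh.setLevel(logging.WARNING)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
logger.addHandler(fh); logger.addHandler(sh)
logger.setLevel(logging.DEBUG)
logger.warning('file and stdout'); logger.debug('stdout only')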
cls._make_last_id_table()\n with cls._lock, cls._con as con:\n con.execute(\n '''\n INSERT OR REPLACE INTO queries_last_id VALUES (\n ?, ?\n )\n ''',\n (query_hash, tweet_id)\n )\n\n @classmethod\n def get_last_query_id(cls, query_hash):\n cls._make_last_id_table()\n with cls._lock, cls._con as con:\n cursor = con.cursor()\n cursor.execute(\n '''\n SELECT\n DISTINCT\n tweet_id\n FROM\n queries_last_id\n WHERE\n query_hash=?\n ''',\n (query_hash,)\n )\n result = cursor.fetchone()\n if not result:\n return\n return result[0]\n\n\ndef twopen(filename, mode='r'):\n \"\"\"\n Replacement method for Python's build-in open. Adds the option to handle\n compressed files.\n\n Args:\n filename (str): Path to file\n mode (str): Open mode\n\n Returns:\n TextIOWrapper / GzipFile: File object\n\n Raises:\n IOError: If extension is unknown.\n\n \"\"\"\n filename = os.path.expanduser(filename)\n dirname = os.path.dirname(filename)\n if mode in ('a', 'w') and not os.path.isdir(dirname):\n os.makedirs(dirname)\n ext = os.path.splitext(filename)[-1].strip('.')\n if ext in REGULAR_EXTENSIONS:\n return open(file=filename, mode=mode)\n elif ext in COMPRESSED_EXTENSIONS:\n return GzipFile(filename=filename, mode=mode)\n else:\n raise IOError('Unrecognised format: {}'.format(ext))\n\n\ndef read(filename):\n \"\"\"\n Reading the file for a given path.\n\n Args:\n filename (str): Path to file to read\n\n Returns:\n str: File data\n\n \"\"\"\n with twopen(filename=filename, mode='r') as file_object:\n data = file_object.read()\n if isinstance(file_object, GzipFile):\n data = data.decode('utf-8')\n return data\n\n\ndef readlines(filename):\n \"\"\"\n Reading the file for a given path.\n\n Args:\n filename (str): Path to file to read\n\n Returns:\n str: File data\n\n \"\"\"\n with twopen(filename=filename, mode='r') as file_object:\n data = file_object.readlines()\n if isinstance(file_object, GzipFile):\n data = [d.decode('utf-8') for d in data]\n return data\n\n\ndef write(data, filename, mode='a'):\n \"\"\"\n Appending data to the given file.\n\n Args:\n data (str): Data to append to the given file\n filename (str): Path to file to write\n mode (str): File stream mode ('a'. 
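twopen above dispatches on file extension using REGULAR_EXTENSIONS and COMPRESSED_EXTENSIONS from twicorder.constants, which this excerpt does not show. A reduced version with guessed extension lists:

import gzip, os, tempfile

def open_any(filename, mode='r'):
    ext = os.path.splitext(filename)[-1].strip('.')
    if ext in ('txt', 'json'):                      # guessed "regular" list
        return open(filename, mode)
    if ext in ('gz', 'gzip'):                       # guessed "compressed" list
        return gzip.GzipFile(filename=filename, mode=mode)
    raise IOError('Unrecognised format: {}'.format(ext))

path = os.path.join(tempfile.mkdtemp(), 'sample.txt')
with open_any(path, 'w') as fh:
    fh.write('hello')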
'w' etc)\n\n \"\"\"\n with twopen(filename=filename, mode=mode) as file_object:\n if isinstance(file_object, GzipFile):\n file_object.write(data.encode('utf-8'))\n return\n file_object.write(data)\n\n\ndef message(title='Warning', body='', width=80):\n \"\"\"\n Prints a formatted message based on input\n\n Args:\n title (str): Title of the message\n body (str): Message body\n width (int): Message line width\n\n \"\"\"\n header = ' {} '.format(title).center(width, '=')\n footer = '=' * width\n text = (\n '\\n'\n '{}\\n'\n '\\n'\n '{}\\n'\n '\\n'\n '{}\\n'\n '\\n'\n )\n print(text.format(header, body, footer))\n\n\ndef collect_key_values(key, data):\n \"\"\"\n Builds a list of values for all keys matching the given \"key\" in a nested\n dictionary.\n\n Args:\n key (object): Dictionary key to search for\n data (dict): Nested data dict\n\n Returns:\n list: List of values for given key\n\n \"\"\"\n values = []\n for k, v in data.items():\n if k == key:\n values.append(v)\n continue\n if isinstance(v, dict):\n values += collect_key_values(key, v)\n return values\n\n\ndef flatten(l):\n \"\"\"\n Flattens a nested list\n\n Args:\n l (list): Nested list\n\n Returns:\n list: Flattened list\n\n \"\"\"\n return [item for sublist in l for item in sublist]\n\n\ndef str_to_date(text):\n \"\"\"\n Turns a time stamp represented as a string into a datetime object.\n\n Args:\n text (str): Time stamp\n\n Returns:\n datetime.datetime: Time stamp as datetime object\n\n \"\"\"\n return datetime.strptime(text, TW_TIME_FORMAT)\n\n\ndef timestamp_to_datetime(data):\n \"\"\"\n Traverse dictionary and convert all instances of time stamp strings into\n datetime objects.\n\n Args:\n data (dict): Tweet dictionary\n\n Returns:\n dict: Updated tweet dictionary\n\n \"\"\"\n for key, value in data.items():\n if key in ['created_at', 'recorded_at'] and isinstance(value, str):\n data[key] = datetime.strptime(value, TW_TIME_FORMAT)\n elif isinstance(value, dict):\n data[key] = timestamp_to_datetime(value)\n elif isinstance(value, list):\n data[key] = [\n timestamp_to_datetime(v) for v in value if isinstance(v, dict)\n ]\n return data\n\n\ndef stream_to_search(data):\n \"\"\"\n Conform tweet dictionaries collected from the streaming API to the format of\n tweets collected from the search API.\n\n Args:\n data (dict): Tweet dictionary\n\n Returns:\n dict: Updated tweet dictionary\n\n \"\"\"\n extended_tweet = data.get('extended_tweet')\n if extended_tweet:\n data.pop('extended_tweet')\n data.update(extended_tweet)\n data['truncated'] = False\n data.pop('text')\n else:\n if data.get('text'):\n data['full_text'] = data.pop('text')\n for key, value in data.items():\n if key in ['retweeted_status', 'quoted_status']:\n data[key] = stream_to_search(value)\n return data\n","sub_path":"twicorder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"108644155","text":"#!/usr/bin/env python3\n\nimport os, json, sys\n\ndef create_item(title, arg, icon):\n\titem = {\n\t\t\"title\": title,\n\t\t\"subtitle\": title,\n\t\t\"arg\": arg,\n\t\t\"icon\": {\n\t\t\t\"type\": \".png\",\n\t\t\t\"path\": icon\n\t\t}\n\t\t\n\t}\n\treturn item\n\n\nicons_file = os.getenv('icons_filepath')\nfor dirpath, dirname, files in os.walk(icons_file):\n\tbreak;\n\nitems = []\n\nitem = {\n\t\"title\": \"All Icons\",\n\t\"subtitle\": \"Replace All Icons\",\n\t\"arg\": \"all\",\n\t\"icon\": {\n\t\t\"type\": \".png\",\n\t\t\"path\": 
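str_to_date and timestamp_to_datetime above both lean on TW_TIME_FORMAT from twicorder.constants, whose value is not visible here. Twitter's classic created_at layout is the likely candidate, so treat the format string below as an assumption:

from datetime import datetime

TW_TIME_FORMAT = '%a %b %d %H:%M:%S %z %Y'        # assumed value
stamp = 'Wed Oct 10 20:19:24 +0000 2018'
print(datetime.strptime(stamp, TW_TIME_FORMAT))   # 2018-10-10 20:19:24+00:00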
\"AppIcon.icns\"\n\t}\n}\n\nitems.append(item)\n\nfor file in files:\n\tif file != \".DS_Store\":\n\t\tpath = dirpath + file\n\t\tif file != \".DS_Store\" and (file.endswith(\".png\") or file.endswith(\".PNG\") or file.endswith(\".icns\") or file.endswith(\".ICNS\")):\n\t\t\tif len(sys.argv) > 1 and file.lower().find(sys.argv[1]) == -1:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tpath = dirpath + file\n\t\t\t\ttitle = file\n\t\t\t\targ = file\n\t\t\t\titem = create_item(title, arg, path)\n\t\t\t\titems.append(item)\n\t\t\t\nresults = {\n\t\"items\": items\n}\nprint(json.dumps(results))\t\t\t","sub_path":"src/choose.py","file_name":"choose.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"42479668","text":"\nclass ConnectGame:\n def __init__(self, board):\n self.matrix = self.prepare_board(board)\n\n def prepare_board(self, board):\n result = []\n for it, i in enumerate(board, 1):\n result.append(i.replace(' ', ''))\n return result[0].split('\\n')\n\n def get_winner(self):\n if len(self.matrix[0]) < 2 and len(self.matrix) < 2:\n return self.matrix[0]\n adj_list_zero, vertex_count_zero, ver_matr_zero = self.get_edges(self.matrix, \"O\")\n adj_list_x, vertex_count_x, ver_matr_x = self.get_edges(list(zip(*self.matrix)), \"X\")\n zero_flag = 0\n x_flag = 0\n for vertex in ver_matr_zero[0]:\n visited = [False] * vertex_count_zero\n self.dfs(vertex, adj_list_zero, visited)\n if any(visited[-1*len(self.matrix[0]):]):\n zero_flag = 1\n for vertex in ver_matr_x[0]:\n visited = [False] * vertex_count_x\n self.dfs(vertex, adj_list_x, visited)\n if any(visited[-1*len(self.matrix[0]):]):\n x_flag = 1\n if zero_flag == x_flag:\n return ''\n elif zero_flag:\n return 'O'\n else:\n return 'X'\n\n def get_edges(self, matrix, player_code):\n columns = list(zip(*matrix))\n vertex_matrix = [[i*len(columns) + j for j, _ in enumerate(columns)]\n for i, _ in enumerate(matrix)]\n vertex_count = len(matrix)*len(columns)\n adj_list = []\n for vert in range(vertex_count):\n if matrix[int(vert / len(columns))][vert % len(columns)] != player_code:\n adj_list.append([])\n continue\n temp = []\n for row in range(int(vert / len(columns))-1, int(vert / len(columns))+2):\n for col in range(vert % len(columns)-1, vert % len(columns)+2):\n if (row < 0 or col < 0):\n continue\n try:\n if matrix[row][col] == player_code:\n temp.append(row * len(columns) + col)\n except:\n pass\n adj_list.append(temp)\n return adj_list, vertex_count, vertex_matrix\n\n def dfs(self, vertex, adj_list, visited):\n visited[vertex] = True\n for new_vertex in adj_list[vertex]:\n if not visited[new_vertex]:\n self.dfs(new_vertex, adj_list, visited)\n","sub_path":"connect/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"216129257","text":"\"\"\"\n@author: Luiza Sayfullina\nCode for paper \"Learning Representations for Soft Skills Matching\"\nhttps://arxiv.org/abs/1807.07741 or https://link.springer.com/chapter/10.1007/978-3-030-11027-7_15\n\"\"\"\n\nimport numpy as np\nfrom JobDataClass import JobData\nfrom CVDataClass import CVData\nfrom lstm_models import LSTM, LSTMWithEmbedding\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n# downloaded from https://github.com/Bjarten/early-stopping-pytorch\nfrom pytorchtools import EarlyStopping\nfrom utilities import 
find_recall_for_fixed_precision\n\n\nmode = 'unmodified' # the type of input representation\ndataset = JobData(mode=mode)\n(Xtrain, Lentrain, Ytrain, Skills_train), (Xtest, Lentest, Ytest, Skills_test) = \\\n dataset.get_word_indices_all(return_lens=True)\n\nvoc_size, dim = np.shape(dataset.embed_matrix)\n\nNtrain = len(Xtrain)//2\nNtest = len(Xtest)\nNvalid = len(Xtrain[Ntrain:])\n\nLentrain = np.array(Lentrain).astype(int)\nLentest = np.array(Lentest).astype(int)\n\nLenvalid = Lentrain[Ntrain:].copy()\nLentrain = Lentrain[0:Ntrain]\n\nprint('The size of test data:', Ntest)\n\nXtrain, Xtest = np.array(Xtrain,dtype=np.int64), np.array(Xtest,dtype=np.int64)\nYtrain, Ytest = np.array(Ytrain,dtype=np.int64), np.array(Ytest,dtype=np.int64)\nSkills_train = np.array(Skills_train,dtype=np.int64)\nSkills_test = np.array(Skills_test,dtype=np.int64)\n\nprint('The number of positive training samples:', sum(Ytrain))\nprint('The number of negative training samples:', len(Ytrain)-sum(Ytrain))\n\nprint('The number of positive test samples:', sum(Ytest))\nprint('The number of negative test samples:', len(Ytest) - sum(Ytest))\n\nN, num_words = np.shape(Xtrain)\nbatch_size = 512\n\ntrain_data = torch.from_numpy(Xtrain[0:Ntrain])\ntrain_labels = torch.from_numpy(Ytrain[0:Ntrain])\ntrain_skills = torch.from_numpy(Skills_train[0:Ntrain])\n\ntest_data = torch.from_numpy(Xtest)\ntest_labels = torch.from_numpy(Ytest)\ntest_skills = torch.from_numpy(Skills_test)\n\nvalid_data = torch.from_numpy(Xtrain[Ntrain:])\nvalid_labels = torch.from_numpy(Ytrain[Ntrain:])\nvalid_skills = torch.from_numpy(Skills_train[Ntrain:])\n\ntrain_dataset = torch.utils.data.TensorDataset(train_data, train_labels)\ntest_dataset = torch.utils.data.TensorDataset(test_data,test_labels)\nvalid_dataset = torch.utils.data.TensorDataset(valid_data,valid_labels)\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=False)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n\nvalid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n shuffle=False)\n\nnum_epochs = 100\nlearning_rate = 0.001\ndropout = 0.2\ninput_size = dim\nsequence_length = np.shape(Xtrain)[1]\nhidden_size = 100\nnum_layers = 1\nnum_classes = 2\ngradient_clipping_value = 0\nvar_len = True\n\nlstm = LSTMWithEmbedding(dataset, input_size, hidden_size, num_layers, batch_size=batch_size, dropout=dropout)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)\n\nvalid_acc_history = []\nuse_cuda = False\n\nearly_stopping = EarlyStopping(patience=3, verbose=True, delta=0)\n\nfor epoch in range(num_epochs):\n\n print('Epoch:', epoch)\n train_loss_avg = 0\n\n idx = np.array(np.random.permutation(range(Ntrain)))\n idx_torch = torch.LongTensor(idx)\n train_data = torch.index_select(train_data, 0, idx_torch)\n train_labels = torch.index_select(train_labels, 0, idx_torch)\n Lentrain = Lentrain[idx]\n\n for i in range(int(np.ceil(Ntrain // batch_size))):\n\n if (batch_size * (i + 1)) <= Ntrain:\n images = train_data[batch_size * i:batch_size * (i + 1)]\n labels = train_labels[batch_size * i:batch_size * (i + 1)]\n lens = Lentrain[batch_size * i:batch_size * (i + 1)]\n skills = train_skills[batch_size * i:batch_size * (i + 1)]\n else:\n images = train_data[batch_size * i:]\n labels = train_labels[batch_size * i:]\n lens = Lentrain[batch_size * i:]\n skills = train_skills[batch_size * i:]\n\n ind = 
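The descending argsort of lens before every forward pass in this script is the classic preparation for pack_padded_sequence, which in older PyTorch required lengths in descending order (newer releases accept enforce_sorted=False). Minimal illustration:

import torch
from torch.nn.utils.rnn import pack_padded_sequence

batch = torch.zeros(3, 5, 8)                 # (batch, max_len, features)
lens = [5, 3, 2]                             # already descending
packed = pack_padded_sequence(batch, lens, batch_first=True)
print(packed.batch_sizes)                    # tensor([3, 3, 2, 1, 1])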
torch.LongTensor(np.argsort(np.array(lens))[::-1].copy())\n\n if use_cuda:\n images = Variable(torch.index_select(images, 0, ind)).cuda()\n labels = Variable(torch.index_select(labels, 0, ind)).cuda()\n skills = Variable(torch.index_select(skills, 0, ind)).cuda()\n else:\n images = Variable(torch.index_select(images, 0, ind))\n labels = Variable(torch.index_select(labels, 0, ind))\n skills = Variable(torch.index_select(skills, 0, ind))\n\n lens = sorted(lens)[::-1]\n optimizer.zero_grad()\n\n if batch_size * (i + 1) > Ntrain:\n lstm.hidden = lstm.init_hidden(Ntrain - batch_size * i)\n else:\n lstm.hidden = lstm.init_hidden()\n\n outputs = lstm(images, skills, lens)\n loss = criterion(outputs, labels)\n loss.backward()\n\n if gradient_clipping_value > 0:\n torch.nn.utils.clip_grad_norm(lstm.parameters(), gradient_clipping_value)\n\n optimizer.step()\n train_loss_avg += loss.data[0]\n\n total = 0\n correct = 0\n lstm.eval()\n\n for i, (images, labels) in enumerate(valid_loader):\n\n if batch_size * (i + 1) > Nvalid:\n lstm.hidden = lstm.init_hidden(Nvalid - batch_size * i)\n else:\n lstm.hidden = lstm.init_hidden()\n\n if var_len:\n if batch_size * (i + 1) <= Nvalid:\n lens = Lenvalid[i * batch_size:(i + 1) * batch_size]\n skills = valid_skills[i * batch_size:(i + 1) * batch_size]\n else:\n lens = Lenvalid[i * batch_size:]\n skills = valid_skills[i * batch_size:]\n\n ind = torch.LongTensor(np.argsort(np.array(lens))[::-1].copy())\n\n if use_cuda:\n images = Variable(torch.index_select(images, 0, ind)).cuda()\n skills = Variable(torch.index_select(skills, 0, ind)).cuda()\n else:\n images = Variable(torch.index_select(images, 0, ind))\n skills = Variable(torch.index_select(skills, 0, ind))\n\n lens = sorted(lens)[::-1]\n outputs = lstm(images, skills, lens)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted.cpu() == labels[ind].cpu()).numpy().sum()\n\n curr_acc = correct * 100.0 / total\n valid_acc_history.append(curr_acc)\n early_stopping(-curr_acc, lstm)\n\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n lstm.train()\n\nlstm.load_state_dict(torch.load('checkpoint.pt'))\n\nlstm.eval()\npred_border = []\ny_true_border = []\ntotal = 0\ncorrect = 0\n\nfor i, (images, labels) in enumerate(test_loader):\n\n if batch_size * (i + 1) > Ntest:\n lstm.hidden = lstm.init_hidden(Ntest - batch_size * i)\n else:\n lstm.hidden = lstm.init_hidden()\n\n if var_len:\n if batch_size * (i + 1) <= Ntest:\n lens = Lentest[i * batch_size:(i + 1) * batch_size]\n skills = test_skills[i * batch_size:(i + 1) * batch_size]\n else:\n lens = Lentest[i * batch_size:]\n skills = test_skills[i * batch_size:]\n\n ind = torch.LongTensor(np.argsort(np.array(lens))[::-1].copy())\n\n if use_cuda:\n images = Variable(torch.index_select(images, 0, ind)).cuda()\n skills = Variable(torch.index_select(skills, 0, ind)).cuda()\n else:\n images = Variable(torch.index_select(images, 0, ind))\n skills = Variable(torch.index_select(skills, 0, ind))\n\n lens = sorted(lens)[::-1]\n outputs = lstm(images, skills, lens)\n\n _, predicted = torch.max(outputs.data, 1)\n pred = F.softmax(outputs, 1).data.cpu().numpy()[:, 1]\n pred_border.extend(pred)\n y_true_border.extend(labels[ind].cpu().numpy())\n\n total += labels.size(0)\n correct += (predicted.cpu() == labels[ind]).sum()\n\nprint('Test Accuracy of the model: %d %%' % (100.0 * correct / total))\ntest_acc = 100 * correct / (total + 0.0)\n\nprint('Mode:', mode)\ndesired_precision = 0.90\nprecision, recall, f1_w, f1 = 
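EarlyStopping here comes from the third-party pytorchtools module linked in the header comment; as used above, its contract reduces to calling it once per epoch with a monitored value and stopping when early_stop flips. A hedged mock of just that counter logic (the real class also checkpoints the model to checkpoint.pt):

class EarlyStoppingMock:
    def __init__(self, patience=3, delta=0):
        self.patience, self.delta = patience, delta
        self.best, self.counter, self.early_stop = None, 0, False

    def __call__(self, value):
        # lower is better, matching the negated accuracy passed in above
        if self.best is None or value < self.best - self.delta:
            self.best, self.counter = value, 0
        else:
            self.counter += 1
            self.early_stop = self.counter >= self.patience

es = EarlyStoppingMock(patience=2)
for v in [-0.60, -0.70, -0.65, -0.66]:
    es(v)
print(es.early_stop)   # True: two epochs without improvement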
find_recall_for_fixed_precision(y_true_border, pred_border, desired_precision)\nprint('Precision: {0}, Recall: {1}, F1_weighted: {2}'.format(precision, recall, f1_w))\n\ndef get_cv_data(mode=mode, batch_size=512):\n\n \"\"\"\n\n :param mode: type of input representation(tagged, unmodified or masked), should be the same as used for training\n :param batch_size:\n :return:\n \"\"\"\n\n dataset = CVData(mode=mode)\n (Xtest, Ytest, Lentest, widx_ss_test) = dataset.get_word_indices_all(return_lens=True)\n\n Xtest, Ytest, Lentest = np.array(Xtest, dtype=np.int64), np.array(Ytest, dtype=np.int64), np.array(Lentest, dtype=np.int64)\n widx_ss_test = np.array(widx_ss_test, dtype=np.int64)\n\n Ntest = len(Xtest)\n test_data = torch.from_numpy(Xtest)\n test_labels = torch.from_numpy(Ytest)\n test_skills = torch.from_numpy(widx_ss_test)\n\n test_dataset = torch.utils.data.TensorDataset(test_data, test_labels)\n\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n\n return test_loader, Lentest, Ntest, test_skills\n\ntest_loader, Lentest, Ntest, test_skills = get_cv_data(mode=mode, batch_size=512)\n\npred_border = []\ny_true_border = []\n\ntotal = 0\ncorrect = 0\n\nfor i, (images, labels) in enumerate(test_loader):\n\n if batch_size * (i + 1) > Ntest:\n lstm.hidden = lstm.init_hidden(Ntest - batch_size * i)\n else:\n lstm.hidden = lstm.init_hidden()\n\n if var_len:\n if batch_size * (i + 1) <= Ntest:\n lens = Lentest[i * batch_size:(i + 1) * batch_size]\n skills = test_skills[i * batch_size:(i + 1) * batch_size]\n else:\n lens = Lentest[i * batch_size:]\n skills = test_skills[i * batch_size:]\n\n ind = torch.LongTensor(np.argsort(np.array(lens))[::-1].copy())\n\n if use_cuda:\n images = Variable(torch.index_select(images, 0, ind)).cuda()\n skills = Variable(torch.index_select(skills, 0, ind)).cuda()\n else:\n images = Variable(torch.index_select(images, 0, ind))\n skills = Variable(torch.index_select(skills, 0, ind))\n\n lens = sorted(lens)[::-1]\n outputs = lstm(images, skills, lens)\n\n _, predicted = torch.max(outputs.data, 1)\n pred = F.softmax(outputs, 1).data.cpu().numpy()[:, 1]\n pred_border.extend(pred)\n y_true_border.extend(labels[ind].cpu().numpy())\n\n total += labels.size(0)\n correct += (predicted.cpu() == labels[ind]).sum()\n\nprint('Mode:', mode)\nprint('CV dataset')\ndesired_precision = 0.90\nprecision, recall, f1_w, f1 = find_recall_for_fixed_precision(y_true_border, pred_border, desired_precision)\nprint('Precision: {0}, Recall: {1}, F1_weighted: {2}'.format(precision, recall, f1_w))","sub_path":"run_concatenated_lstm.py","file_name":"run_concatenated_lstm.py","file_ext":"py","file_size_in_byte":11177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"159367423","text":"import math\nimport json\nfrom json import JSONDecodeError\nfrom django.forms.models import model_to_dict\nfrom django.http import HttpResponse, HttpResponseNotAllowed, \\\n JsonResponse, HttpResponseBadRequest, HttpResponseNotFound\nfrom assaapp.models import User, Course\nfrom recommend.models import CoursePref, TimePref, RecommendTimetable\nfrom recommend.recommend import run_recommendation\n\ndef cf_score(user):\n all_course = list(Course.objects.all().values())\n all_coursepref = list(CoursePref.objects.all().values())\n user_coursepref = list(CoursePref.objects.filter(user=user).values())\n all_user = list(User.objects.all().values())\n user_dict = model_to_dict(user)\n\n score_sum = {}\n user_count = 
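find_recall_for_fixed_precision is imported from their utilities module and not shown; a plausible stand-in with sklearn, included only to make the reported metric concrete:

import numpy as np
from sklearn.metrics import precision_recall_curve

y_true = np.array([0, 0, 1, 1, 1, 0, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9])
precision, recall, _ = precision_recall_curve(y_true, scores)
ok = precision >= 0.90
print(recall[ok].max() if ok.any() else 0.0)   # best recall at >=0.90 precision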
{}\n score_average = {}\n\n total_sum = 0.0\n total_count = 0.0\n total_average = 5.0\n\n total_square_sum = 0.0\n total_std = 1.0\n\n user_score_sum = {}\n user_score_square_sum = {}\n user_score_count = {}\n user_average = {}\n user_std = {}\n\n user_sum = {}\n person_sum = {}\n user_person_sum = {}\n relation = {}\n\n score_weighted_sum = {}\n relation_sum = {}\n\n score_result_dict = {}\n\n for course in all_course:\n score_sum[course['id']] = 0.0\n user_count[course['id']] = 0.0\n score_weighted_sum[course['id']] = 0.0\n relation_sum[course['id']] = 0.0\n score_result_dict[course['id']] = -2.0\n\n for person in all_user:\n user_sum[person['id']] = 0.0\n person_sum[person['id']] = 0.0\n user_person_sum[person['id']] = 0.0\n user_score_sum[person['id']] = 0.0\n user_score_count[person['id']] = 0.0\n user_score_square_sum[person['id']] = 0.0\n\n for score_data in all_coursepref:\n score_sum[score_data['course_id']] += score_data['score']\n user_count[score_data['course_id']] += 1.0\n user_score_sum[score_data['user_id']] += score_data['score']\n user_score_square_sum[score_data['user_id']] += score_data['score']**2\n user_score_count[score_data['user_id']] += 1.0\n total_sum += score_data['score']\n total_count += 1.0\n total_square_sum += score_data['score']**2\n\n if total_count > 0.0:\n total_average = round(total_sum/total_count, 3)\n if total_count > 1.0:\n val = total_square_sum/(total_count-1.0)-(total_average**2)*total_count/(total_count-1)\n total_std = math.sqrt(val)\n\n\n for course in all_course:\n usrcnt = user_count[course['id']]\n if usrcnt == 0.0:\n score_average[course['id']] = total_average\n else:\n score_average[course['id']] = score_sum[course['id']]/usrcnt\n\n for person in all_user:\n pid = person['id']\n score_square_sum = user_score_square_sum[pid]\n score_sum = user_score_sum[pid]\n score_cnt = user_score_count[pid]\n if score_cnt == 0.0:\n user_average[pid] = total_average\n user_std[pid] = total_std\n else:\n user_average[pid] = score_sum/score_cnt\n average = user_average[pid]\n if score_cnt > 1.0:\n val = score_square_sum/(score_cnt-1.0)-(average**2)*score_cnt/(score_cnt-1.0)\n user_std[pid] = math.sqrt(val)\n else:\n user_std[pid] = total_std\n\n for score_data in user_coursepref:\n score_result_dict[score_data['course_id']] = score_data['score']\n\n for score_data in all_coursepref:\n course = score_data['course_id']\n score = score_result_dict[course]\n person = score_data['user_id']\n if score > -1.0:\n user_delta = score-user_average[user_dict['id']]\n person_delta = score_data['score']-user_average[person]\n user_sum[person] += user_delta*user_delta\n person_sum[person] += person_delta*person_delta\n user_person_sum[person] += user_delta*person_delta\n\n for person in all_user:\n pid = person['id']\n usr_sum = user_sum[pid]\n persn_sum = person_sum[pid]\n usr_persn_sum = user_person_sum[pid]\n if usr_sum*persn_sum == 0.0:\n relation[pid] = 0.0\n else:\n relation[pid] = usr_persn_sum/math.sqrt(usr_sum*persn_sum)\n\n for score_data in all_coursepref:\n score_data_usr = score_data['user_id']\n rel = relation[score_data_usr]\n score_data_score = score_data['score']\n score_data_course = score_data['course_id']\n usr_average = user_average[score_data_usr]\n usr_std = user_std[score_data_usr]\n if usr_std != 0.0:\n score_weighted_sum[score_data_course] += rel*(score_data_score-usr_average)/usr_std\n relation_sum[score_data['course_id']] += abs(rel)\n\n for course in all_course:\n usr_std = user_std[user_dict['id']]\n weighted_sum = 
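The relation[] computation above is a hand-rolled Pearson correlation between two users' mean-centered scores on co-rated courses. The same quantity with numpy on toy vectors, for orientation:

import numpy as np

u = np.array([8.0, 3.0, 6.0, 9.0])    # this user's scores on shared courses
v = np.array([7.0, 2.0, 5.0, 8.0])    # another user's scores
du, dv = u - u.mean(), v - v.mean()
r = (du * dv).sum() / np.sqrt((du ** 2).sum() * (dv ** 2).sum())
print(round(float(r), 3))             # 1.0: v is a shifted copy of u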
score_weighted_sum[course['id']]\n rel = relation_sum[course['id']]\n if relation_sum[course['id']] == 0.0:\n score_result_dict[course['id']] = score_average[course['id']]\n else:\n val = weighted_sum*usr_std/rel+user_average[user_dict['id']]\n score_result_dict[course['id']] = round(val, 3)\n if score_result_dict[course['id']] < 0.0:\n score_result_dict[course['id']] = 0.0\n if score_result_dict[course['id']] > 10.0:\n score_result_dict[course['id']] = 10.0\n\n return score_result_dict\n\ndef cf_view(user):\n all_course = [course.id for course in Course.objects.all()]\n all_coursepref = [model_to_dict(score_data) for score_data in CoursePref.objects.all()]\n user_coursepref = [model_to_dict(score_data)\n for score_data in CoursePref.objects.filter(user=user)]\n all_user = [person.id for person in User.objects.all()]\n user_id = user.id\n\n course_size = len(all_course)\n\n user_score = {}\n user_sum = {}\n user_one = {}\n\n relation = {}\n relation_sum = 0.0\n relation_abs_sum = 0.0\n\n course_score = {}\n\n for person in all_user:\n user_sum[person] = 0\n user_one[person] = 0\n for course in all_course:\n user_score[course] = 0\n course_score[course] = 0.0\n for score_data in user_coursepref:\n user_score[score_data['course']] = 1\n for score_data in all_coursepref:\n user_sum[score_data['user']] += 1\n if user_score[score_data['course']] == 1:\n user_one[score_data['user']] += 1\n for person in all_user:\n if person == user_id:\n continue\n relation_up = 2.0 * user_sum[user_id] + 2.0 * user_sum[person] - 4.0 * user_one[person]\n relation[person] = 1.0 - relation_up / course_size\n relation_sum += relation[person]\n relation_abs_sum += abs(relation[person])\n for score_data in all_coursepref:\n if score_data['user'] == user_id:\n continue\n course_score[score_data['course']] += relation[score_data['user']]\n if relation_abs_sum == 0.0:\n for course in all_course:\n course_score[course] = 0.5\n else:\n relation_base = 0.5 - relation_sum / relation_abs_sum / 2.0\n for course in all_course:\n course_score[course] = (course_score[course] / relation_abs_sum) + relation_base\n return course_score\n\ndef has_text(text, match_text):\n if match_text:\n matched = 0\n for char in text:\n if char == match_text[matched]:\n matched += 1\n if matched == len(match_text):\n return True\n return False\n return True\n\ndef searcher(course, score, request_get):\n search_dict = {}\n search_dict['title'] = request_get.get('title')\n search_dict['classification'] = request_get.get('classification')\n search_dict['department'] = request_get.get('department')\n search_dict['degree_program'] = request_get.get('degree_program')\n search_dict['academic_year'] = request_get.get('academic_year')\n search_dict['course_number'] = request_get.get('course_number')\n search_dict['lecture_number'] = request_get.get('lecture_number')\n search_dict['professor'] = request_get.get('professor')\n search_dict['language'] = request_get.get('language')\n if request_get.get('max_credit'):\n search_dict['max_credit'] = int(request_get.get('max_credit'))\n else:\n search_dict['max_credit'] = 32\n if request_get.get('min_credit'):\n search_dict['min_credit'] = int(request_get.get('min_credit'))\n else:\n search_dict['min_credit'] = -32\n if request_get.get('max_score'):\n search_dict['max_score'] = float(request_get.get('max_score'))\n else:\n search_dict['max_score'] = 32.0\n if request_get.get('min_score'):\n search_dict['min_score'] = float(request_get.get('min_score'))\n else:\n search_dict['min_score'] = -32.0\n return 
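Worth flagging for readers of the search code above: has_text is a subsequence test, not a substring test, so the characters of match_text only need to appear in order, not adjacently. A compact equivalent and two cases that show the behaviour:

def is_subsequence(text, match_text):
    it = iter(text)
    return all(ch in it for ch in match_text)   # 'in' consumes the iterator

print(is_subsequence('Operating Systems', 'OS'))   # True: O ... S in order
print(is_subsequence('Operating Systems', 'SO'))   # False: no O after the S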
(has_text(course.title+course.subtitle, search_dict['title']) and\n has_text(course.classification, search_dict['classification']) and\n has_text(course.college+course.department, search_dict['department']) and\n has_text(course.degree_program, search_dict['degree_program']) and\n has_text(course.academic_year, search_dict['academic_year']) and\n has_text(course.course_number, search_dict['course_number']) and\n has_text(course.lecture_number, search_dict['lecture_number']) and\n has_text(course.professor, search_dict['professor']) and\n has_text(course.language, search_dict['language']) and\n search_dict['min_credit'] <= course.credit <= search_dict['max_credit'] and\n search_dict['min_score'] <= score <= search_dict['max_score']\n )\n\ndef auth_func(func):\n def wrapper_function(*args, **kwargs):\n if args[0].user.is_authenticated:\n return func(*args, **kwargs)\n return HttpResponse(status=401)\n return wrapper_function\n\n@auth_func\ndef api_coursepref_rated(request):\n if request.method == 'GET':\n cf_score_result = cf_score(request.user)\n cf_user = list(CoursePref.objects.filter(user=request.user))\n start = int(request.GET.get('start'))\n end = int(request.GET.get('end'))\n sort_type = int(request.GET.get('sort'))\n position = 0\n course_list = []\n if sort_type == 0:\n cf_user = sorted(cf_user, key=lambda score_data: score_data.score)\n elif sort_type == 1:\n cf_user = sorted(cf_user, key=lambda score_data: -score_data.score)\n elif sort_type == 2:\n cf_user = sorted(cf_user, key=lambda score_data: score_data.course.title)\n for score_data in cf_user:\n if position > end:\n break\n course = score_data.course\n if position >= start and searcher(course, score_data.score, request.GET):\n course_data = course.data()\n course_data['score'] = score_data.score\n course_data['expected'] = cf_score_result[course.id]\n course_list.append(course_data)\n if searcher(course, score_data.score, request.GET):\n position += 1\n return JsonResponse(course_list, safe=False)\n return HttpResponseNotAllowed(['GET'])\n\n@auth_func\ndef api_coursepref_unrated(request):\n if request.method == 'GET':\n cf_view_result = cf_view(request.user)\n cf_score_result = cf_score(request.user)\n cf_user = [score_data.course.id\n for score_data in CoursePref.objects.filter(user=request.user)]\n start = int(request.GET.get('start'))\n end = int(request.GET.get('end'))\n sort_type = int(request.GET.get('sort'))\n position = 0\n rated = {}\n course_list = []\n all_course = list(Course.objects.all())\n if sort_type == 0:\n all_course = sorted(all_course, key=lambda course: -cf_view_result[course.id])\n elif sort_type == 1:\n all_course = sorted(all_course, key=lambda course: course.title)\n for course in all_course:\n rated[course.id] = False\n for score_data in cf_user:\n rated[score_data] = True\n for course in all_course:\n if position > end:\n break\n if position >= start and (not rated[course.id]) and searcher(course, 0, request.GET):\n course_data = course.data()\n course_data['score'] = '-'\n course_data['expected'] = cf_score_result[course.id]\n course_list.append(course_data)\n if (not rated[course.id]) and searcher(course, 0, request.GET):\n position += 1\n return JsonResponse(course_list, safe=False)\n return HttpResponseNotAllowed(['GET'])\n\n@auth_func\ndef api_coursepref_id(request, course_id):\n if request.method == 'GET':\n try:\n course = Course.objects.get(id=course_id)\n score_data = CoursePref.objects.get(user=request.user, course=course)\n except (Course.DoesNotExist, CoursePref.DoesNotExist):\n 
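auth_func above is a plain decorator gating every view on request.user.is_authenticated. The same pattern in a framework-free form, with a faked request object so the example runs standalone:

from collections import namedtuple

FakeRequest = namedtuple('FakeRequest', ['is_authenticated'])

def requires_auth(func):
    def wrapper(request, *args, **kwargs):
        if request.is_authenticated:
            return func(request, *args, **kwargs)
        return '401'
    return wrapper

@requires_auth
def view(request):
    return '200'

print(view(FakeRequest(True)), view(FakeRequest(False)))   # 200 401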
return JsonResponse({}, status=404, safe=False)\n return JsonResponse(model_to_dict(score_data), safe=False)\n if request.method == 'PUT':\n try:\n body = request.body.decode()\n score = int(json.loads(body)['score'])\n if score < 0 or score > 10:\n return HttpResponseBadRequest()\n except (KeyError, JSONDecodeError):\n return HttpResponseBadRequest()\n try:\n course = Course.objects.get(id=course_id)\n except Course.DoesNotExist:\n return JsonResponse({}, status=404, safe=False)\n try:\n score_data = CoursePref.objects.get(user=request.user, course=course)\n score_data.score = score\n score_data.save()\n return JsonResponse(model_to_dict(score_data), safe=False, status=200)\n except CoursePref.DoesNotExist:\n new_score = CoursePref(user=request.user, course=course, score=score)\n new_score.save()\n return JsonResponse(model_to_dict(new_score), safe=False, status=201)\n if request.method == 'DELETE':\n try:\n course = Course.objects.get(id=course_id)\n except Course.DoesNotExist:\n return HttpResponseNotFound()\n try:\n score_data = CoursePref.objects.get(user=request.user, course=course)\n except CoursePref.DoesNotExist:\n return HttpResponseNotFound()\n score_data.delete()\n return HttpResponse(status=200)\n return HttpResponseNotAllowed(['GET', 'PUT', 'DELETE'])\n\n@auth_func\ndef api_timepref(request):\n if request.method == 'GET':\n time_data = [time_pref.data()\n for time_pref in TimePref.objects.filter(user=request.user)]\n table = [[3] * 6 for i in range(26)]\n for time_pref in time_data:\n x_pos = (time_pref['start_hour']-8) * 2 + time_pref['start_minute']//30\n y_pos = time_pref['weekday']\n table[x_pos][y_pos] = time_pref['score']\n return JsonResponse(table, safe=False)\n if request.method == 'PUT':\n try:\n body = json.loads(request.body.decode())\n table = body['table']\n user = request.user\n except (KeyError, JSONDecodeError):\n return HttpResponseBadRequest()\n TimePref.objects.filter(user=user).delete()\n for i in range(26):\n for j in range(6):\n weekday = j\n score = table[i][j]\n start_time = str(8+i//2) + \":\" + (\"30\" if i%2 == 1 else \"00\")\n new_score = TimePref(user=user, score=score, weekday=weekday, start_time=start_time)\n new_score.save()\n return HttpResponse(status=200)\n return HttpResponseNotAllowed(['GET', 'PUT'])\n\n@auth_func\ndef api_recommend(request):\n if request.method == 'GET':\n recommend = RecommendTimetable.objects.filter(user=request.user)\n recommend_data = [recommend_timetable.data() for recommend_timetable in recommend]\n return JsonResponse(recommend_data, safe=False)\n if request.method == 'POST':\n recommend = run_recommendation(request.user)\n recommend_data = [recommend_timetable.data() for recommend_timetable in recommend]\n return JsonResponse(recommend_data, safe=False)\n if request.method == 'DELETE':\n recommend = RecommendTimetable.objects.filter(user=request.user).delete()\n return HttpResponse(status=200)\n return HttpResponseNotAllowed(['GET', 'POST', 'DELETE'])\n\n@auth_func\ndef api_constraints(request):\n if request.method == 'GET':\n return JsonResponse(request.user.data_constraint(), safe=False)\n if request.method == 'PUT':\n try:\n body = json.loads(request.body.decode())\n user = request.user\n days_per_week = body['days_per_week']\n credit_min = body['credit_min']\n credit_max = body['credit_max']\n major_min = body['major_min']\n major_max = body['major_max']\n user.days_per_week = days_per_week\n user.credit_min = credit_min\n user.credit_max = credit_max\n user.major_min = major_min\n user.major_max = 
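api_timepref above flattens each TimePref row into a 26x6 half-hour grid with x_pos = (start_hour-8)*2 + start_minute//30, and the PUT branch inverts that when rebuilding start_time. The round-trip in two small helpers:

def row_from_time(hour, minute):
    return (hour - 8) * 2 + minute // 30            # 8:00 -> row 0

def time_from_row(i):
    return '{}:{}'.format(8 + i // 2, '30' if i % 2 == 1 else '00')

print(row_from_time(9, 30))    # 3
print(time_from_row(3))        # 9:30
print(time_from_row(25))       # 20:30, the last of the 26 rows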
major_max\n            user.save()\n        except (KeyError, ValueError, JSONDecodeError):\n            return HttpResponseBadRequest()\n        return HttpResponse(status=200)\n    return HttpResponseNotAllowed(['GET', 'PUT'])\n\n@auth_func\ndef api_lastpage(request):\n    if request.method == 'GET':\n        return JsonResponse(request.user.last_recommend_page, safe=False)\n    if request.method == 'PUT':\n        try:\n            body = json.loads(request.body.decode())\n            user = request.user\n            last_page = body['last_page']\n        except (KeyError, JSONDecodeError):\n            return HttpResponseBadRequest()\n        user.last_recommend_page = last_page\n        user.save()\n        return HttpResponse(status=200)\n    return HttpResponseNotAllowed(['GET', 'PUT'])\n","sub_path":"backend/recommend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"192580951","text":"# -*-coding:utf-8-*-\nfrom flask import request\n\nfrom app.libs.enums import ClientTypeEnum\nfrom app.libs.redprint import Redprint\nfrom app.models.user import User\nfrom app.validators.forms import ClientForm, UserEmailForm\n\n__author__ = 'ZeroLoo'\n\napi = Redprint('client')\n\n\n@api.route('/register')\ndef create_client():\n    data = request.json\n    form = ClientForm(data=data)\n    if form.validate():\n        promise = {\n            ClientTypeEnum.USER_EMAIL: __register_user_by_email\n        }\n        promise[form.type.data]()\n\n\ndef __register_user_by_email():\n    form = UserEmailForm(request.json)\n    if form.validate():\n        User.register_by_email(form.nickname.data, form.account.data, form.secret.data)\n","sub_path":"app/api/v1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"409385333","text":"import os, shelve\r\n\r\nos.chdir(r'C:\\Users\\Assassin123\\Desktop\\Test') #use os.makedirs(dirName) if the directory doesn't exist\r\n\r\n'''fileObj=open(r'Test.txt','a')\r\nfileObj.write(r'Hey there.. This is a test file created by my first file handling Python script')\r\nfileObj.close()\r\nfile=open(r'Test\\Test.txt','r')\r\nstring=file.read()\r\nprint(string)\r\n'''\r\nshelfFile=shelve.open('shelfFile')\r\nfriends=['Somesh', 'A.p', 'Soham', 'Baba', 'Jindal']\r\nshelfFile['friends']=friends #saves the variable friends' value as an entry in the\r\n#shelf file 'shelfFile'. 
Now this value can be retrieved in any program at any time.\r\nshelfFile.close()\r\n#test this functionality (retrieval of the value of 'friends') by running a different\r\n#test script: shelves.py\r\n\r\nshelfFile=shelve.open('shelfFile')\r\n'''print(shelfFile['friends'])\r\nprint(shelfFile['useless'])\r\nprint(shelfFile['number'])\r\nprint(shelfFile['float'])\r\n'''\r\n#variables saved from shelves.py script\r\nfor i,j in shelfFile.items():\r\n print(i,': ',j)\r\n","sub_path":"Python/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"644633781","text":"import os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\n\nimport models.networks as networks\nfrom .base_model import BaseModel\n\n\nclass SRModel(BaseModel):\n def __init__(self, opt):\n super(SRModel, self).__init__(opt)\n train_opt = opt['train']\n self.input_L = self.Tensor()\n self.input_H = self.Tensor()\n\n # define network and load pretrained models\n self.netG = networks.define_G(opt)\n self.load()\n\n if self.is_train:\n self.netG.train()\n\n # loss\n loss_type = train_opt['pixel_criterion']\n if loss_type == 'l1':\n self.cri_pix = nn.L1Loss()\n elif loss_type == 'l2':\n self.cri_pix = nn.MSELoss()\n else:\n raise NotImplementedError('Loss type [%s] is not recognized.' % loss_type)\n if self.use_gpu:\n self.cri_pix.cuda()\n self.l_pix_w = train_opt['pixel_weight']\n\n # optimizers\n self.optimizers = []\n wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0\n optim_params = []\n for k, v in self.netG.named_parameters(): # can optimize for a part of the model\n if v.requires_grad:\n optim_params.append(v)\n else:\n print('WARNING: params [%s] will not optimize.' 
% k)\n self.optimizer_G = torch.optim.Adam(optim_params,\n lr=train_opt['lr_G'], weight_decay=wd_G)\n self.optimizers.append(self.optimizer_G)\n\n # schedulers\n self.schedulers = []\n if train_opt['lr_scheme'] == 'MultiStepLR':\n for optimizer in self.optimizers:\n self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \\\n train_opt['lr_steps'], train_opt['lr_gamma']))\n else:\n raise NotImplementedError('MultiStepLR learning rate scheme is enough.')\n\n self.log_dict = OrderedDict()\n\n print('---------- Model initialized ------------------')\n self.print_network()\n print('-----------------------------------------------')\n\n def feed_data(self, data, volatile=False, need_HR=True):\n # LR\n input_L = data['LR']\n self.input_L.resize_(input_L.size()).copy_(input_L)\n self.var_L = Variable(self.input_L, volatile=volatile)\n\n if need_HR:\n input_H = data['HR']\n self.input_H.resize_(input_H.size()).copy_(input_H)\n self.real_H = Variable(self.input_H, volatile=volatile)\n\n def optimize_parameters(self, step):\n self.optimizer_G.zero_grad()\n self.fake_H = self.netG(self.var_L)\n l_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.real_H)\n l_pix.backward()\n self.optimizer_G.step()\n\n # set log\n self.log_dict['l_pix'] = l_pix.data[0]\n\n def test(self):\n self.netG.eval()\n self.fake_H = self.netG(self.var_L)\n self.netG.train()\n\n def get_current_log(self):\n return self.log_dict\n\n def get_current_visuals(self, need_HR=True):\n out_dict = OrderedDict()\n out_dict['LR'] = self.var_L.data[0].float().cpu()\n out_dict['SR'] = self.fake_H.data[0].float().cpu()\n if need_HR:\n out_dict['HR'] = self.real_H.data[0].float().cpu()\n return out_dict\n\n def print_network(self):\n s, n = self.get_network_description(self.netG)\n print('Number of parameters in G: {:,d}'.format(n))\n if self.is_train:\n message = '-------------- Generator --------------\\n' + s + '\\n'\n network_path = os.path.join(self.save_dir, '../', 'network.txt')\n with open(network_path, 'w') as f:\n f.write(message)\n\n def load(self):\n load_path_G = self.opt['path']['pretrain_model_G']\n if load_path_G is not None:\n print('loading model for G [%s] ...' 
% load_path_G)\n self.load_network(load_path_G, self.netG)\n\n def save(self, iter_label):\n self.save_network(self.save_dir, self.netG, 'G', iter_label)\n","sub_path":"codes/models/SR_model.py","file_name":"SR_model.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"541693866","text":"__author__ = 'Jonas'\nimport math\n\n\nk = 0.4605170186\nS = 1000\na = 1 - 0.8835488577\nBesitzer = []\nPotKaufer = []\nKaufer = []\nfor x in range(11):\n Besitzer.append(S * (1 - math.e ** (-k * x)))\n PotKaufer.append(S - Besitzer[x])\n Kaufer.append(a * PotKaufer[x])\n\nprint(a)\nprint(Besitzer)\nprint(PotKaufer)\nprint(Kaufer)\n\n","sub_path":"JTSv2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"516855494","text":"import os\nimport sys\nimport shutil\nfrom collections import namedtuple\n\nFile = namedtuple(\"File\", \"name, path\")\n\ndef performSearch(rootDirectory, searchType, searchCriteria):\n filePaths = []\n for directoryName, subdirectoryList, fileList in os.walk(rootDirectory):\n if fileList:\n for file in fileList:\n path = os.path.join(directoryName, file)\n if (meetsSearchCriteria(path, searchType, searchCriteria)):\n name = os.path.splitext(file)[0]\n newFile = File(name, path)\n filePaths.append(newFile)\n return filePaths\n\ndef meetsSearchCriteria(filePath, searchType, searchCriteria):\n if searchType == \"N\":\n name = os.path.splitext(os.path.basename(filePath))[0]\n return namesMatch(name, searchCriteria)\n elif searchType == \"E\":\n if searchCriteria[0] != \".\":\n searchCriteria = \".\" + searchCriteria\n fileExtension = os.path.splitext(filePath)[1]\n return extensionsMatch(fileExtension, searchCriteria)\n elif searchType == \"S\":\n byteSize = int(searchCriteria)\n fileSize = os.path.getsize(filePath)\n return largerThan(fileSize, byteSize)\n\ndef namesMatch(firstName, secondName):\n return firstName == secondName\n\ndef extensionsMatch(firstExtension, secondExtension):\n return firstExtension == secondExtension\n\ndef largerThan(toCompare, baseline):\n return toCompare > baseline\n \ndef searchByName(fileList, searchTerm):\n matchingFiles = []\n for file in fileList:\n if file.name == searchTerm:\n matchingFiles.append(file.path)\n return matchingFiles\n\ndef searchByExtension(fileList, searchTerm):\n matchingFiles = []\n if searchTerm[0] != \".\":\n searchTerm = \".\" + searchTerm # Add leading \".\" to search term\n for file in fileList:\n fileExtenstion = os.path.splitext(file.path)[1]\n if fileExtenstion == searchTerm:\n matchingFiles.append(file.path)\n return matchingFiles\n\ndef searchBySize(fileList, searchTerm):\n matchingFiles = []\n byteSize = int(searchTerm)\n for file in fileList:\n if (os.path.getsize(file.path) > byteSize):\n matchingFiles.append(file.path)\n return matchingFiles\n\ndef getRootDirectory():\n rootDirectory = input(\"Enter the root directory to be searched: \")\n while not os.path.exists(rootDirectory):\n print(\"ERROR: Path does not exist or cannot be used\")\n rootDirectory = input(\"Enter the root directory to be searched: \")\n return rootDirectory\n\ndef validSearchType(searchType, criteria):\n if searchType == \"N\": # Searching for a name shouldn't have invalid input - files can be named anything\n return True\n elif searchType == \"E\": # Searching for an extension shouldn't have an invalid input\n return True\n else:\n 
try:\n numberCheck = int(criteria) # Integers are the only valid input for \"S\"\n return True\n except:\n return False\n \ndef getSearchCharacteristics():\n VALID_SEARCH_TYPE = \"NES\"\n validInput = False\n while not validInput:\n try:\n userInput = input(\"Enter search type and criteria: \").split()\n searchType = userInput[0]\n criteria = userInput[1]\n if (searchType in VALID_SEARCH_TYPE) and validSearchType(searchType, criteria):\n validInput = True\n else:\n print(\"ERROR: Please enter a valid search type and criteria\")\n \n except:\n print(\"ERROR: Invalid input\")\n return searchType, criteria\n\ndef getActionType():\n VALID_ACTION_TYPE = \"PFDT\"\n validInput = False\n while not validInput:\n try:\n actionType = input(\"Enter action type: \")\n if actionType in VALID_ACTION_TYPE:\n validInput = True\n else:\n print(\"ERROR: Please enter a valid action type: \")\n except:\n print(\"ERROR: Invalid input\")\n return actionType\n\ndef printFiles(fileList):\n print()\n for file in fileList:\n print(file.path)\n\ndef printFirstLine(fileList):\n for file in fileList:\n try:\n openedFile = open(file.path, \"r\")\n firstLine = openedFile.readline()\n print(\"First line in \" + file.path + \" reads:\" + \"\\n\" + firstLine)\n except:\n print(\"File could not be read\")\n finally:\n openedFile.close()\n\ndef createCopy(fileList):\n for file in fileList:\n try:\n duplicatePath = file.path + \".dup\"\n shutil.copy(file.path, duplicatePath)\n except:\n print(file.path + \" could not be copied.\")\n\ndef touchFiles(fileList):\n for file in fileList:\n with open(file.path, \"a\"):\n os.utime(file.path, times = None)\n\ndef performAction(actionType, searchedFiles):\n if actionType == \"P\":\n printFiles(searchedFiles)\n elif actionType == \"F\":\n printFirstLine(searchedFiles)\n elif actionType == \"D\":\n createCopy(searchedFiles)\n elif actionType == \"T\":\n touchFiles(searchedFiles)\n\ndef main(argv):\n rootDirectory = getRootDirectory()\n searchType, criteria = getSearchCharacteristics()\n actionType = getActionType()\n searchedFilePaths = performSearch(rootDirectory, searchType, criteria)\n performAction(actionType, searchedFilePaths)\n\nif __name__ == '__main__':\n main(sys.argv)","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"397834387","text":"__author__ = \"Alexandra Diem \"\n\"\"\" This FEniCs demo is implementing the porous mechanical framework\nfor modelling the interaction between coronary perfusion and myocardial\nmechanics based on J Biomech. 2012 Mar 15; 45(5): 850–855.\n\nThe demo is implemented in the main python file \"demo_inflate.py\"\nand requires the package 'Poroelastic' to be imported.\nThe module 'Poroelastic' implements the multicompartment\nporoelastic equations. 
'Poroelastic' only supports Python 3.x.\n'Poroelastic' requires FEniCS 2017.2.0; upwards compatibility is suspected,\nbut has not yet been tested.\n\nIt is recommended to set up FEniCS on Docker. A detailed manual for the\ninstallation procedure can be found here https://fenicsproject.org/download/.\n\nIn short, to pull an image for creating a docker container you can\nuse the docker pull command:\n\n    $docker pull quay.io/fenicsproject/stable\n\nYou can then run the docker container with FEniCS version 2017.2.0\nusing the following command:\n\n    $docker run -ti -v $(pwd):/home/fenics/shared quay.io/fenicsproject/stable:2017.2.0\n\nThe tag reflects the FEniCS version used to develop the package.\nInside the Docker container, the 'poroelastic' package is installed with\n\n    $python3 setup.py install\n\nTo view the output, ParaView 5.x is required.\n\n\"\"\"\n\n# Equation and problem definition - class PoroelasticProblem\n# -----------------------------------------------------------\n# This demonstration aims to capture the coupling between the fluid flow and the\n# mechanical deformation of the myocardial tissue. This is done by a novel porous\n# mechanical framework presented in J Biomech. 2012 Mar 15; 45(5): 850–855.\n# The mathematical equations needed for solving the perfusion problem of the left\n# ventricle are represented in the 'problem' module.\n# To understand the functions and classes behind this demo, we will walk through\n# the main equations in the module 'problem'.\n#\n# To solve the complex problem described here, the class 'PoroelasticProblem'\n# was created. It inherits the main functionalities for handling the equations\n# of the poroelastic problem.\n# For solving the coupled solid-fluid equations, an iterative solving scheme is\n# performed between the equations for the divergence of the stretch of the skeleton\n# (solid) (I), the compartmental fluid pressure (II) and the divergence of the\n# integral of the mass divergence (III).\n#\n# (I) is described in the function 'set_solid_variational_form' of the class\n# 'PoroelasticProblem'. It inherits the solid variational form of the following\n# equation provided by the paper:\n#\n# $ Psic = self.Psi + L*(J - 1 - sum([m[i]/rho for i in range(self.N)]))\n#\n# where 'Psic' represents the divergence of the stress tensor.\n#\n# Starting from this equation, the 'solid_variational_form' is created,\n# where Psi, representing the constitutive law, is defined by:\n#\n# $ self.Psi = self.material.constitutive_law(J=self.J, C=self.C,\n#                                             M=m, rho=rho, phi=phi0)\n#\n# with J ... the determinant of the deformation gradient tensor 'F'\n# with C ... the right Cauchy-Green deformation tensor\n# with M ... the mass increase of the fluid\n# with rho ... the density of the fluid\n# with phi ... the porosity of the solid phase\n#\n# The divergence of the stress tensor is then calculated from the\n# solid_variational_form as defined in the function 'set_solid_variational_form'\n# as follows:\n#\n# $ Psic = self.Psi*dx + L*(self.J - Constant(1) - m/rho)*dx\n#\n# with L ... being the Lagrange multiplier enforcing volume constraints\n# with m ... the fluid mass increase\n# with rho ... being the density of the fluid\n#\n# (II) The 'fluid-solid-coupling' is represented as the following equation\n# in the paper\n#\n# $ p_i = (Del Psi_S)/(Del J self.phi_{f,i}) - L\n#\n# which becomes, in the variational form in the function 'fluid_solid_coupling',\n#\n# $ p*q*dx = (tr(diff(self.Psi, self.F) * self.F.T))/self.phif[i]*q*dx - L*q*dx\n#\n# with q ... 
being the TestFunction living in the FunctionSpace FS_F\n# with p ... being the TrialFunction living in the FunctionSpace FS_F\n# The function spaces are defined in the function 'create_function_spaces'\n# of the module 'problem' as a MixedElement space if there is more than one\n# compartment, i.e. if N > 1.\n#\n# Finally, the fluid variational form for several compartments (N > 1)\n# is represented in the function 'set_fluid_variational_form' by\n#\n# vm = TestFunctions(self.FS_M)\n# Form = sum([k*(m[i] - m_n[i])*vm[i]*dx for i in range(self.N)])\\\n#        + sum([dot(grad(M[i]), k*(dU-dU_n))*vm[i]*dx\n#               for i in range(self.N)])\\\n#        + sum([inner(-(rho * self.J * inv(self.F) * self.K() * inv(self.F.T))*grad(self.p[i]), grad(vm[i]))*dx\n#               for i in range(self.N)])\n# where the inflow and outflow terms are added and represented by:\n#\n#  # Add inflow terms\n#  Form += -rho*self.qi*vm*dx\n#\n#  # Add outflow term\n#  Form += rho*q_out*vm*dx\n#\n# and the compartment exchange is included by:\n#\n#  # Compartment exchange\n#  for i in range(len(beta)):\n#      Form += -self.J*beta[i]*((self.p[i] - self.p[i+1])*vm[i] +\\\n#              (self.p[i+1] - self.p[i])*vm[i+1])*dx\n#\n# with vm ... being the TestFunction living in the FunctionSpace FS_M\n# with m ... being the TrialFunction living in the FunctionSpace FS_M\n#\n# We will apply a time-stepping technique according to the theta-rule (after Crank-Nicolson).\n# This represents a compromise between the explicit and implicit time-dependent\n# modelling methods. For that to work, the solutions for 'm' and 'th' need to be updated\n# after every iteration step, allowing for adapted time-stepping:\n# $ M = th*m + th_*m_n\n#\n# Implementation\n# --------------\n#\n# This description goes through the implementation (in\n# `demo_inflate.py`) of a solver for the above described\n# poroelastic problem step-by-step.\n\n# First the required modules are imported.\n#\nimport sys\nimport uuid\nimport poroelastic as poro\nimport dolfin as df\nimport numpy as np\n\n# Access MPI functionality using dolfin's provided MPI class to enable parallel\n# computing, e.g. iterative adjusting of parameters.\n#\n# Allows for parallelizing processes\n#\ncomm = df.mpi_comm_world()\n#\n# We need to create a mesh based on the dolfin class dolfin.cpp.Mesh which is going\n# to cover the unit square. Here the mesh consists of nx x nx squares.\n#\n# Create mesh\nnx = 10\nmesh = df.UnitSquareMesh(nx, nx)\n#\n# To be able to pass arguments, e.g. 
parameters, to the script executed,\n# or store parameters in a dictionary, the python class ParamParser()\n# was created.\n#\n# Here we intialize the paramparser for allowing for passing an argument\n# (here ) to the script as argument when executing from the\n# command line.\n#\nparams = poro.ParamParser()\n#\n# Next, we need to generate a unique directory data will be stored in.\n# Using the uuid4() function for generating a random UUID (Universal Unique\n# Identifier, 128 bit number) a random identifictaion number is created and\n# with the string method converted to a string of hex digits.\n#\ndata_dir = str(uuid.uuid4())\n#\n# Add result to dictionary section, key, value\n#\nparams.add_data(\"Simulation\", \"dir\", data_dir)\n#\n# Print unique simulation ID to screen\n#\nprint(\"Simulation ID {}\".format(data_dir))\n#\n# Print number of cells in mesh to screen\n#\nprint(\"Number of cells: {}\".format(mesh.num_cells()))\n#\n# Next, we are defining the problem with mesh definition and the parameters\n# provided by the configuration file.\n#\npprob = poro.PoroelasticProblem(mesh, params.p)\n#\n# Next, we want to divide our left ventricle, into 4 main subdomains, having there\n# individually set boundary conditions.\n# For that to work, we create classes for defining parts of the boundaries and\n# the interior of the domains.\n# We consider Dirichlet boundary conditions. These can be implied by creating a\n# simple function returning a boolean. The idea is to return 'True' for points\n# inside the subdomain and 'False' for those oustide.\n# In our case this means for 'Left' we set the boundaries to x=0.0 .\n#\nclass Left(df.SubDomain):\n def inside(self, x, on_boundary):\n return df.near(x[0], 0.0) and on_boundary\n\nclass Right(df.SubDomain):\n def inside(self, x, on_boundary):\n return df.near(x[0], 1.0) and on_boundary\n\nclass Top(df.SubDomain):\n def inside(self, x, on_boundary):\n return df.near(x[1], 1.0) and on_boundary\n\nclass Bottom(df.SubDomain):\n def inside(self, x, on_boundary):\n return df.near(x[1], 0.0) and on_boundary\n#\n# Next, we would like to create a meshfunction allowing for the storage\n# and numbering of subdomains using the 'MeshFunction' from the dolfin package.\n# When creating a MeshFunction an argument defining the type of MeshFunction\n# is required. This is represented by the first argument which in our example\n# is defined by 'size_t'. 'size_t' defines that an integer is taken as argument\n# and all facets will as consequence be given this index.\n# The second argument which is optional, defines the mesh.\n# The third argument provides the topological dimension of the mesh which in\n# our case is '-1'. 
This argument is optional, but important to be defined in\n# respect to the boundary conditions, which need to be a dimension lower than\n# the space we are working in.\n#\n# Initialize mesh function for boundary domains.\nboundaries = df.MeshFunction(\"size_t\", mesh, mesh.topology().dim()-1)\n#\n# To ensure all boundaries are set to 0 allowing for knowing and tracking\n# th exact coordinates, the boundary values need to be set to 0.\n#\n# Hint: when ommiting this step in older dolfin versions you might actually end\n# up with randomly set boundary values.\n#\nboundaries.set_all(0)\n#\n# Initialize sub-domain instances.\n#\nleft = Left()\nright = Right()\ntop = Top()\nbottom = Bottom()\n#\n# Next, we nitialize the mesh function for boundary domains in sub-domains.\n# We set markers allowing for tracking of changes in mesh before the mesh is deformed.\n# Often, you will find that these markers are saved into a dictionary.\n#\nleft.mark(boundaries, 1)\nright.mark(boundaries, 2)\ntop.mark(boundaries, 3)\nbottom.mark(boundaries, 4)\n#\n# Next, we need to add the dirichlet boundary conditions for the solid.\n# For that we will use the function 'add_solid_dirichlet_condition' from the\n# class 'PoroelasticProblem' from the 'Poroelastic' package.\n# This function allows for setting different dirichlet boundary conditions\n# depending on the subspace.\n#\n# The boundaries are set with the first argument in correspondence to the subdomains\n# marked by 1 (Left) and 4 (Bottom) defined by the boundaries function.\n# The third argument represents the sub domain instance the condition is\n# defined for.\n# In our example the fourth argument functions as a keyword argument setting\n# the value for n in the vectorspace.\n# The boundary conditions are stored in a dictionary.\n# Optionally, a string specifying a DirichletBC method can be passed as an argument.\n# This allows for the usage of DirichletBC function defined methods provided\n# by the dolfin package.\n# A timestep condition can be enforced by adding 'time' which functions as a boolean\n# as an argument. For usage 'tcond' can be defined as an additional variable referring to the\n# to an expression defining the condition of the parameter 't'. The second argument\n# in tcond would define the current time setting and the third could refer to the\n# degree of the expression.\n#\n# example:\n# tcond = df.Expression('t', t=0.0, degree=1)\n# pprob.add_solid_dirichlet_condition(tcond, boundaries, 1, time=True)\n#\n# Time conditions will then be saved in an additiona new list.\n#\n# Define Dirichlet boundary conditions\nzero = df.Constant(0.0)\npprob.add_solid_dirichlet_condition(zero, boundaries, 1, n=0)\npprob.add_solid_dirichlet_condition(zero, boundaries, 4, n=1)\n#\n# Eventually, we will have to store the data produced in files.\n# For that to work we will use a dolfin class supporting the output of meshes\n# and functions in XDMF format. 
This will allow us to create an XML file describing\n# the data produced and pointing to a so-called HDF5 file that will store the actual\n# data.\n# In order to allow output of data in parallel, 'comm ' will be used as an argument.\n# The second argument passed represents the location the file will be stored in.\n#\n# Before storing the data, we need to define major parameters of the xdmf files.\n# The function 'set_xdmf_parameters' sets functionalities for processing and opening\n# of the XDMFFiles.\n\n#1) ' flush_output'\n# Enables the functionality to preview the XDMFFile produced, which comes in handy\n# when you have an iterative process taking a long time to run and you want to check\n# whether the results provided meet your expectations. If this is not set to true,\n# you cannot read the file produced (e.g. with Paraview) until the program terminates.\n\n#2) 'functions_share_mesh'\n# When enabled it makes all functions share the same mesh and time series.\n\n#3) 'rewrite_function_mesh'\n# If False, this parameter limits each function to one mesh for the complete time\n# series. The Mesh will not be rewritten every time-step.\n\ndef set_xdmf_parameters(f):\n f.parameters['flush_output'] = True\n f.parameters['functions_share_mesh'] = True\n f.parameters['rewrite_function_mesh'] = False\n\n#\n# Setting the number of compartments by the integer found for the key 'Parameter' ,\n# 'N' in the input file.cfg.\n#\nN = int(params.p['Parameter']['N'])\n# f1 - list divergence stress\n# f2 - mass fluid\n# f3 - pressure\n # f4 - list scalar of divergence change of deformation solid\nf1 = [df.XDMFFile(comm, '../data/{}/uf{}.xdmf'.format(data_dir, i)) for i in range(N)]\nf2 = df.XDMFFile(comm, '../data/{}/mf.xdmf'.format(data_dir))\nf3 = [df.XDMFFile(comm, '../data/{}/p{}.xdmf'.format(data_dir, i)) for i in range(N)]\nf4 = df.XDMFFile(comm, '../data/{}/du.xdmf'.format(data_dir))\n#\n# Initialize 'set_xdmf_parameters' for XDMFFiles to be created\n#\n[set_xdmf_parameters(f1[i]) for i in range(N)]\nset_xdmf_parameters(f2)\n[set_xdmf_parameters(f3[i]) for i in range(N)]\nset_xdmf_parameters(f4)\n#\n# dx and ds are predefined measures in dolfin referring to the integration over\n# cells and exterior facets (facets on the boundary), respectively.\n# Since dx and ds can take additional integer arguments, integration over subdomains\n# can be defined by using different variables or integer labels as arguments.\n# In order to map the geometry information stored in the mesh functions to the\n# measures, we will define new measures and for ds we will use the boundary defining\n# mesh function for the subdomains as input.\n#\n# Define new measures associated with exterior boundaries.\ndx = df.Measure(\"dx\")\nds = df.Measure(\"ds\")(subdomain_data=boundaries)\n#\n# Set start variables for the calculations\nsum_fluid_mass = 0\ntheor_fluid_mass = 0\nsum_disp = 0\ndomain_area = 1.0\n#\n# Using the get_params function provided by the ParamParser class, the configuration\n# file provided in self.prams is read and the parameters are stored as dictionary\n# params['name'] = value .\n# In this instance the key 'parameter' is looked up.\n#\nphi = params.p['Parameter'][\"phi\"]\nrho = params.p['Parameter'][\"rho\"]\nqi = params.p['Parameter'][\"qi\"]\ndt = params.p['Parameter'][\"dt\"]\ntf = params.p['Parameter'][\"tf\"]\n#\n# The average error which will be calculated is stored in the list 'avg_error'.\navg_error = []\n#\n# To solve the variational problem the class 'PoroelasticProblem' defined in the\n# the module 
'problem' in the package 'Poroelastic' provides the function 'solve()'.\n# The function allows for parallel computing and will return the process rank of the\n# computed processes for the communicator or the local machine.\n# Furthermore, the function initializes the tolerance function TOL() from the\n# class 'PoroelasticProblem' and sets it to the value found for the key 'TOL'\n# in the dictionary created when reading in the configuration file.\n# The maximum number of iterations is set to 100.\n# The current time is set to t = 0.0.\n# dt is initialized as a constant of the function 'set_fluid_variational_form'\n# of the class 'Poroelastic'.\n# The solve() function initiates solving of the fluid mass (Mf), fluid divergence (Uf),\n# the pressure (p), the solid divergence (Us) and the time (t).\n# In the solve() function the NonlinearVariationalProblem for the solid as well as\n# the fluid phase will be executed. For that, as described above, the\n# variational problems are expressed in their variational forms stored in\n# the 'set_fluid_variational_form' and the 'set_solid_variational_form' functions\n# provided by the module 'problem' in the 'Poroelastic' package.\n# The variational forms are, in the 'solve' function, passed as parameters of the\n# dolfin class 'NonlinearVariationalProblem', a class representing a nonlinear\n# variational problem.\n# Besides the variational form, the unknown function has to be passed as a parameter,\n# which in our example is represented by mf (fluid mass) or Us (divergence of solid).\n# The other parameters, used to specify boundary conditions and the Jacobian,\n# are optional.\n# After setting the parameters of the 'NonlinearVariationalProblem' the 'choose_solver'\n# function is initiated out of the 'solve' function.\n#\n# The 'choose_solver' function of the 'problem' module allows the user to choose\n# between a direct solver and an iterative one. For that, the 'choose_solver' function\n# goes through the params dictionary created when reading in the configuration file,\n# and if there is a key defined for 'Simulation' and 'solver' defining the value as\n# 'direct', the method of solving the NonlinearVariationalProblem will be chosen to\n# be done directly by executing the function 'direct_solver'.\n# If not running a Simulation, the value and method 'direct' can be omitted\n# and the variational problem can be solved in an iterative manner by executing the\n# function 'iterative_solver'.\n#\n# A main difference between the two approaches is the computational expense. While\n# we will use the iterative approach in this demo, which is less computationally\n# expensive, one could also define in the configuration file the use of the direct\n# approach. 
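As an illustration, requesting the direct solver could look like the\n# hypothetical configuration snippet below (the exact section and key names are an\n# assumption based on the description above, not copied verbatim from the package):\n#\n#   [Simulation]\n#   solver = direct\n#\n# 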
While the direct approach allows solving the problem in one major\n# computational step, requiring a lot of RAM, the indirect method approaches the\n# solution gradually in smaller steps, which require less RAM but create the\n# need for iteration over solved steps.\n# The iterative approach has, in consequence, the advantage of being faster.\n# Nevertheless, the tolerance estimate will be defined by the solution from the\n# direct method of the well-defined or well-conditioned problem.\n#\n# A while-loop in the 'solve' function enables running the Simulation\n# for as long as defined in the 'Params' dictionary created when reading the\n# configuration file.\n# The MPI-worker number 1 (rank 0) is then defined to be in charge of printing\n# the time by initiating the function 'print_time'.\n# 'print_time' is a function defined in the 'utils' module of the 'Poroelastic'\n# package.\n# The variables 'iter' (iteration) and 'eps' (error-per-step) are initialized.\n#\n# A second while-loop in the 'solve' function limits the number of iterations\n# to eps > tol and iter < maxiter. That means that the iteration over the equations\n# is done as long as the error per step (eps) is above the threshold for the tolerance\n# (tol) and the maximum number of iterations set has not been exceeded.\n#\n# During each iteration the latest calculated pressure function 'p' is assigned to\n# the function 'mf_'. For that, the dolfin function .assign() is used, allowing\n# one function to be assigned to another.\n# The variational problems for the solid, for the fluid and for the fluid_solid_coupling\n# (defining 'p') are initiated respectively.\n# The error variable 'e' is then calculated by first subtracting the pressure variable\n# 'mf' assigned from the previous iteration step from the current 'p[0]'.\n# The error 'eps' is in the next step evaluated by the square of the error 'e'\n# following the L2-norm.\n# If the condition eps > tol and iter < maxiter still applies, the while loop of the\n# 'solve' function is not broken.\n# After breaking out of the iteration-determining while-loop, the current solutions\n# for the functions 'mf' and 'Us' are stored as the previous solutions\n# using the dolfin.assign() function.\n#\n# In the next step, the Lagrangian Darcy flow vector, in short referred to as the fluid vector,\n# is calculated by initiating the function 'calculate_flow_vector' of the\n# class 'PoroelasticProblem'. For solving this variational problem, the 'solver_parameters'\n# are set to the iterative solver 'minres' with the preconditioner 'hypre_amg',\n# the same as set as default in the 'iterative_solver' function.\n# The 'solve' function, in this case actually defined as a Python generator by using the\n# 'yield' statement instead of the 'return' statement, yields the objects\n# 'self.mf', 'self.Uf', 'self.p', 'self.Us', 't'.\n# 'yield' allows the local variables being created to be kept and prevents\n# the function from exiting. This way, for the simulation, yield allows us to\n# produce a sequence of values. This facilitates iterating over the sequence of solutions created,\n# but does not require us to store the entire sequence in memory, as the 'return'\n# statement would.\n# In the next step the mesh is moved by initiating the 'move_mesh' function.\n# The 'move_mesh' function is defined in the 'problem' module. 
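A minimal sketch of what\n# such a function might look like (hypothetical code, inferred from the description\n# here rather than copied from the package):\n#\n#   def move_mesh(self):\n#       dU, L = self.Us.split(True)\n#       df.ALE.move(self.mesh, df.project(dU, df.VectorFunctionSpace(self.mesh, 'P', 1)))\n#\n# 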
It takes advantage\n# of the 'ALE.move' class, returning the projection of the components of the function\n# 'dU' onto the VectorFunctionSpace.\n# In the last step the time print statement is updated by adding dt to the current\n# time.\n# An additional print statement after exiting the while-loop is added to avoid\n# overwriting the time print statements when the next output is printed.\n#\n# Moving back to 'demo_inflate', we are using the dolfin provided 'split()'\n# function to extract the subfunctions of 'Us'.\n#\n# The results will be written to the XDMFFiles created earlier, using the 'write_file'\n# function of the 'utils' module. This function itself will initiate the DOLFIN\n# provided 'set_log_level' function, deciding which messages routed through the logging\n# system will be printed to the console. Calling the function 'set_log_level', we can\n# specify the log level of the messages printed by setting the value of the optional\n# integer argument.\n# In our example it is set to 40, meaning that, with the default level being 20, only messages\n# higher than or equal to the set log level will be printed.\n# Next, 'write_checkpoint' allows for saving a function to an XDMFFile for checkpointing,\n# taking in the parameters of the function to save, the name (label) of the function used,\n# and the time step.\n# Last, the log level is set back to 30, allowing\n# for messages to be printed.\n#\n# Next in the for loop, the solutions for the domain_area, the sum of the fluid mass\n# (sum_fluid_mass), the theoretical fluid mass (theor_fluid_mass), the theoretical\n# solution for the fluid mass in the domain (theor_sol), the sum of the dispersion\n# (sum_disp) and the average error (avg_error) are computed.\n# The dolfin function 'assemble' returns, depending on the input,\n# a scalar value, a vector, a matrix or a higher rank tensor (in our case a scalar\n# or a matrix).\n#\n# The 'avg_error' saves the error according to the L2 error norm, normalized by the\n# theoretical solution 'theor_sol'. 
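In other words, each stored entry equals the relative error\n#\n#   sqrt(((assemble(Mf*dx) - theor_sol)/theor_sol)**2) = |assemble(Mf*dx) - theor_sol| / theor_sol\n#\n# which mirrors the 'avg_error.append(...)' line inside the for loop below.\n# 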
The 'avg_error' values are appended to a list.\n#\n# As long as the for loop continues, the theoretical solution and the currently\n# approximated solution of the sum of the fluid mass are printed to the screen.\n#\n# Upon exiting the for loop, the XDMFFiles created are closed by calling the\n# 'close()' function.\n# domain_area scalar value\n# sum_fluid_mass scalar values\n# theor_fluid_mass scalar value\n# sum_dis Matrix\n# avg_error scalar values\n\nfor Mf, Uf, p, Us, t in pprob.solve():\n\n dU, L = Us.split(True)\n\n [poro.write_file(f1[i], Uf[i], 'uf{}'.format(i), t) for i in range(N)]\n poro.write_file(f2, Mf, 'mf', t)\n [poro.write_file(f3[i], p[i], 'p{}'.format(i), t) for i in range(N)]\n poro.write_file(f4, dU, 'du', t)\n\n domain_area += df.assemble(df.div(dU)*dx)*(1-phi)\n sum_fluid_mass += df.assemble(Mf*dx)\n theor_fluid_mass += qi*rho*dt\n theor_sol = theor_fluid_mass*domain_area\n sum_disp += df.assemble(dU[0]*ds(4))\n avg_error.append(np.sqrt(((df.assemble(Mf*dx)-theor_sol)/theor_sol)**2))\n print(theor_sol, df.assemble(Mf*dx))\n\n[f1[i].close() for i in range(N)]\nf2.close()\n[f3[i].close() for i in range(N)]\nf4.close()\n#\n# The final error is calculated by normalizing the avg_error by the number of elements\n# in the list of errors.\n#\nerror = sum(avg_error)/len(avg_error)\n#\n# The function 'write_config' inherited by the class 'ParamParser' of the module\n# param_parser is executed on the configuration files to be created.\n#\nparams.write_config('../data/{}/{}.cfg'.format(data_dir, data_dir))\n#\n# Finally, the result for the expected sum fluid mass, the calculated sum of the\n# fluid mass and the average error over all time steps are ptinted to the screen.\n#\nprint(\"Expected sum fluid mass: {}\".format(theor_fluid_mass))\nprint(\"Sum fluid mass: {}\".format(sum_fluid_mass))\nprint(\"Average error over all time steps: {}\".format(error))\n","sub_path":"demo/demo_inflate_fenicsstyle.py","file_name":"demo_inflate_fenicsstyle.py","file_ext":"py","file_size_in_byte":25503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"134854337","text":"import cv2\r\n\r\n# Playing video from file:\r\nvidcap = cv2.VideoCapture(\"C:\\\\Nikhil's Stuff\\\\B.E Project\\\\Final Dataset\\\\Fighting028_x264.mp4\")\r\ncurrentFrame = 0\r\nt1 = vidcap.get(cv2.CAP_PROP_FPS)\r\nt2 = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)\r\n\r\ntime = t2 / t1;\r\ntime *= 1000\r\ntime_frame = 0\r\n\r\nwhile time_frame < time:\r\n # Capture frame-by-frame\r\n ret, frame = vidcap.read()\r\n\r\n # Saves image of the current frame in jpg file\r\n name = 'img' + str(currentFrame) + '.jpg'\r\n cv2.imwrite(name, frame)\r\n time_frame += 200\r\n vidcap.set(cv2.CAP_PROP_POS_MSEC,time_frame) \r\n\r\n # To stop duplicate images\r\n currentFrame += 1\r\n print('Creating....' 
+name)\r\n\r\n# When everything done, release the capture\r\nvidcap.release()\r\ncv2.destroyAllWindows()\r\n\r\n \r\n\r\n","sub_path":"extract_frames.py","file_name":"extract_frames.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"30888301","text":"#!/usr/bin/env python3\nimport numpy\n\n#constants\nview = range(-2, 3)\nearshot = range(-4, 5)\ndeadzone = 1\nstart = [12, 25]\nnumpy.random.seed(12345)\nalpha = .001\ndimX = 33\ndimY = 2\ndimR = 4\ndimZ = 10\ndimIn = dimX + dimY + dimR\n\n#build array of world map\nworld = numpy.ones((25, 50))\n\n'''\n [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n\n [ 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n \n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n'''\n\n#set location data\npos = [start[0], start[1]]\n\n#mainatin record of last input, output, reward and state\nlastX = numpy.zeros((dimX))\nlastY = numpy.zeros((dimY))\nlastR = numpy.zeros((dimR))\nlastZ = numpy.zeros((dimZ))\n\n#initialize the weights of the network\nWi = 2 * numpy.random.rand(dimZ, dimZ+dimIn+1) -1\nWo = 2 * numpy.random.rand(dimIn, dimZ+1) -1\n\n#convert a vector into a cardnial direction\ndef cardinal(Y):\n if numpy.linalg.norm(Y) < deadzone:\n return [ 0, 0]\n angle = numpy.arctan2(Y[1], Y[0])\n if -7*numpy.pi/8 <= angle < -5*numpy.pi/8:\n return [-1,-1]\n elif -5*numpy.pi/8 <= angle < -3*numpy.pi/8:\n return [ 0,-1]\n elif -3*numpy.pi/8 <= angle < -numpy.pi/8:\n return [ 1,-1]\n elif -numpy.pi/8 <= angle < numpy.pi/8:\n return [ 1, 0]\n elif numpy.pi/8 <= angle < 3*numpy.pi/8:\n return [ 1, 1]\n elif 3*numpy.pi/8 <= angle < 5*numpy.pi/8:\n return [ 0, 1]\n elif 5*numpy.pi/8 <= angle < 7*numpy.pi/8:\n return [-1, 1]\n else:\n return [-1, 0]\n\n#render output of world\ndef draw():\n for x in range(len(world)):\n string = ''\n for y in range(len(world[0])):\n if pos[0] == x and pos[1] == y:\n string += '@ '\n elif world[x][y] == -1:\n string+='# '\n elif world[x][y] == -2:\n string+='* '\n elif world[x][y] == 1:\n string+='. 
'\n elif world[x][y] == 0:\n string+=' '\n else:\n string+=str(world[x][y])\n print('| ' + string + '|')\n\n#activation function\ndef nonLinearFunction(x):\n if x>0:\n return numpy.log(1+x)\n else:\n return -numpy.log(1-x)\nnlf = numpy.vectorize(nonLinearFunction)\n\n#derivative of the activation function\ndef nonLinearDerivative(x):\n if x>0:\n return 1/(1+x)\n else:\n return 1/(1-x)\nnld = numpy.vectorize(nonLinearDerivative)\n\n#process an output to compute the new inputs and rewards\ndef process(Y):\n #allow update of position\n global pos\n\n #calculate movement direction\n direction = cardinal(Y)\n\n #move\n lastPos = pos\n newPos = [pos[0] + direction[0], pos[1] + direction[1]]\n if 0 > newPos[0] or 0 > newPos[1] or len(world) <= newPos[0] or len(world[0]) <= newPos[1]:\n newPosValue = -1\n else:\n newPosValue = world[newPos[0]][newPos[1]]\n if newPosValue >= 0:\n pos = newPos\n world[newPos[0]][newPos[1]] = 0\n\n ### X ###\n\n #get values of the world map in view\n X1 = []\n for i in view:\n for j in view:\n if 0 > pos[0]+i or 0 > pos[1]+j or len(world) <= pos[0]+i or len(world[0]) <= pos[1]+j:\n value = -1\n else:\n value = world[pos[0] + i][pos[1] + j]\n X1.append(value)\n\n #note nearby preditors and prey\n pred = []\n prey = []\n\n #search for preditors and prey in earshot\n for i in earshot:\n for j in earshot:\n if 0 > pos[0]+i or 0 > pos[1]+j or len(world) <= pos[0]+i or len(world[0]) <= pos[1]+j:\n value = -1\n else:\n value = world[pos[0] + i][pos[1] + j]\n if value == 1:\n prey.append([i, j])\n elif value == -2:\n pred.append([i, j])\n\n #find closest preditor in earshot\n closePred = [[0, 0], 0]\n for p in pred:\n norm = numpy.linalg.norm(p)\n if 1/norm > closePred[1]:\n closePred = [p, 1/norm]\n\n #find closest prey in earshot\n closePrey = [[0, 0], 0]\n for p in prey:\n norm = numpy.linalg.norm(p)\n if 1/norm > closePrey[1]:\n closePrey = [p, 1/norm]\n\n #record nearest preditor and prey\n X2 = [closePred[0][0], closePred[0][1], closePrey[0][0], closePrey[0][1]]\n\n #get actual movement\n X3 = numpy.subtract(pos, lastPos)\n\n #get global location\n X4 = numpy.subtract(pos, start)\n\n #put all inputs together\n X = numpy.concatenate((X1, X2, X3, X4))\n\n ### R ###\n\n #reward for getting food\n R1 = [newPosValue]\n \n #reward for proximity to food\n R2 = [closePrey[1], -closePred[1]]\n\n #punishment for trying to move too fast\n R3 = [-numpy.square(numpy.linalg.norm(Y))]\n\n #put all rewards together\n R = numpy.concatenate((R1, R2, R3))\n\n ### END ###\n\n #return the pair of inputs for next cycle\n return [X, R]\n\n#accept commands for stepping speed and exiting\ncmd = ''\nsteps = 0\n\n#loop until command Q given\nwhile True:\n #draw the world\n print('OUTPUT: ' + str(lastY))\n print('REWARD: ' + str(lastR))\n draw()\n\n #get input for commands\n cmd = input('CMD: ')\n\n #chech command\n if cmd == 'q':\n exit()\n elif cmd == 'w':\n print('Wi:')\n print(Wi)\n print('Wo:')\n print(Wo)\n else:\n #try to adjust step speed\n try:\n steps = int(cmd)\n except:\n pass\n\n #loop over number of steps before drawing output\n for i in range(steps):\n #build input vector\n In = numpy.concatenate(([1], lastX, lastY, lastR, lastZ))\n \n #calculate internal node values\n Z = nlf(Wi @ In)\n dZ = numpy.diag(nld(Wi @ In))\n\n #add bias value for hidden nodes\n Hid = numpy.concatenate(([1], Z))\n\n #calculate output predictions\n Out = nlf(Wo @ Hid)\n dOut = numpy.diag(nld(Wo @ Hid))\n\n #separate output into X, Y and R predictions\n preX = Out[:dimX]\n preY = Out[dimX:dimX + dimY]\n 
preR = Out[dimX + dimY:]\n\n #calculate reward gradient\n dOutdIn = dOut @ Wo[:,1:] @ dZ @ Wi[:,1:]\n dYdR = dOutdIn[dimX + dimY:, dimZ + dimX:dimZ + dimX + dimY]\n\n #calculate optimal output based on prediction and reward gradient\n Y = preY + alpha*numpy.sum(dYdR, 0)\n\n #use actual output to move and calculate inputs and reward\n [X, R] = process(Y)\n\n #concatinate real values for training\n Target = numpy.concatenate((X, Y, R))\n\n #calculate the error\n dEdWo = numpy.outer((Out-Target) @ dOut, Hid)\n dEdWi = numpy.outer((Out-Target) @ dOut @ Wo[:,1:] @ dZ, In)\n\n #update weigths\n Wo -= alpha*dEdWo\n Wi -= alpha*dEdWi\n\n #save values for next iteration\n lastX = X\n lastY = Y\n lastR = R\n lastZ = Z\n","sub_path":"Archive/trial-01.py","file_name":"trial-01.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"53440765","text":"from .orbitcontents import OrbitContent\nfrom .tables import WorldClimate\n\n\nclass AsteroidBelt(OrbitContent):\n \"\"\"Class for asteroid belts.\"\"\"\n def __init__(self, primarystar, orbitalradius):\n OrbitContent.__init__(self, primarystar, orbitalradius)\n self.makeresources()\n self.makesurftemp()\n self.makeclimate()\n self.__habitability = 0\n self.__affinity = self.__habitability + self.__rvm\n\n def __repr__(self):\n return repr(\"Asteroid Belt\")\n\n def type(self):\n return \"Ast. Belt\"\n\n def printinfo(self):\n print(\"Asteroid Belt {}\".format(self.getName()))\n print(\" Orbit:\\t{}\".format(self.getOrbit()))\n print(\" Orb Per:\\t{}\".format(self.getPeriod()))\n print(\" Orb Ecc:\\t{}\".format(self.getEcc()))\n print(\" RVM:\\t{}\".format(self.__rvm))\n print(\" Res. V:\\t{}\".format(self.__resources))\n print(\" Aff.:\\t{}\".format(self.__affinity))\n print(\"\")\n\n def makeresources(self):\n dice = self.roll(3,0)\n rvm = -5\n value = 'Worthless'\n if dice == 4:\n rvm = -4\n value = 'Very Scant'\n if dice == 5:\n rvm = -3\n value = 'Scant'\n if dice >= 6 and dice <= 7:\n rvm = -2\n value = 'Very Poor'\n if dice >= 8 and dice <= 9:\n rvm = -1\n value = 'Poor'\n if dice >= 10 and dice <= 11:\n rvm = 0\n value = 'Average'\n if dice >= 12 and dice <= 13:\n rvm = 1\n value = 'Abundant'\n if dice >= 14 and dice <= 15:\n rvm = 2\n value = 'Very Abundant'\n if dice == 16:\n rvm = 3\n value = 'Rich'\n if dice == 17:\n rvm = 4\n value = 'Very Rich'\n if dice == 16:\n rvm = 5\n value = 'Motherlode'\n self.__rvm = rvm\n self.__resources = value\n\n def makesurftemp(self):\n self.__avsurf = self.getBBTemp() * 0.97\n\n def getAvSurf(self):\n return self.__avsurf\n\n def makeclimate(self):\n self.__climate = WorldClimate(self.getAvSurf())\n\n def getClimate(self):\n return self.__climate\n\n def getResources(self):\n return self.__resources\n\n def getRVM(self):\n return self.__rvm\n\n def getAffinity(self):\n return self.__affinity\n","sub_path":"gurpsspace/asteroidbelt.py","file_name":"asteroidbelt.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"76254831","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 30 19:50:51 2018\n\n@author: shotaro\n\"\"\"\n\nimport glfw\nimport numpy\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\ndef drawAxis(): \n lim = 100\n glLineWidth(2)\n \n glBegin(GL_LINES)\n glColor(1.0,0,0)\n glVertex3d(lim,0,0)\n glVertex3d(0,0,0)\n \n glColor(0,1.0,0)\n glVertex3d(0,0,0)\n glVertex3d(0,lim,0)\n 
\n glColor(0,0,1)\n glVertex3d(0, 0,0)\n glVertex3d(0, 0,lim)\n glEnd()\n \ndef drawPlane():\n glLineWidth(1)\n glColor(1,1,1)\n glBegin(GL_LINES)\n for i in range(10):\n for j in range(10):\n if i == 0 or j == 0:\n continue\n glVertex3d(i,j,0)\n glVertex3d(-i,j,0)\n \n glVertex3d(i,-j,0)\n glVertex3d(-i,-j,0)\n \n glVertex3d(i,j,0)\n glVertex3d(i,-j,0)\n \n glVertex3d(-i,j,0)\n glVertex3d(-i,-j,0)\n glEnd()\n\ndef drawCircle(r,lim = 24):\n glLineWidth(5)\n glColor3f(0.2, 0.1, 0.8)\n glBegin(GL_LINE_LOOP)\n for i in range(lim):\n x = r * numpy.cos(numpy.pi * 2 / lim * i)\n y = r * numpy.sin(numpy.pi * 2 / lim * i)\n glVertex3d(x,y,0)\n glEnd()\n\ndef drawCube(f):\n glLineWidth(5)\n glColor3f(0.5, 0.0, 1.0)\n glBegin(GL_LINE_LOOP)\n glVertex3d(-f, -f, -f)\n glVertex3d(-f, f, -f)\n glVertex3d( f, f, -f)\n glVertex3d( f, -f, -f)\n glEnd()\n \n glBegin(GL_LINE_LOOP)\n glVertex3d(-f, -f, f)\n glVertex3d(-f, f, f)\n glVertex3d( f, f, f)\n glVertex3d( f, -f, f)\n glEnd()\n \n glBegin(GL_LINES)\n glVertex3d(-f, -f, -f)\n glVertex3d(-f, -f, f)\n \n glVertex3d(-f, f, -f)\n glVertex3d(-f, f, f)\n\n glVertex3d( f, -f, -f)\n glVertex3d( f, -f, f)\n\n glVertex3d( f, f, -f)\n glVertex3d( f, f, f)\n glEnd()\n \ndef init():\n glLineWidth(3)\n glEnable(GL_LINE_SMOOTH);\n glClearColor(0.2, 0.1, 0.2, 1.0)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n return True\n\ndef draw():\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glEnable(GL_DEPTH_TEST)\n \n drawPlane()\n drawAxis()\n \n drawCube(1)\n drawCircle(4,lim=16)\n\ndef key_cb(window,key,scancode,action,mods):\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(window,True)\n\ndef main():\n if not glfw.init():\n return\n\n # Create a windowed mode window and its OpenGL context\n window = glfw.create_window(640, 480, \"Hello World\", None, None)\n \n if not window:\n glfw.terminate()\n return\n \n # Make the window's context current\n glfw.make_context_current(window)\n \n state = init()\n if not state:\n return \n \n glfw.set_key_callback(window,key_cb)\n\n theta = 45\n rad = 5\n \n t = 0\n # Loop until the user closes the window\n while not glfw.window_should_close(window):\n# theta += 0.5\n# t += 1\n if theta > 180:\n theta = theta % 180\n \n x = numpy.cos(numpy.deg2rad(theta)) * rad\n y = numpy.sin(numpy.deg2rad(theta)) * rad\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(x, y, 5, 0, 0, 0, 0, 0, 1)\n# glRotate(t,1,0,0)\n draw()\n \n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glFrustum(-1, 1, -1, 1, 1, 20)\n# glOrtho(-5, 5, -5, 5, 1, 20)\n\n # Swap front and back buffers\n glfw.swap_buffers(window)\n\n # Poll for and process events\n glfw.poll_events()\n\n glfw.terminate()\n\nif __name__ == \"__main__\":\n main()","sub_path":"gl/mio-sample.py","file_name":"mio-sample.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"612550467","text":"import socket,datetime#Module\r\ns = socket.socket()\r\nhost = socket.gethostname()\r\n'''\r\nCreating a object\r\n'''\r\nport = 6380 #Connecting to Port\r\ns.connect((host , port))\r\n'''\r\nHost of your ip adress or Your machine hostname\r\n'''\r\nz = \"Hi Server\"\r\ns.sendall(z.encode())#Sending Message\r\nprint(\"---Dred Client---\")\r\nprint(s.recv(1024))\r\nprint(\"Time of Connection - \",datetime.datetime.now())\r\n'''\r\nSending Messages in bytes with encoding \r\n'''\r\nwhile True: #To Send Messages to Server\r\n r = input(\"\\nType Messages to Send 
\")\r\n s.sendall(r.encode())\r\ns.close()","sub_path":"CLeint.py","file_name":"CLeint.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"386550171","text":"'''\nAuther: GD\n27.04.2017\nProgramm compress all *.tiff and *.tif files to file_name.tiff.zip file in the same directory and log all \nactions into log.txt file with path, date and time\n'''\nimport os\nfrom zipfile import *\nfrom datetime import datetime\n\n\ndef get_path():\n # folder_path = str(input('Enter folder path: \\n')) + '\\\\'\n # log_path = str(input('Enter log folder path: \\n')) + '\\\\'\n folder_path = 'D:\\\\inc\\\\tif\\\\'\n log_path = 'D:\\\\inc\\\\'\n return folder_path, log_path\n\n\ndef recursive_folder_list(path):\n for i in os.listdir(path):\n full_path = os.path.join(path, i)\n if os.path.isfile(full_path):\n if i.lower().endswith('.tif') or i.lower().endswith('.tiff'):\n compress_files(full_path, i)\n else:\n recursive_folder_list(full_path)\n\n\ndef compress_files(path, name):\n zip_archive = ZipFile(path + '.zip', 'w', ZIP_BZIP2, True)\n zip_archive.write(path, arcname=name)\n logging(path)\n zip_archive.close()\n remove_tiff_file(path)\n\n\ndef remove_tiff_file(path):\n os.remove(path)\n\n\ndef start_log():\n f = open(log_path + 'CompressTIFF Log file.txt', 'a')\n f.write(\n '\\n' + '--- CompressTIFF started in ' + str(\n datetime.now().strftime('%d-%m-%Y %H:%M:%S')) + ' ---' + '\\n' + '\\n')\n f.close()\n\n\ndef logging(path):\n f = open(log_path + 'CompressTIFF Log file.txt', 'a')\n f.write(str(datetime.now().strftime('%H:%M:%S')) + ' ' + path + '\\n')\n f.close()\n\n\ndef end_log():\n f = open(log_path + 'CompressTIFF Log file.txt', 'a')\n f.write(\n '\\n' + '--- CompressTIFF ended in ' + str(datetime.now().strftime('%d-%m-%Y %H:%M:%S')) + ' ---' + '\\n' + '\\n')\n f.close()\n\n\nif __name__ == \"__main__\":\n folder_path, log_path = get_path()\n start_log()\n recursive_folder_list(folder_path)\n end_log()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"228298724","text":"import math\nimport os\nimport logging\nimport random\nfrom pathlib import Path\n\nimport dask\nimport more_itertools\nfrom pycocotools.coco import COCO\n\nfrom falconcv.ds.image import TaggedImage,BoxRegion,PolygonRegion\nfrom falconcv.util import FileUtil,ImageUtil\n\nlogger=logging.getLogger(__name__)\nfrom falconcv.ds.dataset import DatasetDownloader\n\n\nclass Coco(DatasetDownloader):\n def __init__(self, v = 2017):\n super(Coco, self).__init__()\n assert v == 2017, \"version not supported\"\n config_path,_=os.path.splitext(__file__)\n self._annotations_json_file = None\n self._v = v\n self._coco_api_client = None\n self._remote_dep = {\n \"annotations_file_uri\": \"http://images.cocodataset.org/annotations/annotations_trainval2017.zip\"\n }\n\n def _create_box_rois(self,image_info,class_name):\n regions=[]\n try:\n category_id=self.labels_map[class_name]\n annotations_ids=self._coco_api_client.getAnnIds(imgIds=image_info['id'],catIds=category_id)\n annotations_info=self._coco_api_client.loadAnns(ids=annotations_ids)\n for ann_info in annotations_info:\n r=BoxRegion()\n bb=ann_info['bbox']\n r.shape_attributes[\"x\"]=math.ceil(bb[0])\n r.shape_attributes[\"y\"]=math.ceil(bb[1])\n r.shape_attributes[\"width\"]=math.ceil(bb[2])\n r.shape_attributes[\"height\"]=math.ceil(bb[3])\n 
r.region_attributes[\"name\"]=class_name\n regions.append(r)\n except Exception as ex:\n print(ex)\n return regions\n\n def _create_polygon_rois(self,image_info,class_name):\n regions=[]\n category_id=self.labels_map[class_name]\n annotations_ids=self._coco_api_client.getAnnIds(imgIds=image_info['id'],catIds=category_id)\n annotations_info=self._coco_api_client.loadAnns(ids=annotations_ids)\n for ann_info in annotations_info:\n polygons =ann_info['segmentation']\n if len(polygons) > 0:\n for polygon in polygons:\n all_x, all_y=[], []\n for i in range(0,len(polygon),2):\n try:\n if isinstance(polygon[i], float) and \\\n isinstance(polygon[i+1],float):\n all_x.append(math.ceil(polygon[i]))\n all_y.append(math.ceil(polygon[i+1]))\n except Exception:\n pass\n if len(all_x) > 0 and len(all_y) > 0:\n bb = ann_info['bbox']\n r=PolygonRegion()\n r.shape_attributes[\"all_points_x\"]=all_x\n r.shape_attributes[\"all_points_y\"]=all_y\n r.shape_attributes[\"x\"] = math.ceil(bb[0])\n r.shape_attributes[\"y\"] = math.ceil(bb[1])\n r.shape_attributes[\"width\"] = math.ceil(bb[2])\n r.shape_attributes[\"height\"] = math.ceil(bb[3])\n r.region_attributes[\"name\"] = class_name\n regions.append(r)\n return regions\n\n @dask.delayed\n def _fetch_single_image(self, img_info, image_id, image_label):\n try:\n img_uri = img_info[\"coco_url\"]\n if FileUtil.exists_http_file(img_uri):\n img_arr = ImageUtil.url2img(img_uri)\n tagged_image=TaggedImage(img_arr)\n tagged_image.id=image_id\n if self.task == \"detection\":\n tagged_image.regions = self._create_box_rois(img_info, image_label)\n elif self.task == \"segmentation\":\n tagged_image.regions=self._create_polygon_rois(img_info,image_label)\n return tagged_image\n except Exception as ex:\n print(ex)\n logger.error(\"error downloading the image with id {} : {}\".format(image_id,ex))\n return None\n\n def fetch(self,n=None,labels=None,batch_size: int = 200):\n try:\n assert self._coco_api_client, \"did you forget to call the setup method?\"\n labels =list(map(lambda l: l.capitalize(),labels))\n valid_labels= {name: id for name, id in self.labels_map.items() if name.capitalize() in labels}\n for class_name, class_id in valid_labels.items():\n logger.info(\"downloading images for : {}\".format(class_name))\n images_ids=self._coco_api_client.getImgIds(catIds=class_id)\n if n:\n count=min(n,len(images_ids))\n images_ids=random.sample(images_ids,count)\n number_of_batches = math.ceil(len(images_ids) / batch_size)\n for i,batch_ids in enumerate(more_itertools.chunked(images_ids,batch_size)):\n images_batch=self._coco_api_client.loadImgs(ids=batch_ids)\n delayed_tasks=[]\n for img_info, image_id in zip(images_batch, batch_ids):\n delayed_tasks.append(\n self._fetch_single_image(\n img_info,\n image_id,\n class_name\n )\n )\n logger.info(\"downloading batch {}/{}\".format(i+1,number_of_batches))\n if len(delayed_tasks) > 0:\n results=dask.compute(*delayed_tasks)\n results=[img for img in results if img]\n yield results\n del results\n except Exception as ex:\n logger.exception(\"Error fetching the images : {} \".format(ex)) # in case something wrong happens\n raise ex\n\n def setup(self, split=\"train\", task=\"detection\"):\n try:\n assert task == \"detection\" or task==\"segmentation\",\"task not supported\"\n assert split in [\"train\",\"validation\"],\"invalid split parameter\"\n super(Coco, self).setup(split, task)\n ann_zip_file: Path =self._dependencies[\"annotations_file_uri\"]\n ann_folder=ann_zip_file.parent.joinpath(\"annotations\")\n ann_file_prefix=\"train\" 
if split == \"train\" else \"val\"\n ann_file=ann_folder.joinpath(\"instances_{}{}.json\".format(ann_file_prefix, self._v))\n self._coco_api_client=COCO(str(ann_file))\n cat_ids=self._coco_api_client.getCatIds()\n cats_info=self._coco_api_client.loadCats(cat_ids)\n self.labels_map={cat['name']:cat['id'] for cat in cats_info}\n self.slabels_map = self.labels_map\n except Exception as ex:\n logger.error(\"Error preparing the dataset : {} \".format(ex))\n raise ex\n\n\n","sub_path":"falconcv/ds/coco.py","file_name":"coco.py","file_ext":"py","file_size_in_byte":6895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"200762782","text":"# 3. Equal Sum Partitioning problem\n\ndef subset(arr, sum, n):\n t= [[False for j in range(sum+1)] for i in range(n+1)]\n\n for j in range(sum+1):\n t[0][j] = False\n\n for i in range(n+1):\n t[i][0] = True\n\n for i in range(1, n+1):\n for j in range(1, sum+1):\n if arr[i-1] > j:\n t[i][j] = subset(arr, j, i-1)\n\n else:\n t[i][j]= subset(arr, j - arr[i-1], i-1) or subset(arr, j, i-1)\n\n return t[n][sum]\n\narr = [5,11,5, 3]\nn = len(arr)\ntotalSum = sum(arr)\n\nif totalSum % 2 != 0:\n print(\"False\")\nelse:\n print(subset(arr,int(totalSum/2), n))","sub_path":"01Knapsack/EqualSumPartitioningProblem.py","file_name":"EqualSumPartitioningProblem.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"357651736","text":"import tkinter as tk\nimport random\n\ndef dispLabel():\n kuji=[\"daikiti\",\"tyukiti\",\"syokiti\",\"kyo\"]\n lbl.configure(text=random.choice(kuji))\n\nroot = tk.Tk()\nroot.geometry(\"200x100\")\n\nlbl = tk.Label(text=\"LABLE\")\nbtn = tk.Button(text=\"PUSH\",command=dispLabel)\n\nlbl.pack()\nbtn.pack()\ntk.mainloop()","sub_path":"workspace/python/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"124792644","text":"import operator\nimport pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\nimport itertools\nfrom fractions import Fraction\nfrom functools import reduce\n\nclass PatternGenerator(DataFrame):\n \n _metadata = ['pattern_definitions', 'pattern_results']\n\n def __init__(self, *args, **kwargs):\n self.pattern_definitions = None\n self.pattern_results = None\n super(PatternGenerator, self).__init__(*args, **kwargs)\n\n def __setattr__(self, attr, val):\n # have to special case b/c pandas tries to use as column\n if attr == 'pattern_definitions':\n object.__setattr__(self, attr, val)\n elif attr == 'pattern_results':\n object.__setattr__(self, attr, val)\n else:\n super(PatternGenerator, self).__setattr__(attr, val)\n\n def generate_patterns(self, *args, **kwargs):\n append = kwargs.pop('append', False)\n if append == True and self.pattern_definition is not None:\n # from struct or from pattern\n self.pattern_definitions.extend(list(generate(dataframe = self, *args, **kwargs)))\n self.pattern_definitions = list(set(self.pattern_definitions))\n else:\n self.pattern_definitions = list(generate(dataframe = self, *args, **kwargs))\n\n# equivalence -> reported together\ndef logical_equivalence(*c):\n \"\"\"Operator definition of logical equivalence taking two parameters\n \"\"\"\n nonzero_c1 = (c[0] != 0)\n nonzero_c2 = (c[1] != 0)\n return ((nonzero_c1 & nonzero_c2) | (~nonzero_c1 & ~nonzero_c2))\n\n# implication\ndef logical_implication(*c):\n \"\"\"Operator 
definition of logical implication taking two parameters\n \"\"\"\n nonzero_c1 = (c[0] != 0)\n nonzero_c2 = (c[1] != 0)\n return ~(nonzero_c1 & ~nonzero_c2)\n\ndef logical_or(*c):\n \"\"\"Operator definition of logical or taking two parameters\n \"\"\"\n nonzero_c1 = (c[0] != 0)\n nonzero_c2 = (c[1] != 0)\n return (nonzero_c1 | nonzero_c2)\n\ndef logical_and(*c):\n \"\"\"Operator definition of logical and taking two parameters\n \"\"\"\n nonzero_c1 = (c[0] != 0)\n nonzero_c2 = (c[1] != 0)\n return (nonzero_c1 & nonzero_c2)\n\noperators = {'>' : operator.gt,\n '<' : operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=' : operator.eq,\n '!=': operator.ne,\n '<->': logical_equivalence,\n '-->': logical_implication}\n\npreprocess = {'>': operator.and_,\n '<': operator.and_,\n '>=': operator.and_,\n '<=': operator.and_,\n '=' : operator.and_,\n '!=': operator.and_,\n '<->': operator.or_,\n '-->': operator.or_,\n 'sum': operator.and_,\n 'ratio': operator.and_,\n 'interval': operator.and_}\n\ndef derive_pattern_statistics(co):\n \"\"\"Pattern statistics:\n co_sum: support (number of confirmations)\n ex_sum: number of exceptions\n conf : confidence\n \"\"\"\n co_sum = co.sum()\n ex_sum = (~co).sum()\n conf = np.round(co_sum / (co_sum + ex_sum), 4)\n # oddsratio is a correlation measure\n #oddsratio = (1 + co_sum) / (1 + ex_sum)\n return co_sum, ex_sum, conf #, oddsratio\n\ndef derive_pattern_data(dataframe, \n P, \n Q,\n pattern, \n co, \n confidence, \n include_co,\n include_ex, \n data_filter):\n \"\"\"Derives the pattern data of a single pattern\n Output: [[pattern, P, Q], co_sum, ex_sum, conf (,confirmation)(,exceptions)]\n \"\"\"\n\n data = list()\n # pattern statistics\n co_sum, ex_sum, conf = derive_pattern_statistics(co)\n # we only store the rules with confidence higher than conf\n if conf >= confidence:\n data = [[pattern, P, Q], co_sum, ex_sum, conf]\n if include_co:\n if data_filter is None:\n data.extend([list(dataframe.index[co])])\n else:\n data.extend([list(dataframe.index[data_filter][co])])\n if include_ex:\n if data_filter is None:\n data.extend([list(dataframe.index[~co])])\n else:\n data.extend([list(dataframe.index[data_filter][~co])])\n return data\n\ndef get_parameters(parameters):\n \"\"\"Extract parameters from parameters list\n \"\"\"\n\n confidence = parameters.get(\"min_confidence\", 0.75)\n support = parameters.get(\"min_support\", 1)\n include_co = parameters.get(\"include_co\", False)\n include_ex = parameters.get(\"include_ex\", False)\n\n return confidence, support, include_co, include_ex\n\n\ndef patterns_column_value(dataframe = None, \n pattern = None,\n columns = None,\n value = None,\n parameters= {}):\n \"\"\"Generate patterns of the form \"[c1] operator value\" where c1 is in columns\n \"\"\"\n\n confidence, support, include_co, include_ex = get_parameters(parameters)\n\n data_array = dataframe.values.T\n \n for c in columns:\n # confirmations and exceptions of the pattern, a list of booleans\n co = reduce(operators[pattern], [data_array[c, :], 0])\n pattern_data = derive_pattern_data(dataframe,\n dataframe.columns[c],\n value,\n pattern, \n co, \n confidence,\n include_co, \n include_ex, None)\n if pattern_data and len(co) >= support:\n yield pattern_data\n \n\ndef patterns_column_column(dataframe = None,\n pattern = None,\n P_columns = None, \n Q_columns = None, \n parameters = {}):\n \"\"\" Generate patterns of the form {[c1] operator [c2]} where c1 and c2 in df.columns\n operators:\n '=' -> patterns in equal values in columns\n '<' -> patterns in 
lower values in columns\n '>' -> patterns in greater values in columns\n '<->' -> patterns in datapoints that are reported together\n \"\"\"\n \n confidence, support, include_co, include_ex = get_parameters(parameters)\n\n preprocess_operator = preprocess[pattern]\n \n initial_data_array = dataframe.values.T\n # set up boolean masks for nonzero items per column\n nonzero = (initial_data_array != 0)\n \n for c0 in P_columns:\n for c1 in Q_columns:\n if c0 != c1:\n # applying the filter\n data_filter = reduce(preprocess_operator, [nonzero[c] for c in [c0, c1]])\n if data_filter.any():\n data_array = initial_data_array[:, data_filter]\n # confirmations of the pattern, a list of booleans\n co = reduce(operators[pattern], data_array[[c0, c1], :])\n if co.any():\n pattern_data = derive_pattern_data(dataframe,\n dataframe.columns[c0], \n dataframe.columns[c1], \n pattern,\n co, \n confidence,\n include_co,\n include_ex, data_filter)\n if pattern_data and len(co) >= support:\n yield pattern_data\n\ndef patterns_ratio(dataframe = None,\n pattern = None,\n columns = None, \n parameters = {}):\n \"\"\"Generate patterns with ratios\n \"\"\"\n \n confidence, support, include_co, include_ex = get_parameters(parameters)\n\n limit_denominator = parameters.get(\"limit_denominator\", 10000000)\n\n preprocess_operator = preprocess[pattern]\n \n # set up boolean masks for nonzero items per column\n nonzero = (dataframe.values != 0).T\n \n for c0 in columns:\n for c1 in columns:\n if c0 != c1:\n # applying the filter\n data_filter = reduce(preprocess_operator, [nonzero[c] for c in [c0, c1]])\n data_array = map(lambda e: Fraction(e).limit_denominator(limit_denominator), \n dataframe.values[data_filter, c0] / dataframe.values[data_filter, c1])\n ratios = pd.Series(data_array)\n if support >= 2:\n possible_ratios = ratios.loc[ratios.duplicated(keep = False)].unique()\n else:\n possible_ratios = ratios.unique()\n for v in possible_ratios:\n if (abs(v) > 1e-6) and (v > -1) and (v < 1):\n # confirmations of the pattern, a list of booleans\n co = ratios==v\n if sum(co) >= support:\n pattern_data = derive_pattern_data(dataframe,\n str(v),\n [dataframe.columns[c0], \n dataframe.columns[c1]], \n pattern,\n co, \n confidence,\n include_co,\n include_ex, data_filter)\n if pattern_data:\n yield pattern_data\n\ndef patterns_interval(dataframe = None,\n pattern = None,\n columns = None, \n parameters = {}):\n \n confidence, support, include_co, include_ex = get_parameters(parameters)\n\n limit_denominator = parameters.get(\"limit_denominator\", 10000000)\n\n preprocess_operator = preprocess[pattern]\n \n # set up boolean masks for nonzero items per column\n nonzero = (dataframe.values != 0).T\n \n for c0 in columns:\n for c1 in columns:\n if c0 != c1:\n # applying the filter\n data_filter = reduce(preprocess_operator, [nonzero[c] for c in [c0, c1]])\n data_array = map(lambda e: Fraction(e).limit_denominator(limit_denominator), \n dataframe.values[data_filter, c0] / dataframe.values[data_filter, c1])\n ratios = pd.Series(data_array)\n \n interval_elements = list(ratios.loc[ratios.duplicated(keep = False)].unique())\n\n if 1 in interval_elements: interval_elements.remove(1)\n if -1 in interval_elements: interval_elements.remove(-1)\n\n if len(interval_elements) > 0:\n interval_elements.sort()\n if min(ratios) not in interval_elements:\n interval_elements = [-np.inf] + interval_elements\n if max(ratios) not in interval_elements:\n interval_elements = interval_elements + [np.inf]\n intervals = 
list(itertools.combinations(interval_elements,2))\n if (-np.inf, np.inf) in intervals:\n intervals.remove((-np.inf, np.inf))\n \n for interval in intervals:\n # confirmations of the pattern, a list of booleans\n co = (ratios >= interval[0]) & (ratios <= interval[1])\n if sum(co) >= support:\n pattern_data = derive_pattern_data(dataframe,\n \"[\" + str(interval[0]) + \", \" + str(interval[1]) + \"]\",\n [dataframe.columns[c0], \n dataframe.columns[c1]], \n pattern,\n co, \n confidence,\n include_co,\n include_ex, data_filter)\n if pattern_data:\n yield pattern_data\n\ndef patterns_sums_column(dataframe = None,\n pattern = None,\n parameters = {}):\n \"\"\"Generate patterns with sums\n \"\"\"\n\n confidence, support, include_co, include_ex = get_parameters(parameters)\n sum_elements = parameters.get(\"sum_elements\", 2)\n\n preprocess_operator = preprocess[pattern]\n initial_data_array = dataframe.values.T\n # set up boolean masks for nonzero items per column\n nonzero = (initial_data_array != 0)\n\n n = len(dataframe.columns)\n# matrix = np.ones(shape = (n, n), dtype = bool)\n# for c in itertools.combinations(range(n), 2):\n# v = (data_array[c[1], :] <= data_array[c[0], :] + 1).all()\n# matrix[c[0], c[1]] = v\n# matrix[c[1], c[0]] = ~v\n# np.fill_diagonal(matrix, False)\n\n for lhs_elements in range(2, sum_elements + 1):\n for rhs_column in range(n):\n start_array = initial_data_array\n # minus righthandside is taken so we can use sum/add function for all columns\n start_array[rhs_column, :] = -start_array[rhs_column, :]\n# lower_columns, = np.where(matrix[sum_col] == True)\n lhs_column_list = [col for col in range(n) if col != rhs_column]\n for lhs_columns in itertools.combinations(lhs_column_list, lhs_elements):\n all_columns = lhs_columns + (rhs_column,)\n #data_filter = reduce(preprocess_operator, [nonzero[c] for c in all_columns])\n data_filter = np.logical_and.reduce(nonzero[all_columns, :])\n# data_filter = nonzero[rhs_column]\n# for col in lhs_columns:\n# data_filter = data_filter & nonzero[col]\n if data_filter.any():\n data_array = start_array[:, data_filter]\n# co = (abs(reduce(operator.add, data_array[all_columns, :])) < 1)\n co = (abs(np.sum(data_array[all_columns, :], axis = 0)) < 1)\n if co.any():\n pattern_data = derive_pattern_data(dataframe, \n [dataframe.columns[c] for c in lhs_columns],\n dataframe.columns[rhs_column],\n pattern,\n co, \n confidence,\n include_co,\n include_ex, None)\n if pattern_data and len(co) >= support:\n yield pattern_data\n\ndef generate(dataframe = None,\n P_dataframe = None,\n Q_dataframe = None,\n pattern = None,\n columns = None,\n P_columns = None, \n Q_columns = None,\n value = None,\n parameters = {}):\n \"\"\"General function to call specific pattern functions\n Only numerical columns are used\n \"\"\"\n\n # if P_dataframe and Q_dataframe are given then join the dataframes and select columns\n if (not P_dataframe is None) and (not Q_dataframe is None):\n try:\n dataframe = P_dataframe.join(Q_dataframe)\n except:\n print(\"Join of P_dataframe and Q_dataframe failed, overlapping columns?\")\n return []\n P_columns = P_dataframe.columns\n Q_columns = Q_dataframe.columns\n\n # select all columns with numerical values\n numerical_columns = [dataframe.columns[c] for c in range(len(dataframe.columns)) \n if ((dataframe.dtypes[c] == 'float64') or (dataframe.dtypes[c] == 'int64')) and (dataframe.iloc[:, c] != 0).any()]\n dataframe = dataframe[numerical_columns]\n\n if not P_columns is None:\n P_columns = [dataframe.columns.get_loc(c) for c in 
P_columns if c in numerical_columns]\n else:\n P_columns = range(len(dataframe.columns))\n\n if not Q_columns is None:\n Q_columns = [dataframe.columns.get_loc(c) for c in Q_columns if c in numerical_columns]\n else:\n Q_columns = range(len(dataframe.columns))\n\n if not columns is None: \n columns = [dataframe.columns.get_loc(c) for c in columns if c in numerical_columns]\n else:\n columns = range(len(dataframe.columns))\n\n # if a value is given -> columns pattern value\n if not value is None:\n return patterns_column_value(dataframe = dataframe,\n pattern = pattern,\n columns = columns,\n value = value,\n parameters = parameters)\n # if the pattern is sum and sum_elements is given -> c1 + ... cn = c\n elif pattern == 'sum':\n return patterns_sums_column(dataframe = dataframe,\n pattern = pattern,\n parameters = parameters) \n elif pattern == 'ratio':\n return patterns_ratio(dataframe = dataframe,\n pattern = pattern,\n columns = columns,\n parameters = parameters) \n elif pattern == 'interval':\n return patterns_interval(dataframe = dataframe,\n pattern = pattern,\n columns = columns,\n parameters = parameters) \n # everything else -> c1 pattern c2\n else:\n return patterns_column_column(dataframe = dataframe,\n pattern = pattern, \n P_columns = P_columns,\n Q_columns = Q_columns,\n parameters = parameters)","sub_path":"insurlib/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":18376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"19609834","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 12 15:46:40 2020\r\n\r\n@author: mehmet\r\n\"\"\"\r\nimport numpy\r\nimport pickle\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom scipy.stats import norm ,multivariate_normal\r\nimport matplotlib.pyplot as plt\r\nfrom random import random\r\nfrom skimage.transform import rescale, resize, downscale_local_mean\r\n\r\n# from skimage.transform import rescale, resize, downscale_local_mean\r\ndef unpickle(file):\r\n with open(file, 'rb') as f:\r\n dict = pickle.load(f, encoding=\"latin1\")\r\n return dict\r\ndata_batch_1 = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/data_batch_1')\r\ndata_batch_2 = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/data_batch_2')\r\ndata_batch_3 = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/data_batch_3')\r\ndata_batch_4 = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/data_batch_4')\r\ndata_batch_5 = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/data_batch_5')\r\ntrdata =np.concatenate(( data_batch_1[\"data\"],data_batch_2[\"data\"],data_batch_3[\"data\"],data_batch_4[\"data\"],data_batch_5[\"data\"]))\r\ntrlabel = np.concatenate(( data_batch_1[\"labels\"],data_batch_2[\"labels\"],data_batch_3[\"labels\"],data_batch_4[\"labels\"],data_batch_5[\"labels\"]))\r\ntrlabel = np.array(trlabel)\r\ndatadict = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. 
Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/test_batch')\r\nx = trdata\r\ny = datadict[\"data\"]\r\ntest_label = datadict[\"labels\"]\r\ntest_label = np.array(test_label)\r\ny = y.reshape(10000, 3, 32, 32)\r\nlabeldict = unpickle(r'C:\\Users\\mehmet\\Desktop\\Master at Machine Learning\\1. Period\\Introduction to pattern recognition and ML\\Exercise 2\\cifar-10-batches-py/batches.meta')\r\nlabel_names = labeldict[\"label_names\"]\r\nlabel_names = np.array(label_names)\r\nx = x.reshape(50000, 3, 32, 32).transpose(0,2,3,1)\r\n# Average color for training data\r\ndef cifar10_color(x):\r\n    Xf = []\r\n    for i in range(0,len(trlabel)):\r\n        average1 = x[i].mean(axis=0).mean(axis=0)\r\n        Xf.append(average1)\r\n    Xf = np.array(Xf)\r\n    return Xf\r\nYf = [] # average color for test data\r\nfor i in range(0,len(y)):\r\n    average1 = y[i].mean(axis=0).mean(axis=0)\r\n    Yf.append(average1)\r\nYf = np.array(Yf)\r\nXf = cifar10_color(x)\r\nY = [ Xf[:,0],Xf[:,1],Xf[:,2],trlabel]\r\nY = np.array(Y)\r\nY = Y.transpose()\r\n## to find sigma and mean values\r\ndef cifar_10_naivebayes_learn(Xf,Y):\r\n    mu = []\r\n    sigma = []\r\n    for i in range(0,len(label_names)):\r\n        a = [np.mean(Y[Y[:,3]==i, 0:], axis=0)]\r\n        a2 = [np.std(Y[Y[:,3]==i, 0:], axis=0)]\r\n        sigma.append(a2)\r\n        mu.append(a)\r\n    sigma = np.array(sigma)\r\n    mu = np.array(mu)\r\n    Y = mu.reshape(10,4) , sigma.reshape(10,4)\r\n    return Y\r\nmu,sigma = cifar_10_naivebayes_learn(Xf,Y)\r\nsigma = sigma[:,0:3]\r\nmu = mu[:,0:3]\r\np = 0.1 + np.zeros([10,1])\r\n#################### Exercise 3.1 ####################################\r\ndef cifar10_classifier_naivebayes(x,mu,sigma,p):\r\n    predicted_class = []\r\n    for i in tqdm(range(0,len(y))):\r\n        P = []\r\n        # evidence term: accumulate the class-conditional likelihoods over all classes\r\n        denum = 0\r\n        for k in range(0,len(mu)):\r\n            denum += (norm.pdf(Yf[i,0],mu[k,0],sigma[k,0])*norm.pdf(Yf[i,1],mu[k,1],sigma[k,1])*norm.pdf(Yf[i,2],mu[k,2],sigma[k,2]))*p[k]\r\n        for k in range(0,len(mu)):\r\n            P1 = ((norm.pdf(Yf[i,0],mu[k,0],sigma[k,0])*norm.pdf(Yf[i,1],mu[k,1], sigma[k,1])*norm.pdf(Yf[i,2], mu[k,2], sigma[k,2]))*p[k])/denum\r\n            P.append(P1)\r\n        predicted_class1 = P.index(max(P))\r\n        predicted_class.append(predicted_class1)\r\n    predicted_class = np.array(predicted_class)\r\n    return predicted_class\r\npredicted_class = cifar10_classifier_naivebayes(x,mu,sigma,p)\r\nAccuracy = 100*np.mean( predicted_class == test_label )\r\nprint(f'Accuracy of the system for Exercise 3.1 is {Accuracy} %')\r\n\r\n# #################### Exercise 3.2 ####################################\r\ncovariance = []\r\nfor i in range(0,10):\r\n    class_imgs = Xf[np.where(trlabel==i)]\r\n    b = np.cov(class_imgs, rowvar= False)\r\n    covariance.append(b)\r\ncovariance = np.array(covariance)\r\ndef cifar10_classifier_naivebayes(x,mu,covariance,p):\r\n    predicted_class = []\r\n    for i in tqdm(range(0,len(test_label))):\r\n        P2 = []\r\n        # evidence term: accumulate the multivariate likelihoods over all classes\r\n        denum = 0\r\n        for k in range(0,len(mu)):\r\n            denum += (multivariate_normal.pdf(Yf[i],mu[k], covariance[k]))*p[k]\r\n        for k in range(0,len(mu)):\r\n            P1 = (multivariate_normal.pdf(Yf[i],mu[k],covariance[k])*p[k])/denum\r\n            P2.append(P1)\r\n        predicted_class1 = P2.index(max(P2))\r\n        predicted_class.append(predicted_class1)\r\n    predicted_class = np.array(predicted_class)\r\n    return predicted_class\r\npredicted_class = cifar10_classifier_naivebayes(x,mu,covariance,p)\r\nAccuracy = 100*np.mean( predicted_class == test_label )\r\nprint(f'Accuracy of the system for Exercise 3.2 is {Accuracy} % ')\r\n\r\n\r\n# #################### 
Exercise 3.3 ####################################\r\n\r\nfor size in tqdm([1,2,4,6]):\r\n    rgb_vals = []\r\n    for i in range(y.shape[0]):\r\n        # Downscale each image to size x size and flatten the three colour channels into one feature vector\r\n        img = y[i]\r\n        img_8x8 = resize(img, (size, size))\r\n        r_vals = img_8x8[:,:,0].reshape(size*size)\r\n        g_vals = img_8x8[:,:,1].reshape(size*size)\r\n        b_vals = img_8x8[:,:,2].reshape(size*size)\r\n        rgb_vals1 = np.concatenate((r_vals,g_vals,b_vals))\r\n        rgb_vals.append(rgb_vals1)\r\n    rgb_vals = np.array(rgb_vals)\r\n    X_mean = []\r\n    covariance = []\r\n    for i in range(0,10):\r\n        class_imgs = rgb_vals[np.where(test_label==i)]\r\n        X_mean1 = np.mean(class_imgs, axis=0)\r\n        covariance1 = np.cov(class_imgs, rowvar= False)\r\n        X_mean.append(X_mean1)\r\n        covariance.append(covariance1)\r\n    X_mean = np.array(X_mean)\r\n    covariance = np.array(covariance)\r\n    \r\n    def cifar10_classifier_naivebayes(x,mu,covariance,p):\r\n        predicted_class = []\r\n        for i in range(0,len(test_label)):\r\n            P2 = []\r\n            # evidence term: accumulate the multivariate likelihoods over all classes\r\n            denum = 0\r\n            for k in range(0,len(p)):\r\n                denum += (multivariate_normal.pdf(rgb_vals[i],X_mean[k], covariance[k]))*p[k]\r\n            for k in range(0,len(p)):\r\n                P1 = (multivariate_normal.pdf(rgb_vals[i],X_mean[k],covariance[k])*p[k])/denum\r\n                P2.append(P1)\r\n            predicted_class1 = P2.index(max(P2))\r\n            predicted_class.append(predicted_class1)\r\n        predicted_class = np.array(predicted_class)\r\n        return predicted_class\r\n    predicted_class = cifar10_classifier_naivebayes(x,mu,covariance,p)\r\n    Accuracy = 100*np.mean( predicted_class == test_label )\r\n    # print(f'Accuracy of the system is {Accuracy} % ')\r\n    plt.plot(size, Accuracy, 'ro' )\r\n    plt.xlabel('size')\r\n    plt.ylabel('Accuracy')\r\n    \r\n########################## THE END ####################################################\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Course Scripts/Pattern Recognition and/Naive_Bayes_ Learning for _CIFAR10 _dataset.py","file_name":"Naive_Bayes_ Learning for _CIFAR10 _dataset.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"221013801","text":"#!/usr/bin/python3\n\nimport argparse\nimport json\nimport re\nimport os\n\nHISTORY_HTML_TEMPL = u\"\"\"<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\">\n<title>History</title>\n</head>\n
<body>\n<h1>History</h1>\n<ul>\n{body}</ul>\n</body>\n</html>
\n\n\n\"\"\"\n\n\ndef sort_by_time(item):\n return item['reportTime']\n\n\ndef get_report_names(path, file_type):\n results = []\n for file_name in os.listdir(path):\n s = re.search(\n '^index_(\\\\d{{4}})(\\\\d{{2}})(\\\\d{{2}})(\\\\d{{2}})(\\\\d{{2}})(\\\\d{{2}})\\\\.{}$'.format(\n file_type), file_name)\n if s:\n results.append({\n 'reportTime': '{}-{}-{} {}:{}:{}'.format(\n s.group(1), s.group(2), s.group(3), s.group(4), s.group(5), s.group(6)),\n 'reportPath': file_name})\n results.sort(key=sort_by_time, reverse=True)\n return results\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Recreate history files generated by xrd_all_methods.py',\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument('path', metavar='PATH', help='Path to the reports')\n parser.add_argument('--html', action='store_true', help='Recreate history.html')\n parser.add_argument('--json', action='store_true', help='Recreate history.json')\n args = parser.parse_args()\n\n if not os.path.exists(args.path):\n print(u'Directory not found \"{}\"'.format(args.path))\n exit(1)\n\n do_html = False\n if args.html:\n do_html = True\n\n do_json = False\n if args.json:\n do_json = True\n\n if not do_html and not do_json:\n print('Specify which history files to recreate!\\n')\n parser.print_help()\n exit(0)\n\n if do_html:\n html_names = get_report_names(args.path, 'html')\n html_items = ''\n for item in html_names:\n html_items += '
<li><a href=\"{}\">{}</a></li>
\\n'.format(\n item['reportPath'], item['reportTime'])\n if html_items:\n html = HISTORY_HTML_TEMPL.format(body=html_items)\n with open(u'{}/history.html'.format(args.path), 'w') as f:\n f.write(html)\n print('Writing {} items to {}/history.html'.format(len(html_names), args.path))\n else:\n print('No HTML reports files found in directory: {}'.format(args.path))\n\n if do_json:\n json_names = get_report_names(args.path, 'json')\n if len(json_names):\n with open(u'{}/history.json'.format(args.path), 'w') as f:\n json.dump(json_names, f, indent=2, ensure_ascii=False)\n print('Writing {} items to {}/history.json'.format(len(json_names), args.path))\n else:\n print('No JSON reports files found in directory: {}'.format(args.path))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"xrdinfo/wsdls_history.py","file_name":"wsdls_history.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"578620884","text":"# Copyright 2011-2013 GRNET S.A. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation are\n# those of the authors and should not be interpreted as representing official\n# policies, either expressed or implied, of GRNET S.A.\n#\n\nfrom django.core.management.base import CommandError\nfrom snf_django.management.commands import RemoveCommand\nfrom snf_django.lib.api import faults\nfrom synnefo.logic import networks\nfrom synnefo.management.common import get_network, convert_api_faults\n\n\nclass Command(RemoveCommand):\n can_import_settings = True\n args = \" [ ...]\"\n help = \"Remove a network from the Database, and Ganeti\"\n\n @convert_api_faults\n def handle(self, *args, **options):\n if not args:\n raise CommandError(\"Please provide a network ID\")\n\n force = options['force']\n message = \"networks\" if len(args) > 1 else \"network\"\n self.confirm_deletion(force, message, args)\n\n for network_id in args:\n self.stdout.write(\"\\n\")\n try:\n network = get_network(network_id, for_update=True)\n self.stdout.write('Removing network: %s\\n' %\n network.backend_id)\n\n networks.delete(network)\n\n self.stdout.write(\"Successfully submitted Ganeti jobs to\"\n \" remove network %s\\n\" % network.backend_id)\n except (CommandError, faults.BadRequest) as e:\n self.stdout.write(\"Error -- %s\\n\" % e.message)\n","sub_path":"snf-cyclades-app/synnefo/logic/management/commands/network-remove.py","file_name":"network-remove.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"100125898","text":"\"\"\"\nfrom https://leetcode.com/problems/zigzag-conversion/#/description\n\nNotes: Very unclear problem:\nExample given:\n Input:\n \"PAYPALISHIRING\"\n Expected output:\n P A H N\n A P L S I I G\n Y I R\n Note:\n What happens when numRows is even? 
There seems to be no column in between other columns, but not sure.\n That is, I think the expected output would be\n P Y A I H R N\n A P L S I I G\n\n Note2: See this about the zigzag pattern\n https://discuss.leetcode.com/topic/22925/if-you-are-confused-with-zigzag-pattern-come-and-see\n\"\"\"\n\n\nclass Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n rotation = list(range(numRows)) + list(range(1, numRows - 1))[::-1]\n\n # if numRows % 2 == 0:\n # rotation = list(range(numRows))\n # else:\n # rotation = list(range(numRows)) + [int(numRows / 2)]\n arr = [[] for i in range(numRows)]\n for i in range(len(s)):\n arr_ind = rotation[i % len(rotation)]\n arr[arr_ind].append(s[i])\n\n res = ''\n for a in arr:\n res = '%s%s' % (res, ''.join(a))\n return res\n\n\nif __name__ == '__main__':\n print(Solution().convert('ABCD', 3))\n","sub_path":"leetcode/python/src/6_zigzag_conversion.py","file_name":"6_zigzag_conversion.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"22207731","text":"import os\nimport shutil\nfrom app import app\n\nfrom flask import Flask, request, redirect, abort, jsonify, send_from_directory\nfrom werkzeug.utils import secure_filename\n\n#UPLOAD_DIRECTORY = \"g:/OneDrive/coding/python/PythonPractice/testfolder/api_uploaded_files\"\n#print(os.getcwd()) # Show work dir\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'ini']) \n\ndef allowed_file(filename):\n\treturn '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\nif not os.path.exists(app.config['UPLOAD_DIRECTORY']):\n os.makedirs(app.config['UPLOAD_DIRECTORY'])\n\n\n#app = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"Hello World!\"\n\n\n\n@app.route('/files', methods=['GET']) # Endpoint to list files on the server\ndef listFiles(): \n\n files = []\n\n for filename in os.listdir(app.config['UPLOAD_DIRECTORY']):\n pathToFile = os.path.join(app.config['UPLOAD_DIRECTORY'], filename)\n if os.path.isfile(pathToFile):\n files.append(filename)\n return jsonify(files)\n\n\n\n@app.route('/files/', methods=['GET']) # Download a file\ndef getFile(path):\n return send_from_directory(app.config['UPLOAD_DIRECTORY'], path, as_attachment=True)\n\n\n\n@app.route('/files/', methods=['POST']) # Upload File\ndef postFile(filename):\n if '/' in filename: # Return 400 BAD REQUEST\n abort(400, \"No SubDir's Allowed\")\n\n with open(os.path.join(app.config['UPLOAD_DIRECTORY'], filename), 'wb') as fp:\n fp.write(request.data)\n\n return jsonify(201, 'File uploaded successfully!') # Return 201 CREATED\n\n\n\n@app.route('/del-files/', methods=['DELETE']) # Del file by filename\ndef delFile(filename):\n filePath = os.path.join(app.config['UPLOAD_DIRECTORY'], filename) \n if os.path.isfile(filePath):\n os.remove(filePath)\n response = {\"message\": \"File Deleted\"}\n else:\n response = {\"message\": \"File Not Found!\"}\n \n return jsonify(response), 200\n\n\n\n@app.route('/multiple-files-upload', methods=['POST'])\ndef uploadFiles():\n\t# check if the post request has the file part\n\tif 'files[]' not in request.files:\n\t\tresp = jsonify({'message' : 'No file part in the request'})\n\t\tresp.status_code = 400\n\t\treturn resp\n\t\n\tfiles = request.files.getlist('files[]')\n\t\n\terrors = {}\n\tsuccess = False\n\t\n\tfor file in files:\t\t\n\t\tif file and allowed_file(file.filename): 
#allowed_file(filename) to allow user only upload allowed file types\n\t\t\tfilename = secure_filename(file.filename)\n\t\t\tfile.save(os.path.join(app.config['UPLOAD_DIRECTORY'], filename))\n\t\t\tsuccess = True\n\t\telse:\n\t\t\terrors[file.filename] = 'File type is not allowed'\n\t\n\tif success:\n\t\tresponse = jsonify({'message' : 'Files successfully uploaded'})\n\t\tresponse.status_code = 201\n\t\treturn response\n\telse:\n\t\tresponse = jsonify(errors)\n\t\tresponse.status_code = 500\n\t\treturn response\n\n\n\n@app.route('/folders', methods=['GET']) # List all folders in Path\ndef listFolders(): \n folders = []\n\n for dirs in os.listdir(app.config['UPLOAD_DIRECTORY']):\n pathToDirs = os.path.join(app.config['UPLOAD_DIRECTORY'], dirs)\n if os.path.isdir(pathToDirs):\n folders.append(dirs)\n return jsonify(folders)\n\n\n\n@app.route('/create-folder/', methods=['GET']) # Creates 1 folder with a name entry\ndef createFolder(dirName):\n filePath = os.path.join(app.config['UPLOAD_DIRECTORY'])\n if os.path.exists(dirName):\n response = {\"message\": \"Folder already exists!\"}\n else:\n os.chdir(filePath)\n os.mkdir(dirName)\n response = {\"message\": \"Folder created\"}\n return jsonify (response), 200\n\n\n\n@app.route('/create-folders//', methods=['GET']) # Creates 2 folder with a name entry\ndef createFolders(dirName, dirName2):\n filePath = os.path.join(app.config['UPLOAD_DIRECTORY'])\n if os.path.exists(dirName) or os.path.exists(dirName2):\n response = {\"message\": \"Folders already exists!\"}\n else:\n os.chdir(filePath)\n os.mkdir(dirName)\n os.mkdir(dirName2)\n response = {\"message\": \"Folders created\"}\n return jsonify (response), 200\n\n\n\n@app.route('/del-folder/', methods=['DELETE']) # Delete 1 folder\ndef deleteFolder(dirDel):\n filePath = os.path.join(app.config['UPLOAD_DIRECTORY'])\n os.chdir(filePath)\n if os.path.exists(dirDel):\n os.rmdir(dirDel)\n response = {\"message\": \"Folder deleted successfully.\"}\n else:\n response = {\"message\": \"Folder NOT found!\"}\n return jsonify (response), 200\n\n\n\n@app.route('/del-all-folders', methods=['DELETE']) # Delete all folders tree + included files in folders \ndef delAllFoldersTree():\n for dir in os.listdir(app.config['UPLOAD_DIRECTORY']):\n path = os.path.join(app.config['UPLOAD_DIRECTORY'], dir)\n if os.path.exists(path):\n shutil.rmtree(path, ignore_errors=True)\n response = {\"message\": \"All folders deleted\"}\n else:\n response = {\"message\": \"Folders NOT found\"}\n return jsonify (response), 200\n\n\n\n@app.route('/del-all-empty-folders', methods=['DELETE']) # Delete only all empty folders\ndef delAllEmptyDirs():\n folders = []\n\n for dirs in os.listdir(app.config['UPLOAD_DIRECTORY']):\n pathToDirs = os.path.join(app.config['UPLOAD_DIRECTORY'], dirs)\n if os.path.isdir(pathToDirs):\n folders.append(dirs)\n os.rmdir(pathToDirs)\n response = {\"message\": \"All Folders deleted\"} # ?\n return jsonify(response)\n\n\n\n@app.route('/upload-folder/', methods=['POST'])\ndef uploadFolders(dirName):\n filePath = os.path.join(app.config['UPLOAD_DIRECTORY'])\n if os.path.exists(dirName):\n response = {\"message\": \"Folder already uploaded!\"}\n else:\n os.chdir(filePath)\n os.mkdir(dirName)\n pathNewFld = os.path.abspath(dirName)\n os.chdir(pathNewFld)\n newPath = os.getcwd()\n\n files = request.files.getlist('files[]')\n\t\n errors = {}\n success = False\n\t\n for file in files:\n if file and allowed_file(file.filename): #allowed_file(filename) to allow user only upload allowed file types\n filename = 
secure_filename(file.filename)\n file.save(os.path.join(newPath, filename))\n success = True\n else:\n errors[file.filename] = 'File type is not allowed'\n \n if success:\n response = jsonify({'message' : 'Folder successfully uploaded!'})\n response.status_code = 201\n return response\n else:\n response = jsonify(errors)\n response.status_code = 500\n return response\n \n #return jsonify (response), 200\n\n\n@app.route('/create-multiple-folders/', methods=['POST']) \ndef createMultipleFolders(folders): \n os.chdir(app.config['UPLOAD_DIRECTORY'])\n for folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)\n response = {\"Folders created successfully\"}\n else:\n response = {\"Folders already exists!\"}\n return jsonify(response) \n\n\n\n@app.route('/create-folder-tree///', methods=['GET'])\ndef createDirsTree(root_dir, main_dir, dir_names):\n # Create directory\n os.chdir(app.config['UPLOAD_DIRECTORY'])\n for i in range(0, len(main_dir)):\n for j in range(0,len(main_dir[i])):\n dirName = str(root_dir) + '/' + str(main_dir[i][j])\n \n try:\n # Create target Directory\n os.makedirs(dirName)\n response = {\"Directory \" , dirName , \" Created \"} \n except FileExistsError:\n response = {\"Directory \" , dirName , \" already exists\"} \n\n # Create target Directory if don't exist\n if not os.path.exists(dirName):\n os.makedirs(dirName)\n response = {\"Directory \" , dirName , \" Created \"}\n else: \n response = {\"Directory \" , dirName , \" already exists\"}\n return jsonify(response)\n\n\n\n\n\n@app.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n resp = jsonify(message)\n resp.status_code = 404\n\n return resp\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n app.run(host= '0.0.0.0',port=8000)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"582826393","text":"# Copyright 2017, Inderpreet Singh, All rights reserved.\n\nimport subprocess\nimport logging\nimport time\n\n# my libs\nfrom common import AppError\n\n\nclass SshError(AppError):\n \"\"\"\n Custom exception that describes the failure of the ssh command\n \"\"\"\n pass\n\n\nclass Ssh:\n \"\"\"\n SSH command utility\n \"\"\"\n def __init__(self,\n host: str,\n port: int,\n user: str = None,\n target_dir: str = None):\n if host is None:\n raise ValueError(\"Hostname not specified.\")\n self.__host = host\n self.__port = port\n self.__user = user\n self.__target_dir = target_dir\n self.logger = logging.getLogger(\"Ssh\")\n\n def set_base_logger(self, base_logger: logging.Logger):\n self.logger = base_logger.getChild(\"Ssh\")\n\n def run_command(self, command: str) -> bytes:\n \"\"\"\n Returns the output of the remote command as a bytes\n :param command:\n :return:\n \"\"\"\n if not command:\n raise ValueError(\"Command cannot be empty\")\n command_args = [\n \"ssh\",\n \"-o\", \"PasswordAuthentication = no\", # don't ask for password\n \"-p\", str(self.__port)\n ]\n if self.__user:\n command_args.append(\"{}@{}\".format(self.__user, self.__host))\n else:\n command_args.append(\"{}\".format(self.__host))\n if self.__target_dir:\n command_args.append(\"cd {}; {}\".format(self.__target_dir, command))\n else:\n command_args.append(\"{}\".format(command))\n\n self.logger.debug(\"Command args: {}\".format(str(command_args)))\n sp = subprocess.Popen(command_args,\n stdin=subprocess.DEVNULL,\n 
stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n start_time = time.time()\n out, err = sp.communicate()\n end_time = time.time()\n self.logger.debug(\"Return code: {}\".format(sp.returncode))\n self.logger.debug(\"Command took {:.3f}s\".format(end_time-start_time))\n if sp.returncode != 0:\n raise SshError(err.decode())\n return out\n","sub_path":"src/python/ssh/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"230988477","text":"from django.contrib import admin\n\nfrom savoy.contrib.bookmarks.models import *\n\nclass BookmarkAdmin(admin.ModelAdmin):\n prepopulated_fields = {'slug': ('title',)}\n list_display = ('title','date_published', 'source','rating',)\n search_fields = ('title','description',)\n fieldsets = (\n ('Basic (required)', {\n 'fields': ('url','title','slug','date_published',)\n }),\n ('Additional (optional)', {\n 'fields': ('description',)\n }),\n ('Categorization (optional)', {\n 'fields' : ('tags',)\n }),\n )\n\nadmin.site.register(Bookmark, BookmarkAdmin)","sub_path":"virtualenvs/ninetyseven/src/savoy/contrib/bookmarks/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"560782073","text":"from .attributes import Attributes\nfrom .languages import Languages\nfrom .race import Race, RaceName\n\ndragonborn = Race(\n name=RaceName.dragonborn,\n bonuses=Attributes(strength=2, charisma=1),\n languages=[Languages.common, Languages.draconic],\n)\ndwarf = Race(\n name=RaceName.dwarf,\n bonuses=Attributes(constitution=2),\n languages=[Languages.common, Languages.dwarvish],\n)\nelf = Race(\n name=RaceName.elf,\n bonuses=Attributes(dexterity=2),\n languages=[Languages.common, Languages.elvish],\n)\ngnome = Race(\n name=RaceName.gnome,\n bonuses=Attributes(intelligence=2),\n languages=[Languages.common, Languages.gnomish],\n)\nhalf_elf = Race(\n name=RaceName.half_elf,\n bonuses=Attributes(charisma=2),\n languages=[Languages.common, Languages.elvish],\n)\nhalfling = Race(\n name=RaceName.halfling,\n bonuses=Attributes(dexterity=2),\n languages=[Languages.common, Languages.halfling],\n)\nhalf_orc = Race(\n name=RaceName.half_orc,\n bonuses=Attributes(strength=2),\n languages=[Languages.common, Languages.orc],\n)\nhuman = Race(\n name=RaceName.human,\n bonuses=Attributes(1, 1, 1, 1, 1, 1),\n languages=[Languages.common],\n)\ntiefling = Race(\n name=RaceName.tiefling,\n bonuses=Attributes(charisma=2, intelligence=1),\n languages=[Languages.common, Languages.infernal],\n)\naarakocra = Race(\n name=RaceName.aarakocra,\n bonuses=Attributes(dexterity=2, wisdom=1),\n languages=[Languages.common],\n)\ngenasi = Race(\n name=RaceName.genasi,\n bonuses=Attributes(constitution=2),\n languages=[Languages.common],\n)\ngoliath = Race(\n name=RaceName.goliath,\n bonuses=Attributes(strength=2, constitution=1),\n languages=[Languages.common],\n)\naasimar = Race(\n name=RaceName.aasimar, bonuses=Attributes(charisma=2), languages=[Languages.common]\n)\nbugbear = Race(\n name=RaceName.bugbear,\n bonuses=Attributes(strength=2, dexterity=1),\n languages=[Languages.common],\n)\nfirbolg = Race(\n name=RaceName.firbolg,\n bonuses=Attributes(wisdom=2, strength=1),\n languages=[Languages.common],\n)\ngoblin = Race(\n name=RaceName.goblin,\n bonuses=Attributes(dexterity=2, constitution=1),\n languages=[Languages.common],\n)\nhobgoblin = Race(\n 
name=RaceName.hobgoblin,\n bonuses=Attributes(intelligence=1, constitution=2),\n languages=[Languages.common],\n)\nkenku = Race(\n name=RaceName.kenku,\n bonuses=Attributes(dexterity=2, wisdom=1),\n languages=[Languages.common],\n)\nkobold = Race(\n name=RaceName.kobold,\n bonuses=Attributes(dexterity=2, strength=-2),\n languages=[Languages.common],\n)\nlizardfold = Race(\n name=RaceName.lizardfold,\n bonuses=Attributes(constitution=2, wisdom=1),\n languages=[Languages.common],\n)\norc = Race(\n name=RaceName.orc,\n bonuses=Attributes(strength=2, constitution=1, intelligence=-2),\n languages=[Languages.common],\n)\ntabaxi = Race(\n name=RaceName.tabaxi,\n bonuses=Attributes(dexterity=2, charisma=1),\n languages=[Languages.common],\n)\ntriton = Race(\n name=RaceName.triton,\n bonuses=Attributes(strength=1, constitution=1, charisma=1),\n languages=[Languages.common],\n)\nyuan_ti_pureblood = Race(\n name=RaceName.yuan_ti_pureblood,\n bonuses=Attributes(charisma=2, intelligence=1),\n languages=[Languages.common],\n)\nferal_tiefling = Race(\n name=RaceName.feral_tiefling,\n bonuses=Attributes(dexterity=2, intelligence=1),\n languages=[Languages.common],\n)\ntortle = Race(\n name=RaceName.tortle,\n bonuses=Attributes(strength=2, wisdom=1),\n languages=[Languages.common],\n)\nchangeling = Race(\n name=RaceName.changeling,\n bonuses=Attributes(charisma=2),\n languages=[Languages.common],\n)\nkalashtar = Race(\n name=RaceName.kalashtar,\n bonuses=Attributes(wisdom=2, charisma=1),\n languages=[Languages.common],\n)\neberron_orc = Race(\n name=RaceName.eberron_orc,\n bonuses=Attributes(strength=2, constitution=1),\n languages=[Languages.common],\n)\nshifter = Race(\n name=RaceName.shifter, bonuses=Attributes(), languages=[Languages.common]\n)\nwarforged = Race(\n name=RaceName.warforged,\n bonuses=Attributes(constitution=2),\n languages=[Languages.common],\n)\ngith = Race(\n name=RaceName.gith, bonuses=Attributes(intelligence=1), languages=[Languages.common]\n)\ncentaur = Race(\n name=RaceName.centaur,\n bonuses=Attributes(strength=2, wisdom=1),\n languages=[Languages.common],\n)\nloxodon = Race(\n name=RaceName.loxodon,\n bonuses=Attributes(constitution=2, wisdom=1),\n languages=[Languages.common],\n)\nminotaur = Race(\n name=RaceName.minotaur,\n bonuses=Attributes(strength=2, constitution=1),\n languages=[Languages.common],\n)\nsimic_hybrid = Race(\n name=RaceName.simic_hybrid,\n bonuses=Attributes(constitution=2),\n languages=[Languages.common],\n)\nvedalken = Race(\n name=RaceName.vedalken,\n bonuses=Attributes(intelligence=2, wisdom=1),\n languages=[Languages.common],\n)\n\nall_races = [v for v in locals().values() if isinstance(v, Race)]\n","sub_path":"dnd/races.py","file_name":"races.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"275624111","text":"import itertools\nfrom typing import Union, Dict, Set, Iterable, FrozenSet, Tuple, cast, List, Optional, DefaultDict, Deque\nfrom collections import defaultdict, deque\nfrom copy import deepcopy\nfrom enum import Enum\nfrom fractions import Fraction\n\nimport numpy as np\n\nfrom qctoolkit.utils.types import ChannelID, TimeType\nfrom qctoolkit.pulses.instructions import AbstractInstructionBlock, EXECInstruction, REPJInstruction, GOTOInstruction,\\\n STOPInstruction, CHANInstruction, Waveform, MEASInstruction, Instruction\nfrom qctoolkit.comparable import Comparable\nfrom qctoolkit.utils.tree import Node, is_tree_circular\nfrom qctoolkit.utils.types 
import MeasurementWindow\nfrom qctoolkit.utils import checked_int_cast, is_integer\n\nfrom qctoolkit.pulses.sequence_pulse_template import SequenceWaveform\nfrom qctoolkit.pulses.repetition_pulse_template import RepetitionWaveform\n\n__all__ = ['Loop', 'MultiChannelProgram', 'make_compatible']\n\n\nclass Loop(Comparable, Node):\n \"\"\"Build a loop tree. The leaves of the tree are loops with one element.\"\"\"\n def __init__(self,\n parent: Union['Loop', None]=None,\n children: Iterable['Loop']=list(),\n waveform: Optional[Waveform]=None,\n measurements: Optional[List[MeasurementWindow]]=None,\n repetition_count=1):\n super().__init__(parent=parent, children=children)\n\n self._waveform = waveform\n self._measurements = measurements\n self._repetition_count = int(repetition_count)\n self._cached_body_duration = None\n\n if abs(self._repetition_count - repetition_count) > 1e-10:\n raise ValueError('Repetition count was not an integer')\n\n if not isinstance(waveform, (type(None), Waveform)):\n raise Exception()\n\n @property\n def compare_key(self) -> Tuple:\n return self._waveform, self.repetition_count, tuple(c.compare_key for c in self)\n\n def append_child(self, **kwargs) -> None:\n # do not invalidate but update cached duration\n super().__setitem__(slice(len(self), len(self)), (kwargs, ))\n self._invalidate_duration(body_duration_increment=self[-1].duration)\n\n def _invalidate_duration(self, body_duration_increment=None):\n if self._cached_body_duration is not None:\n if body_duration_increment is not None:\n self._cached_body_duration += body_duration_increment\n else:\n self._cached_body_duration = None\n if self.parent:\n if body_duration_increment is not None:\n self.parent._invalidate_duration(body_duration_increment=body_duration_increment*self.repetition_count)\n else:\n self.parent._invalidate_duration()\n\n def add_measurements(self, measurements: List[MeasurementWindow]):\n body_duration = float(self.body_duration)\n if body_duration == 0:\n measurements = measurements\n else:\n measurements = ((mw_name, begin+body_duration, length) for mw_name, begin, length in measurements)\n\n if self._measurements is None:\n self._measurements = list(measurements)\n else:\n self._measurements.extend(measurements)\n\n @property\n def waveform(self) -> Waveform:\n return self._waveform\n\n @waveform.setter\n def waveform(self, val) -> None:\n self._waveform = val\n self._invalidate_duration()\n\n @property\n def body_duration(self) -> TimeType:\n if self._cached_body_duration is None:\n if self.is_leaf():\n if self.waveform:\n self._cached_body_duration = self.waveform.duration\n else:\n self._cached_body_duration = TimeType(0)\n else:\n self._cached_body_duration = sum(child.duration for child in self)\n return self._cached_body_duration\n\n @property\n def duration(self) -> TimeType:\n return self.repetition_count*self.body_duration\n\n @property\n def repetition_count(self) -> int:\n return self._repetition_count\n\n @repetition_count.setter\n def repetition_count(self, val) -> None:\n new_repetition = int(val)\n if abs(new_repetition - val) > 1e-10:\n raise ValueError('Repetition count was not an integer')\n self._repetition_count = new_repetition\n\n def unroll(self) -> None:\n for i, e in enumerate(self.parent):\n if id(e) == id(self):\n self.parent[i:i+1] = (child.copy_tree_structure(new_parent=self.parent)\n for _ in range(self.repetition_count)\n for child in self)\n self.parent.assert_tree_integrity()\n return\n raise Exception('self not found in parent')\n\n def 
__setitem__(self, idx, value):\n super().__setitem__(idx, value)\n self._invalidate_duration()\n\n def unroll_children(self) -> None:\n old_children = self.children\n self[:] = (child.copy_tree_structure()\n for _ in range(self.repetition_count)\n for child in old_children)\n self.repetition_count = 1\n self.assert_tree_integrity()\n\n def encapsulate(self) -> None:\n self[:] = [Loop(children=self,\n repetition_count=self.repetition_count,\n waveform=self._waveform,\n measurements=self._measurements)]\n self.repetition_count = 1\n self._waveform = None\n self._measurements = None\n self.assert_tree_integrity()\n\n def __repr__(self) -> str:\n is_circular = is_tree_circular(self)\n if is_circular:\n return '{}: Circ {}'.format(id(self), is_circular)\n\n if self.is_leaf():\n return 'EXEC {} {} times'.format(self._waveform, self.repetition_count)\n else:\n repr = ['LOOP {} times:'.format(self.repetition_count)]\n for elem in self:\n sub_repr = elem.__repr__().splitlines()\n sub_repr = [' ->' + sub_repr[0]] + [' ' + line for line in sub_repr[1:]]\n repr += sub_repr\n return '\\n'.join(repr)\n\n def copy_tree_structure(self, new_parent: Union['Loop', bool]=False) -> 'Loop':\n return type(self)(parent=self.parent if new_parent is False else new_parent,\n waveform=self._waveform,\n repetition_count=self.repetition_count,\n measurements=self._measurements,\n children=(child.copy_tree_structure() for child in self))\n\n def _get_measurement_windows(self) -> DefaultDict[str, np.ndarray]:\n temp_meas_windows = defaultdict(list)\n if self._measurements:\n for (mw_name, begin, length) in self._measurements:\n temp_meas_windows[mw_name].append((begin, length))\n\n for mw_name, begin_length_list in temp_meas_windows.items():\n temp_meas_windows[mw_name] = [np.asarray(begin_length_list, dtype=float)]\n\n # calculate duration together with meas windows in the same iteration\n if self.is_leaf():\n body_duration = float(self.waveform.duration)\n else:\n offset = TimeType(0)\n for child in self:\n for mw_name, begins_length_array in child._get_measurement_windows().items():\n begins_length_array[:, 0] += float(offset)\n temp_meas_windows[mw_name].append(begins_length_array)\n offset += child.duration\n\n body_duration = float(offset)\n\n # repeat and add repetition based offset\n for mw_name, begin_length_list in temp_meas_windows.items():\n temp_begin_length_array = np.concatenate(begin_length_list)\n\n begin_length_array = np.tile(temp_begin_length_array, (self.repetition_count, 1))\n\n shaped_begin_length_array = np.reshape(begin_length_array, (self.repetition_count, -1, 2))\n\n shaped_begin_length_array[:, :, 0] += (np.arange(self.repetition_count) * body_duration)[:, np.newaxis]\n\n temp_meas_windows[mw_name] = begin_length_array\n\n return temp_meas_windows\n\n def get_measurement_windows(self) -> Dict[str, Tuple[np.ndarray, np.ndarray]]:\n return {mw_name: (begin_length_list[:, 0], begin_length_list[:, 1])\n for mw_name, begin_length_list in self._get_measurement_windows().items()}\n\n def split_one_child(self, child_index=None) -> None:\n \"\"\"Take the last child that has a repetition count larger one, decrease it's repetition count and insert a copy\n with repetition cout one after it\"\"\"\n if child_index:\n if self[child_index].repetition_count < 2:\n raise ValueError('Cannot split child {} as the repetition count is not larger 1')\n else:\n try:\n child_index = next(i for i in reversed(range(len(self)))\n if self[i].repetition_count > 1)\n except StopIteration:\n raise RuntimeError('There is no 
child with repetition count > 1')\n\n new_child = self[child_index].copy_tree_structure()\n new_child.repetition_count = 1\n\n self[child_index].repetition_count -= 1\n\n self[child_index+1:child_index+1] = (new_child,)\n self.assert_tree_integrity()\n\n def flatten_and_balance(self, depth: int) -> None:\n \"\"\"\n Modifies the program so all tree branches have the same depth\n :param depth: Target depth of the program\n :return:\n \"\"\"\n i = 0\n while i < len(self):\n # only used by type checker\n sub_program = cast(Loop, self[i])\n\n if sub_program.depth() < depth - 1:\n sub_program.encapsulate()\n\n elif not sub_program.is_balanced():\n sub_program.flatten_and_balance(depth - 1)\n\n elif sub_program.depth() == depth - 1:\n i += 1\n\n elif len(sub_program) == 1 and len(sub_program[0]) == 1:\n sub_sub_program = cast(Loop, sub_program[0])\n\n sub_program.repetition_count = sub_program.repetition_count * sub_sub_program.repetition_count\n sub_program[:] = sub_sub_program[:]\n sub_program.waveform = sub_sub_program.waveform\n\n else:\n sub_program.unroll()\n\n\nclass ChannelSplit(Exception):\n def __init__(self, channel_sets):\n self.channel_sets = channel_sets\n\n\nclass MultiChannelProgram:\n def __init__(self, instruction_block: AbstractInstructionBlock, channels: Iterable[ChannelID] = None):\n \"\"\"Channels with identifier None are ignored.\"\"\"\n if channels is None:\n def find_defined_channels(instruction_list):\n for instruction in instruction_list:\n if isinstance(instruction, EXECInstruction):\n return instruction.waveform.defined_channels\n elif isinstance(instruction, REPJInstruction):\n for _ in range(instruction.count):\n return find_defined_channels(\n instruction.target.block.instructions[instruction.target.offset:])\n elif isinstance(instruction, GOTOInstruction):\n return find_defined_channels(instruction.target.block.instructions[instruction.target.offset:])\n elif isinstance(instruction, CHANInstruction):\n return itertools.chain(*instruction.channel_to_instruction_block.keys())\n elif isinstance(instruction, STOPInstruction):\n break\n elif isinstance(instruction, MEASInstruction):\n pass\n else:\n raise TypeError('Unhandled instruction type', type(instruction))\n raise ValueError('Instruction block has no defined channels')\n\n channels = find_defined_channels(instruction_block.instructions)\n else:\n channels = set(channels)\n\n channels = frozenset(channels - {None})\n\n root = Loop()\n stacks = {channels: (root, [((), deque(instruction_block.instructions))])}\n self._programs = dict()\n\n while len(stacks) > 0:\n chans, (root_loop, stack) = stacks.popitem()\n try:\n self._programs[chans] = MultiChannelProgram.__split_channels(chans, root_loop, stack)\n except ChannelSplit as split:\n for new_channel_set in split.channel_sets:\n assert (new_channel_set not in stacks)\n assert (chans.issuperset(new_channel_set))\n\n stacks[new_channel_set] = (root_loop.copy_tree_structure(), deepcopy(stack))\n\n def repeat_measurements(child_loop, rep_count):\n duration_float = float(child_loop.duration)\n if child_loop._measurements:\n for r in range(rep_count):\n for name, begin, length in child_loop._measurements:\n yield (name, begin+r*duration_float, length)\n for channels, program in self._programs.items():\n iterable = program.get_breadth_first_iterator()\n try:\n while True:\n loop = next(iterable)\n if len(loop) == 1 and not loop._measurements:\n loop._measurements = loop[0]._measurements\n loop.waveform = loop[0].waveform\n loop.repetition_count = loop.repetition_count * 
loop[0].repetition_count\n loop[:] = loop[0][:]\n if len(loop):\n iterable = itertools.chain((loop,), iterable)\n except StopIteration:\n pass\n\n @property\n def programs(self) -> Dict[FrozenSet[ChannelID], Loop]:\n return self._programs\n\n @property\n def channels(self) -> Set[ChannelID]:\n return set(itertools.chain(*self._programs.keys()))\n\n @staticmethod\n def __split_channels(channels: FrozenSet[ChannelID],\n root_loop: Loop,\n block_stack: List[Tuple[Tuple[int, ...],\n Deque[Instruction]]]) -> Loop:\n while block_stack:\n current_loop_location, current_instruction_block = block_stack.pop()\n current_loop = root_loop.locate(current_loop_location)\n\n while current_instruction_block:\n instruction = current_instruction_block.popleft()\n\n if isinstance(instruction, EXECInstruction):\n if not instruction.waveform.defined_channels.issuperset(channels):\n raise Exception(instruction.waveform.defined_channels, channels)\n current_loop.append_child(waveform=instruction.waveform)\n\n elif isinstance(instruction, REPJInstruction):\n if current_instruction_block:\n block_stack.append((current_loop_location, current_instruction_block))\n\n current_loop.append_child(repetition_count=instruction.count)\n block_stack.append(\n (current_loop[-1].get_location(),\n deque(instruction.target.block[instruction.target.offset:-1]))\n )\n break\n\n elif isinstance(instruction, CHANInstruction):\n if channels in instruction.channel_to_instruction_block.keys():\n # push to front\n new_instruction_ptr = instruction.channel_to_instruction_block[channels]\n new_instruction_list = [*new_instruction_ptr.block[new_instruction_ptr.offset:-1]]\n current_instruction_block.extendleft(new_instruction_list)\n\n else:\n block_stack.append((current_loop_location, deque([instruction]) + current_instruction_block))\n\n raise ChannelSplit(instruction.channel_to_instruction_block.keys())\n\n elif isinstance(instruction, MEASInstruction):\n current_loop.add_measurements(instruction.measurements)\n\n else:\n raise Exception('Encountered unhandled instruction {} on channel(s) {}'.format(instruction, channels))\n return root_loop\n\n def __getitem__(self, item: Union[ChannelID, Set[ChannelID], FrozenSet[ChannelID]]) -> Loop:\n if not isinstance(item, (set, frozenset)):\n item = frozenset((item,))\n elif isinstance(item, set):\n item = frozenset(item)\n\n for channels, program in self._programs.items():\n if item.issubset(channels):\n return program\n raise KeyError(item)\n\n\ndef to_waveform(program: Loop) -> Waveform:\n if program.is_leaf():\n if program.repetition_count == 1:\n return program.waveform\n else:\n return RepetitionWaveform(program.waveform, program.repetition_count)\n else:\n if len(program) == 1:\n sequenced_waveform = to_waveform(cast(Loop, program[0]))\n else:\n sequenced_waveform = SequenceWaveform([to_waveform(cast(Loop, sub_program))\n for sub_program in program])\n if program.repetition_count > 1:\n return RepetitionWaveform(sequenced_waveform, program.repetition_count)\n else:\n return sequenced_waveform\n\n\nclass _CompatibilityLevel(Enum):\n compatible = 0\n action_required = 1\n incompatible = 2\n\n\ndef _is_compatible(program: Loop, min_len: int, quantum: int, sample_rate: TimeType) -> _CompatibilityLevel:\n program_duration_in_samples = program.duration * sample_rate\n\n if program_duration_in_samples.denominator != 1:\n return _CompatibilityLevel.incompatible\n\n if program_duration_in_samples < min_len or program_duration_in_samples % quantum > 0:\n return _CompatibilityLevel.incompatible\n\n if 
program.is_leaf():\n waveform_duration_in_samples = program.waveform.duration * sample_rate\n if waveform_duration_in_samples < min_len or (waveform_duration_in_samples / quantum).denominator != 1:\n return _CompatibilityLevel.action_required\n else:\n return _CompatibilityLevel.compatible\n else:\n if all(_is_compatible(cast(Loop, sub_program), min_len, quantum, sample_rate) == _CompatibilityLevel.compatible\n for sub_program in program):\n return _CompatibilityLevel.compatible\n else:\n return _CompatibilityLevel.action_required\n\n\ndef _make_compatible(program: Loop, min_len: int, quantum: int, sample_rate: Fraction) -> None:\n\n if program.is_leaf():\n program.waveform = to_waveform(program.copy_tree_structure())\n program.repetition_count = 1\n\n else:\n comp_levels = np.array([_is_compatible(cast(Loop, sub_program), min_len, quantum, sample_rate)\n for sub_program in program])\n incompatible = comp_levels == _CompatibilityLevel.incompatible\n if np.any(incompatible):\n single_run = program.duration * sample_rate / program.repetition_count\n if is_integer(single_run / quantum) and single_run >= min_len:\n new_repetition_count = program.repetition_count\n program.repetition_count = 1\n else:\n new_repetition_count = 1\n program.waveform = to_waveform(program.copy_tree_structure())\n program.repetition_count = new_repetition_count\n program[:] = []\n return\n else:\n for sub_program, comp_level in zip(program, comp_levels):\n if comp_level == _CompatibilityLevel.action_required:\n _make_compatible(sub_program, min_len, quantum, sample_rate)\n\n\ndef make_compatible(program: Loop, minimal_waveform_length: int, waveform_quantum: int, sample_rate: Fraction):\n comp_level = _is_compatible(program,\n min_len=minimal_waveform_length,\n quantum=waveform_quantum,\n sample_rate=sample_rate)\n if comp_level == _CompatibilityLevel.incompatible:\n raise ValueError('The program cannot be made compatible with the given restrictions')\n elif comp_level == _CompatibilityLevel.action_required:\n _make_compatible(program,\n min_len=minimal_waveform_length,\n quantum=waveform_quantum,\n sample_rate=sample_rate)\n","sub_path":"qctoolkit/hardware/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":20859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"457815348","text":"import sys\nimport os\nfrom common import *\nfrom const import *\n\n# socket with bob, pretending to be alice.\n# via fake buffer file\nfake_BUFFER_FILE_NAME = 'buffer_new'\nos.rename(BUFFER_DIR+BUFFER_FILE_NAME, BUFFER_DIR+fake_BUFFER_FILE_NAME)\ndialog = Dialog('print')\nsocket_b, aes_b = setup('alice', BUFFER_DIR, fake_BUFFER_FILE_NAME)\n\n#--------------------------------------------------------\n# socket with alice, pretending to be bob\ndialog = Dialog('print')\nsocket_a, aes_a = setup('bob', BUFFER_DIR, BUFFER_FILE_NAME)\n#---------------------------------------------------------\n\n####################\n#--RELAY\n####################\n\nif len(sys.argv) == 2:\n if sys.argv[1] == '--relay':\n\n #receive and decrypt from alice first\n received_a = receive_and_decrypt(aes_a, socket_a)\n dialog.chat('Eve heard Alice said: \"{}\"'.format(received_a))\n\n #encrypt then send to bob\n encrypt_and_send(received_a, aes_b, socket_b)\n dialog.info('Eve relayed to Bob!')\n\n #receive and decrypt from bob\n received_b = receive_and_decrypt(aes_b, socket_b)\n dialog.chat('Eve heard Bob said: \"{}\"'.format(received_b))\n\n 
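#note: re-encrypting under the Alice-side session key keeps the relay invisible to both parties\n 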
#encrypt and send to alice\n encrypt_and_send(received_b, aes_a, socket_a)\n dialog.info('Eve relayed to Alice!')\n\n#---------------------------------------------------------\n\n####################\n#--BREAK-HEART\n####################\n\n if sys.argv[1] == '--break-heart':\n\n #receive and decrypt from alice first\n received_a = receive_and_decrypt(aes_a, socket_a)\n dialog.chat('Eve heard Alice said: \"{}\"'.format(received_a))\n\n #encrypt then send to bob\n #altered alice's msg\n encrypt_and_send(BAD_MSG['alice'], aes_b, socket_b)\n dialog.info('Eve DESTROYED Bob!')\n\n #receive and decrypt from bob\n received_b = receive_and_decrypt(aes_b, socket_b)\n dialog.chat('Eve heard Bob said: \"{}\"'.format(received_b))\n\n #encrypt and send to alice\n encrypt_and_send(received_b, aes_a, socket_a)\n dialog.info('Eve CONFUSED Alice?')\n\n#---------------------------------------------------------\n\n####################\n#--CUSTOM\n####################\n\n if sys.argv[1] == '--custom':\n\n #receive and decrypt from alice first\n received_a = receive_and_decrypt(aes_a, socket_a)\n dialog.chat('Eve heard Alice said: \"{}\"'.format(received_a))\n\n #prompt user input\n dialog.prompt('Please input message to send to Bob...')\n send_to_bob = input()\n\n #encrypt then send to bob\n #not alice's msg, but user input\n encrypt_and_send(send_to_bob, aes_b, socket_b)\n dialog.info('Eve sent a custom message to Bob!')\n\n #receive and decrypt from bob\n received_b = receive_and_decrypt(aes_b, socket_b)\n dialog.chat('Eve heard Bob said: \"{}\"'.format(received_b))\n\n #prompt user input\n dialog.prompt('Please input message to send to Alice...')\n send_to_alice = input()\n\n #encrypt and send to alice\n encrypt_and_send(send_to_alice, aes_a, socket_a)\n dialog.info('Eve CONFUSED Alice?')\n","sub_path":"Q4/eve.py","file_name":"eve.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"475421511","text":"#coding:utf-8\n\n\"\"\"Parent class.\"\"\"\n\nclass agents:\n\tdef __init__(self, nom, ide):\n\t\tself.nom = nom\n\t\tself.ide = ide\n\n\tdef travailler(self):\n\t\tprint(\"l'agent {}, matricule {} travaille et il est : {}\".format(self.nom, self.ide, self.poste))\n\n\"\"\"Child class.\"\"\"\n\nclass guichetier(agents):\n\tdef __init__(self, nom_guichetier, ide_guichetier, poste):\n\t\t# pass the id to the parent class, not the poste\n\t\tagents.__init__(self, nom_guichetier, ide_guichetier)\n\t\tself.poste = poste\n\n\n\tdef retrait(self, solde = 10000):\n\t\tprint(\"Operations de retrait...\")\n\t\tmontant = input(\"entrer le montant à retirer:\")\n\t\tmontant = int(montant)\n\t\tif montant >= solde:\n\t\t\tprint(\"solde insuffisant !!!\")\n\t\telse:\n\t\t\tsolde -= montant\n\t\t\tprint(\"retrait effectué avec succès !!! 
Nouveau solde :{}\".format(solde))\n\n#pp\n\ng1 = guichetier(\"donman\", \"18A036FS\", \"Guichetier\")\n\ng1.travailler()\n\ng1.retrait()\n","sub_path":"guichetier.py","file_name":"guichetier.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"616975771","text":"import os\nimport time\nimport logging\nimport subprocess\nfrom byteport_client.factories import byteport_client_from_simple_argv\n\n\ndef collect_load_data(byteport_client, interval_sec=60):\n\n while True:\n loadavg = os.getloadavg()\n\n unix_stats = dict()\n unix_stats['la1'] = loadavg[0]\n unix_stats['la5'] = loadavg[1]\n unix_stats['la15'] = loadavg[2]\n\n try:\n #before = time.time()\n byteport_client.store(unix_stats)\n #print \"It took %s seconds\" % (time.time() - before)\n except Exception as e:\n # Catching and logging all errors, then retrying later, is OK in this kind of use case\n logging.error(u'Error during Byteport API call: %s' % e)\n time.sleep(interval_sec)\n\n'''\nSimple script that collects three load figures from the system and uses the Byteport client\nAPI class to post them to api.byteport.se.\n'''\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n logging.debug('====>>> Note DEBUG log level is enabled by default in this example client.')\n\n client = byteport_client_from_simple_argv()\n\n # Log anything by sending a dictionary to the store() method\n try:\n # check_output returns bytes; decode before storing\n git_version = subprocess.check_output([\"git\", \"rev-parse\", \"--short\", \"HEAD\"]).decode().strip()\n client.store({'git_version': git_version})\n except Exception as e:\n logging.warning(\"Failed to obtain git version.\")\n\n # Simple way to log text data around the system\n client.log(\"Byteport Python Example client started!\", level='info')\n\n # Continuous logging example\n collect_load_data(client, 1)\n","sub_path":"python/example_unix_stats_on_interval.py","file_name":"example_unix_stats_on_interval.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"247098116","text":"# Copyright 2021, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A tff.aggregator for flattening coordinate values across vector dimensions.\"\"\"\n\nimport abc\nimport collections\nimport functools\nimport math\nfrom typing import Optional\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.aggregators import factory\nfrom tensorflow_federated.python.aggregators import sum_factory\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.impl.federated_context import intrinsics\nfrom tensorflow_federated.python.core.impl.types import computation_types\nfrom tensorflow_federated.python.core.impl.types import placements\nfrom tensorflow_federated.python.core.impl.types import type_analysis\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom 
tensorflow_federated.python.core.templates import aggregation_process\nfrom tensorflow_federated.python.core.templates import measured_process\nfrom tensorflow_model_optimization.python.core.internal import tensor_encoding as te\n\nSEED_TF_TYPE = tf.int64\nOUTPUT_TF_TYPE = tf.float32\n\n\n# TODO(b/192618450): Revisit the abc design.\nclass _FlatteningFactory(factory.UnweightedAggregationFactory, abc.ABC):\n \"\"\"`UnweightedAggregationFactory` for flattening values across dimensions.\n\n The created `tff.templates.AggregationProcess` takes an input tensor structure\n and applies a random basis transform to each tensor reshaped as a vector.\n This, for instance, can be implemented as a random unitary transform on the\n vector that can be geometrically interpreted as randomly rotating/reflecting\n the vector.\n\n The useful property is that the transform can spread out the coordinate\n values more uniformly across the vector dimensions. This is useful for\n many down-stream operations, such as:\n 1. Uniform quantization, where the resulting vector rotation/reflection\n reduces the dynamic range of the coordinates to be quantized, decreasing\n the error incurred by quantization; and\n 2. Modular wrapping, where the spread-out coordinates after the transform\n could lead to less modular wrapping.\n\n Specifically, for any component tensor of the input tensor structure, the\n forward transform would correspond to the following operations (specific\n implementation depends on the transform):\n 1. Reshapes the tensor into a vector (rank-1 tensor).\n 2. Pads the vector with zeros (the exact number of zeros to pad depends on\n the specific transform).\n 3. Applies a random basis transform to the vector (the randomness depends\n on the transform).\n The backward transform reverts the above steps.\n\n The forward transform is applied on `tff.CLIENTS` while the inverse transform\n (reverting the forward transform) is applied on `tff.SERVER`.\n\n This factory only accepts `value_type` of either `tff.TensorType` or\n `tff.StructWithPythonType` and expects the dtype of component tensors to be\n either all real integers or all real floats, and it will otherwise raise an\n error.\n \"\"\"\n\n ######## Abstract methods: transform-dependent ########\n @abc.abstractmethod\n def _preprocess_tensor(self, x):\n \"\"\"Preprocess a component tensor for forward transform.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def _forward_transform_vector(self, x, seed_pair, num_repeats):\n raise NotImplementedError\n\n @abc.abstractmethod\n def _backward_transform_vector(self, x, seed_pair, num_repeats):\n raise NotImplementedError\n\n ######## Concrete methods: shared across different transforms ########\n def __init__(\n self,\n inner_agg_factory: Optional[factory.UnweightedAggregationFactory] = None,\n num_repeats: int = 1):\n \"\"\"Initializes the FlatteningFactory.\n\n Args:\n inner_agg_factory: The inner `UnweightedAggregationFactory` to aggregate\n the values after the transform.\n num_repeats: The number of times to repeat the transform on each component\n tensor. 
Must be a positive integer.\n\n Raises:\n TypeError: If `inner_agg_factory` is not an instance of\n `tff.aggregators.UnweightedAggregationFactory`\n ValueError: If `num_repeats` is not a positive integer.\n \"\"\"\n if inner_agg_factory is None:\n inner_agg_factory = sum_factory.SumFactory()\n\n if not isinstance(inner_agg_factory, factory.UnweightedAggregationFactory):\n raise TypeError('`inner_agg_factory` must have type '\n 'UnweightedAggregationFactory. '\n f'Found {type(inner_agg_factory)}.')\n\n if not isinstance(num_repeats, int) or num_repeats < 1:\n raise ValueError('`num_repeats` should be a positive integer. '\n f'Found {num_repeats}.')\n\n self._inner_agg_factory = inner_agg_factory\n self._num_repeats = num_repeats\n\n def _postprocess_tensor(self, x, original_spec):\n \"\"\"Unpad, reshape, and cast a component tensor after backward transform.\"\"\"\n original_len = original_spec.shape.num_elements()\n reshaped_x = tf.reshape(x[:original_len], original_spec.shape)\n if original_spec.dtype.is_integer:\n reshaped_x = tf.round(reshaped_x)\n return tf.cast(reshaped_x, original_spec.dtype)\n\n def _forward_transform_struct(self, struct, seed_pair, num_repeats):\n \"\"\"Applies the transform to each component tensor of a structure.\"\"\"\n num_tensors = len(tf.nest.flatten(struct))\n seed_pairs_list = [seed_pair + i * num_repeats for i in range(num_tensors)]\n seed_pairs_struct = tf.nest.pack_sequence_as(struct, seed_pairs_list)\n\n transform_fn = functools.partial(\n self._forward_transform_vector, num_repeats=num_repeats)\n prep_struct = tf.nest.map_structure(self._preprocess_tensor, struct)\n return tf.nest.map_structure(transform_fn, prep_struct, seed_pairs_struct)\n\n def _backward_transform_struct(self, struct, struct_py_type, seed_pair,\n num_repeats):\n \"\"\"Inverts the transform applied to the structure.\"\"\"\n num_tensors = len(tf.nest.flatten(struct))\n seed_pairs_list = [seed_pair + i * num_repeats for i in range(num_tensors)]\n seed_pairs_struct = tf.nest.pack_sequence_as(struct, seed_pairs_list)\n\n transform_fn = functools.partial(\n self._backward_transform_vector, num_repeats=num_repeats)\n unrotated_struct = tf.nest.map_structure(transform_fn, struct,\n seed_pairs_struct)\n return tf.nest.map_structure(self._postprocess_tensor, unrotated_struct,\n struct_py_type)\n\n def create(\n self,\n value_type: factory.ValueType) -> aggregation_process.AggregationProcess:\n # As the Hadamard transform alters the tensor specs, we compute the Python\n # structure of the types for the inverse transform.\n if (value_type.is_struct_with_python() and\n type_analysis.is_structure_of_tensors(value_type)):\n py_type = type_conversions.structure_from_tensor_type_tree(\n lambda x: tf.TensorSpec(x.shape, x.dtype), value_type)\n elif value_type.is_tensor():\n py_type = tf.TensorSpec(value_type.shape, value_type.dtype)\n else:\n raise TypeError('Expected `value_type` to be `TensorType` or '\n '`StructWithPythonType` containing only `TensorType`. 
'\n f'Found type: {repr(value_type)}')\n\n _check_component_dtypes(value_type)\n seed_pair_type = init_seed_pair.type_signature.result\n\n @computations.tf_computation(value_type, seed_pair_type)\n def forward_transform_structure(value, seed_pair):\n return self._forward_transform_struct(value, seed_pair, self._num_repeats)\n\n @computations.tf_computation(\n forward_transform_structure.type_signature.result, seed_pair_type)\n def backward_transform_structure(value, seed_pair):\n return self._backward_transform_struct(value, py_type, seed_pair,\n self._num_repeats)\n\n tff_inner_type = forward_transform_structure.type_signature.result\n inner_agg_process = self._inner_agg_factory.create(tff_inner_type)\n\n @computations.federated_computation()\n def init_fn():\n state = collections.OrderedDict(\n round_seed=intrinsics.federated_eval(init_seed_pair,\n placements.SERVER),\n inner_agg_process=inner_agg_process.initialize())\n return intrinsics.federated_zip(state)\n\n @computations.federated_computation(init_fn.type_signature.result,\n computation_types.at_clients(value_type)\n )\n def next_fn(state, value):\n server_seed = state['round_seed']\n client_seed = intrinsics.federated_broadcast(server_seed)\n rotated_value = intrinsics.federated_map(forward_transform_structure,\n (value, client_seed))\n\n inner_state = state['inner_agg_process']\n inner_agg_output = inner_agg_process.next(inner_state, rotated_value)\n\n unrotated_agg_value = intrinsics.federated_map(\n backward_transform_structure, (inner_agg_output.result, server_seed))\n\n new_state = collections.OrderedDict(\n round_seed=intrinsics.federated_map(next_seed_pair, server_seed),\n inner_agg_process=inner_agg_output.state)\n measurements = collections.OrderedDict(\n rotation=inner_agg_output.measurements)\n\n return measured_process.MeasuredProcessOutput(\n state=intrinsics.federated_zip(new_state),\n result=unrotated_agg_value,\n measurements=intrinsics.federated_zip(measurements))\n\n return aggregation_process.AggregationProcess(init_fn, next_fn)\n\n\n
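# A minimal usage sketch (illustrative only, not part of the original module\n# API): wrap a plain sum in a concrete flattening factory and create the\n# aggregation process for a rank-1 float tensor type. Assumes a working TFF\n# environment; `_demo_create_process` is a hypothetical name.\ndef _demo_create_process():\n fac = HadamardTransformFactory(sum_factory.SumFactory())\n return fac.create(computation_types.TensorType(tf.float32, [5]))\n\n\n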
class HadamardTransformFactory(_FlatteningFactory):\n \"\"\"Implements `FlatteningFactory` with the Fast Walsh-Hadamard Transform.\n\n The created `tff.templates.AggregationProcess` takes an input tensor structure\n and applies the randomized fast Walsh-Hadamard transform to each tensor\n flattened as a vector in O(d*log(d)) time, where `d` is the vector dimension.\n https://en.wikipedia.org/wiki/Fast_Walsh%E2%80%93Hadamard_transform\n\n Specifically, for any component tensor, the forward transform corresponds to\n the following operations at `tff.CLIENTS`:\n 1. Flattens the tensor into a vector (rank-1 tensor).\n 2. Pads the vector to `d_2` dimensions with zeros, where `d_2` is\n the smallest power of 2 larger than or equal to the number of elements.\n 3. Multiplies the padded vector with random +1/-1 values (i.e. flipping the\n signs of the vector). This corresponds to applying a random orthogonal\n diagonal matrix.\n 4. Multiplies the randomly flipped vector by the `d_2 x d_2` Hadamard matrix.\n The backward transform reverts the above steps at `tff.SERVER`.\n\n The side effects of this aggregator include:\n 1. The component tensors are flattened and padded with zeros, so the inner\n factory operates on flat vectors whose lengths are the next powers of 2\n (e.g. a component with 5 elements reaches it as a vector of length 8).\n 2. The dtype of the component tensors is cast to floats.\n \"\"\"\n\n def _preprocess_tensor(self, x):\n # Casts, reshapes, and pads to a float vector with power-of-2 dimensions.\n return pad_zeros_pow2(tf.reshape(tf.cast(x, OUTPUT_TF_TYPE), [-1]))\n\n @tf.function\n def _forward_transform_vector(self, x, seed_pair, num_repeats):\n\n def _transform_fn(x, cur_seed_pair):\n # Randomly flip signs.\n signs = sample_rademacher(tf.shape(x), x.dtype, cur_seed_pair)\n flipped_x = signs * x\n # Apply Hadamard matrix.\n expanded_x = tf.expand_dims(flipped_x, axis=0)\n rotated_x = te.utils.fast_walsh_hadamard_transform(expanded_x)\n return tf.squeeze(rotated_x, axis=0)\n\n for index in range(num_repeats):\n x = _transform_fn(x, seed_pair + index)\n return x\n\n @tf.function\n def _backward_transform_vector(self, x, seed_pair, num_repeats):\n\n def _transform_fn(x, cur_seed_pair):\n expanded_x = tf.expand_dims(x, axis=0)\n unrotated_x = te.utils.fast_walsh_hadamard_transform(expanded_x)\n unrotated_x = tf.squeeze(unrotated_x, axis=0)\n signs = sample_rademacher(tf.shape(unrotated_x), x.dtype, cur_seed_pair)\n return signs * unrotated_x\n\n for index in range(num_repeats - 1, -1, -1):\n x = _transform_fn(x, seed_pair + index)\n return x\n\n\n
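# A self-contained round-trip sketch of the randomized Hadamard flattening\n# above, reusing this module's helpers; `_demo_hadamard_roundtrip` is an\n# illustrative name and not part of the original module.\ndef _demo_hadamard_roundtrip(x, seed=(42, 0)):\n fac = HadamardTransformFactory()\n seed_pair = tf.constant(seed, SEED_TF_TYPE)\n flat = fac._preprocess_tensor(x) # cast to float, flatten, pad to 2**k\n rotated = fac._forward_transform_vector(flat, seed_pair, num_repeats=1)\n # Inverting with the same seed recovers `flat` up to float rounding.\n return fac._backward_transform_vector(rotated, seed_pair, num_repeats=1)\n\n\n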
class DiscreteFourierTransformFactory(_FlatteningFactory):\n \"\"\"Implements `FlatteningFactory` with the Discrete Fourier Transform.\n\n The created `tff.templates.AggregationProcess` takes an input tensor structure\n and applies the randomized discrete Fourier transform (using TF's fast Fourier\n transform implementation `tf.signal.fft/ifft`) to each tensor flattened as\n a vector in O(d*log(d)) time, where `d` is the vector dimension.\n https://en.wikipedia.org/wiki/Discrete_Fourier_transform\n\n Specifically, for any component tensor, the forward transform corresponds to\n the following operations at `tff.CLIENTS`:\n 1. Flattens the tensor into a vector (rank-1 tensor).\n 2. Pads the vector to an even number of dimensions `d` with zeros (i.e. pad\n at most one zero).\n 3. Packs the real vector into a complex vector with length `d/2` by filling\n the real and imaginary values with two halves of the real vector.\n 4. Multiplies each coordinate of the complex vector by a random\n rotation (i.e. apply `cos(x) + isin(x)` for x in the range [0, 2pi]).\n 5. Multiplies the resulting vector by the `d x d` DFT matrix.\n 6. Unpacks the complex vector back to a real vector with length `d`.\n 7. Normalizes the vector by `1 / sqrt(d/2)`.\n The backward transform reverts the above steps at `tff.SERVER`.\n\n The side effects of this aggregator include:\n 1. The component tensors are flattened and padded with zeros, so the inner\n factory operates on flat vectors of even length (e.g. a component with\n 5 elements reaches it as a vector of length 6).\n 2. The dtype of the component tensors is cast to floats.\n \"\"\"\n\n def _preprocess_tensor(self, x):\n \"\"\"Casts, reshapes, and pads to a float vector with even dimensions.\"\"\"\n return pad_zeros_even(tf.reshape(tf.cast(x, OUTPUT_TF_TYPE), [-1]))\n\n @tf.function\n def _forward_transform_vector(self, x, seed_pair, num_repeats):\n\n def _transform_fn(x, cur_seed_pair):\n split_x = tf.reshape(x, [2, -1])\n complex_x = tf.complex(real=split_x[0], imag=split_x[1])\n # Apply randomness.\n complex_x *= sample_cis(tf.shape(complex_x), cur_seed_pair)\n # Apply FFT as rotation.\n fft_x = tf.signal.fft(complex_x)\n rotated_x = tf.concat([tf.math.real(fft_x), tf.math.imag(fft_x)], axis=0)\n # Normalize by 1/sqrt(d/2) where `d` is the padded dim to an even number.\n return rotated_x / tf.cast(tf.sqrt(tf.size(rotated_x) / 2), tf.float32)\n\n for index in range(num_repeats):\n x = _transform_fn(x, seed_pair + index)\n return x\n\n @tf.function\n def _backward_transform_vector(self, x, seed_pair, num_repeats):\n\n def _transform_fn(x, cur_seed_pair):\n unnorm_x = x * tf.cast(tf.sqrt(tf.size(x) / 2), tf.float32)\n split_x = tf.reshape(unnorm_x, [2, -1])\n complex_x = tf.complex(real=split_x[0], imag=split_x[1])\n # Invert the FFT rotation.\n ifft_x = tf.signal.ifft(complex_x)\n # Undo randomness.\n ifft_x *= sample_cis(tf.shape(ifft_x), cur_seed_pair, inverse=True)\n return tf.concat([tf.math.real(ifft_x), tf.math.imag(ifft_x)], axis=0)\n\n for index in range(num_repeats - 1, -1, -1):\n x = _transform_fn(x, seed_pair + index)\n return x\n\n\n@computations.tf_computation()\ndef init_seed_pair():\n microseconds_per_second = 10**6 # Timestamp returns fractional seconds.\n timestamp_microseconds = tf.cast(tf.timestamp() * microseconds_per_second,\n SEED_TF_TYPE)\n return tf.convert_to_tensor([timestamp_microseconds, 0])\n\n\n@computations.tf_computation(init_seed_pair.type_signature.result)\ndef next_seed_pair(seed_pair):\n timestamp_microseconds, sequence_number = seed_pair[0], seed_pair[1]\n return tf.convert_to_tensor([timestamp_microseconds, sequence_number + 1])\n\n\ndef sample_rademacher(shape, dtype, seed_pair):\n \"\"\"Sample uniform random +1/-1 values with specified shape/dtype/seed_pair.\"\"\"\n rand_uniform = tf.random.stateless_uniform(shape=shape, seed=seed_pair)\n return tf.cast(tf.sign(rand_uniform - 0.5), dtype)\n\n\ndef sample_cis(shape, seed_pair, inverse=False):\n \"\"\"Sample e^(i * theta) for theta in the range [0, 2pi] as tf.complex64.\"\"\"\n # While it suffices to draw theta from [0, pi/2, pi, 3pi/2] (2 bits of\n # randomness) for each complex coordinate, sampling floating angles can avoid\n # the uniform integer sampler which may not be available on `tff.CLIENTS`.\n theta = tf.random.stateless_uniform(shape, seed_pair, minval=0, maxval=2)\n theta *= math.pi\n theta *= tf.cond(tf.cast(inverse, tf.bool), lambda: -1.0, lambda: 1.0)\n return tf.exp(tf.complex(real=0.0, imag=theta))\n\n\ndef pad_zeros_pow2(x):\n \"\"\"Pads a rank-1 tensor with zeros to the next power of two dimensions.\"\"\"\n size = tf.size(x)\n log2_size = tf.math.log(tf.cast(size, tf.float32)) / math.log(2.0)\n # NOTE: We perform `pow` in float32 to avoid the integer TF `pow` op which is\n # currently not available in the pruning graph. This can be avoided via\n # Grappler's constant folding optimizer, but it must be disabled due to\n # b/164455653. 
While float32 can only represent the nonnegative integer range\n # [0, 2^24] exactly, we only consider powers of 2 for padding and thus can\n # tolerate up to 2^30 with a cast to int32.\n pad_size = tf.cast(2.0**tf.math.ceil(log2_size), tf.int32)\n return pad_zeros(x, pad_size - size)\n\n\ndef pad_zeros_even(x):\n \"\"\"Pads a rank-1 tensor with zeros to the next even dimensions.\"\"\"\n num_zeros = tf.cast(tf.equal(tf.size(x) % 2, 1), tf.int32)\n return pad_zeros(x, num_zeros)\n\n\ndef pad_zeros(x, num_zeros):\n \"\"\"Pads a rank-1 tensor with shape (d,) with `num_zero` zeros.\"\"\"\n tf.debugging.assert_rank(x, 1, f'Expected rank-1 tensors, but found {x}.')\n return tf.pad(x, [[0, tf.maximum(0, num_zeros)]])\n\n\ndef _check_component_dtypes(value_type):\n \"\"\"Checks all components of the `value_type` to be either ints or floats.\"\"\"\n if not (type_analysis.is_structure_of_floats(value_type) or\n type_analysis.is_structure_of_integers(value_type)):\n raise TypeError('Component dtypes of `value_type` must all be integers or '\n f'floats. Found {value_type}.')\n","sub_path":"tensorflow_federated/python/aggregators/rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":19035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"411872271","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nhtml = urlopen('http://www.globo.com')\nbsObj = BeautifulSoup(html.read(), 'html.parser')\n\n\nlista = bsObj.findAll('div')\nfor text in lista:\n pesquisa = str(text)\n print(\"*\" * 100)\n if pesquisa.find(\"Anitta\") == -1:\n print(\"No 'is' here!\")\n else:\n print(text.getText())","sub_path":"aula02-raspagem/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"370372469","text":"# -*- coding: utf-8 -*-\r\n\r\n__author__ = \"Cody Precord \"\r\n__svnid__ = \"$Id: calc.py 850 2009-05-01 00:24:27Z CodyPrecord $\"\r\n__revision__ = \"$Revision: 850 $\"\r\n\r\n#--------------------------------------------------------------------------#\r\n# Dependancies\r\nimport tkinter as tk\r\nfrom tkinter import ttk,messagebox,filedialog\r\nfrom noval import _,NewId\r\nimport noval.util.utils as utils\r\nimport noval.project.wizard as projectwizard\r\nfrom noval.project.baseconfig import *\r\nfrom noval.python.project.viewer import *\r\nfrom noval.python.project.model import *\r\nfrom noval.python.project.rundocument import *\r\nimport noval.consts as consts\r\nimport noval.imageutils as imageutils\r\nimport os\r\nimport noval.util.strutils as strutils\r\nimport noval.util.fileutils as fileutils\r\nimport noval.python.parser.utils as parserutils\r\nfrom noval.project.executor import *\r\nimport noval.terminal as terminal\r\nimport noval.misc as misc\r\nimport noval.ui_utils as ui_utils\r\nimport noval.ttkwidgets.treeviewframe as treeviewframe\r\nimport noval.toolbar as toolbar\r\nimport noval.ui_base as ui_base\r\nimport noval.python.project.runconfiguration as runconfiguration\r\nimport noval.project.command as command\r\nimport noval.python.pyutils as pyutils\r\nfrom pkg_resources import resource_filename\r\nimport datetime\r\nfrom shutil import which\r\nimport noval.python.interpreter.pythonpackages as pythonpackages\r\nimport noval.python.interpreter.interpretermanager as interpretermanager\r\nimport noval.project.variables as variablesutils\r\nimport threading\r\nimport time\r\nimport 
noval.util.urlutils as urlutils\r\nimport random\r\nimport noval.python.project.runconfig as runconfig\r\n \r\ndef GetInterpreterScriptPath(interpreter,is_user_site=False):\r\n if is_user_site:\r\n interpreter_path = os.path.dirname(interpreter.GetUserLibPath())\r\n else:\r\n interpreter_path = interpreter.InstallPath\r\n return os.path.join(interpreter_path,\"Scripts\")\r\n \r\ndef GetToolPath(interpreter,name,is_user_site=False):\r\n if utils.is_windows():\r\n return os.path.join(GetInterpreterScriptPath(interpreter,is_user_site),name + \".exe\")\r\n return which(name)\r\n \r\n\r\ndef GetDjangoToolPath(interpreter):\r\n django_tool_path = GetToolPath(interpreter,\"django-admin\")\r\n if not os.path.exists(django_tool_path):\r\n django_tool_path = GetToolPath(interpreter,\"django-admin\",is_user_site=True)\r\n if not os.path.exists(django_tool_path): \r\n raise RuntimeError(_(\"interpreter %s needs to install the package \\\"django\\\"\")%interpreter.Name)\r\n return django_tool_path\r\n\r\n\r\ndef CheckDjango(interpreter,parent=None):\r\n try:\r\n GetDjangoToolPath(interpreter)\r\n except RuntimeError as e:\r\n messagebox.showinfo(GetApp().GetAppName(),str(e),parent=parent)\r\n dlg = pythonpackages.InstallPackagesDialog(parent,interpreter,pkg_name='django',install_args='--user django',autorun=True)\r\n status = dlg.ShowModal()\r\n if status == constants.ID_CANCEL:\r\n return False\r\n return True\r\n\r\nclass DjangoProject(PythonProject):\r\n def __init__(self):\r\n super(DjangoProject,self).__init__()\r\n self._runinfo.DocumentTemplate = \"djangokit.django.DjangoProjectTemplate\"\r\n\r\nclass DjangoProjectDocument(PythonProjectDocument):\r\n\r\n def __init__(self, model=None):\r\n PythonProjectDocument.__init__(self,model)\r\n \r\n @staticmethod\r\n def GetProjectModel():\r\n return DjangoProject()\r\n\r\n def CheckIsbuiltinInterpreter(self,run_parameter):\r\n if run_parameter.Interpreter.IsBuiltIn:\r\n raise RuntimeError(_('The builtin interpreter does not support running Django projects'))\r\n\r\n def NewRunConfiguration(self,main_module_file,configuration_name,build_args,interpreter_name,file_configuration_list=None):\r\n #avoid a shared mutable default argument\r\n if file_configuration_list is None:\r\n file_configuration_list = []\r\n file_configuration = runconfiguration.FileConfiguration(self,main_module_file)\r\n file_configuration_list.append(configuration_name)\r\n pj_file_key = file_configuration.GetRootKeyPath()\r\n #update file configuration list\r\n utils.profile_set(pj_file_key + \"/ConfigurationList\",file_configuration_list)\r\n args = {\r\n runconfiguration.StartupConfiguration.CONFIGURATION_NAME:runconfiguration.StartupConfiguration(self,main_module_file, 0, ''),\r\n runconfiguration.AugumentsConfiguration.CONFIGURATION_NAME:runconfiguration.AugumentsConfiguration(self,main_module_file,'',build_args),\r\n runconfiguration.InterpreterConfiguration.CONFIGURATION_NAME:runconfiguration.InterpreterConfiguration(self,main_module_file,interpreter_name),\r\n runconfiguration.EnvironmentConfiguration.CONFIGURATION_NAME:runconfiguration.EnvironmentConfiguration(self,main_module_file,{}),\r\n }\r\n \r\n run_configuration = runconfiguration.RunConfiguration(configuration_name,**args)\r\n run_configuration.SaveConfiguration()\r\n \r\n def SaveDebugRunConfiguration(self,debug_argument='runserver 127.0.0.1:${SERVER_PORT} --noreload',run_arguments='runserver 0.0.0.0:8000'):\r\n configuration_list = []\r\n self.NewRunConfiguration(self.GetModel().StartupFile,\"run_web_server\",run_arguments,self.GetModel().interpreter.name,configuration_list)\r\n 
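#the debug variant keeps ${SERVER_PORT} as a template; DebugWeb substitutes a free port at launch\r\n 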
self.NewRunConfiguration(self.GetModel().StartupFile,\"debug_web_server\",debug_argument,self.GetModel().interpreter.name,configuration_list)\r\n startup_dir = os.path.dirname(self.GetModel().StartupFile.filePath)\r\n prefix = startup_dir.replace(self.GetPath(),\"\").lstrip(os.sep)\r\n if prefix != \"\":\r\n prefix = prefix + \"|\"\r\n self.SaveRunConfiguration(configuration_list,prefix=prefix)\r\n \r\n def SaveRunConfiguration(self,file_configuration_list,prefix=\"\"):\r\n configuration_list = [prefix + \"manage.py/\" + configuration_name for configuration_name in file_configuration_list]\r\n utils.profile_set(self.GetKey() + \"/ConfigurationList\",configuration_list)\r\n utils.profile_set(self.GetKey() + \"/RunConfigurationName\",configuration_list[-1])\r\n\r\n def Debug(self):\r\n self.DebugWeb()\r\n PythonProjectDocument.Debug(self)\r\n \r\n def RunWithoutDebug(self,filetoRun=None):\r\n self.DebugWeb()\r\n PythonProjectDocument.RunWithoutDebug(self)\r\n \r\n def DebugWeb(self,break_debug=False):\r\n available_port = random.randint(40000,60000)\r\n variablesutils.GetProjectVariableManager().AddVariable('SERVER_PORT',available_port,replace_exist=True)\r\n threading.Thread(target=self.StartWeb,args=(available_port,break_debug),daemon=True).start()\r\n \r\n def BreakintoDebugger(self,filetoRun=None):\r\n self.DebugWeb(break_debug=True)\r\n PythonProjectDocument.BreakintoDebugger(self,filetoRun)\r\n \r\n def StartWeb(self,available_port,break_debug):\r\n st = time.time()\r\n while True:\r\n end = time.time()\r\n if end - st > 60 and not break_debug:\r\n break\r\n time.sleep(0.5)\r\n url_addr = \"http://127.0.0.1:%d\"%available_port\r\n if urlutils.RequestData(url_addr,to_json=False) is None:\r\n print ('web url',url_addr,'is not available.....')\r\n continue\r\n fileutils.startfile(url_addr)\r\n break\r\n \r\n def GetRunConfiguration(self,run_file=None,is_debug=False):\r\n project_configuration = runconfiguration.ProjectConfiguration(self)\r\n configuration_name_list = project_configuration.LoadConfigurationNames()\r\n if is_debug:\r\n return configuration_name_list[1]\r\n else:\r\n return configuration_name_list[0]\r\n \r\n\r\n def NewApp(self,app_name):\r\n interpreter = self.GetandSetProjectDocInterpreter()\r\n if not interpreter:\r\n return\r\n startup_file = self.GetandSetProjectStartupfile()\r\n work_dir = self.GetPath()\r\n p = utils.create_process(interpreter.Path,'%s startapp %s'%(startup_file.filePath,app_name),cwd=work_dir)\r\n p.wait()\r\n app_path = os.path.join(work_dir,app_name)\r\n view_path = os.path.join(app_path,'views.py')\r\n models_path = os.path.join(app_path,'models.py')\r\n apps_path = os.path.join(app_path,'apps.py')\r\n admin_path = os.path.join(app_path,'admin.py')\r\n test_path = os.path.join(app_path,'tests.py')\r\n init_path = os.path.join(app_path,'__init__.py')\r\n self.GetCommandProcessor().Submit(command.ProjectAddFilesCommand(self,[view_path,models_path,apps_path,admin_path,test_path,init_path],app_name))\r\n \r\n\r\n def RunIndebugger(self):\r\n django_run_parameter = self.GetDjangoRunconfig(is_debug=True)\r\n self.DebugIndebugger(django_run_parameter)\r\n\r\n def RunInterminal(self,filetoRun=None):\r\n django_run_parameter = self.GetDjangoRunconfig()\r\n self.Runterminal(django_run_parameter)\r\n \r\n def Runterminal(self,run_parameter):\r\n self.CheckIsbuiltinInterpreter(run_parameter)\r\n self.RunScript(run_parameter)\r\n\r\n def DebugIndebugger(self,run_parameter):\r\n self.CheckIsbuiltinInterpreter(run_parameter)\r\n 
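#CheckIsbuiltinInterpreter has already raised if the interpreter cannot host the debugger\r\n 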
self.DebugRunScript(run_parameter)\r\n \r\n def GetDjangoRunconfig(self,is_debug=False):\r\n interpreter = self.GetandSetProjectDocInterpreter()\r\n self.GetandSetProjectStartupfile()\r\n startup_file = self.GetModel().StartupFile\r\n port = utils.profile_get_int(self.GetFileKey(startup_file) + \"/WebDefaultPort\",8000)\r\n if not is_debug:\r\n arg = \"runserver 0.0.0.0:%d\"%port\r\n else:\r\n arg = \"runserver 127.0.0.1:%d --noreload\"%port\r\n return runconfig.PythonRunconfig(interpreter,startup_file.filePath,arg,project=self)\r\n \r\nclass DjangoProjectTemplate(PythonProjectTemplate):\r\n \r\n @staticmethod\r\n def CreateProjectTemplate():\r\n projectTemplate = DjangoProjectTemplate(GetApp().GetDocumentManager(),\r\n _(\"Project File\"),\r\n \"*%s\" % consts.PROJECT_EXTENSION,\r\n os.getcwd(),\r\n consts.PROJECT_EXTENSION,\r\n \"DjangoProject Document\",\r\n _(\"DjangoProject Viewer\"),\r\n DjangoProjectDocument,\r\n PythonProjectView,\r\n icon = imageutils.getProjectIcon())\r\n GetApp().GetDocumentManager().DisassociateTemplate(projectTemplate)\r\n return projectTemplate\r\n \r\n def GetPropertiPages(self):\r\n return PythonProjectTemplate.GetPropertiPages(self) + [(\"Django option\",\"root\",\"djangokit.django.DjangoInformationPanel\")]\r\n\r\n\r\nclass DjangoProjectNameLocationPage(BasePythonProjectNameLocationPage):\r\n\r\n def __init__(self,master,**kwargs):\r\n BasePythonProjectNameLocationPage.__init__(self,master,**kwargs)\r\n self.can_finish = False\r\n\r\n def GetProjectTemplate(self):\r\n return DjangoProjectTemplate.CreateProjectTemplate()\r\n \r\n def SaveProject(self,path):\r\n return True\r\n \r\n def SaveDjangoProject(self,path):\r\n return BasePythonProjectNameLocationPage.SaveProject(self,path)\r\n \r\n\r\nclass DjangoInformationPanel(pyutils.PythonBaseConfigurationPanel):\r\n \r\n def __init__(self,parent,item,current_project,**kwargs):\r\n pyutils.PythonBaseConfigurationPanel.__init__(self,parent,current_project)\r\n self.columnconfigure(0, weight=1)\r\n self.current_project = current_project\r\n self.item = item\r\n self.is_wizard = kwargs.get('is_wizard',False)\r\n sizer_frame = ttk.Frame(self)\r\n sizer_frame.grid(column=0, row=1, sticky=\"nsew\")\r\n \r\n sizer_frame.columnconfigure(1, weight=1)\r\n if self.is_wizard:\r\n ttk.Label(sizer_frame,text=_('Default app:')).grid(column=0, row=0, sticky=\"nsew\",padx=(0,consts.DEFAUT_CONTRL_PAD_X),pady=(consts.DEFAUT_CONTRL_PAD_Y,0))\r\n self.default_app_var = tk.StringVar()\r\n self.app_entry = ttk.Entry(sizer_frame,textvariable=self.default_app_var)\r\n self.app_entry.grid(column=1, row=0, sticky=\"nsew\",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(0,consts.DEFAUT_CONTRL_PAD_X))\r\n row_index = 1\r\n else:\r\n row_index = 0\r\n ttk.Label(sizer_frame,text=_('Web Default Port:')).grid(column=0, row=row_index, sticky=\"nsew\",padx=(0,consts.DEFAUT_CONTRL_PAD_X),pady=(consts.DEFAUT_CONTRL_PAD_Y,0))\r\n self.port_var = tk.IntVar(value=8000)\r\n \r\n #validate the port entry input; only digits are accepted\r\n validate_cmd = self.register(self.validatePortInput)\r\n self.port_entry = ttk.Entry(sizer_frame,validate = 'key', validatecommand = (validate_cmd, '%P'),textvariable=self.port_var)\r\n self.port_entry.grid(column=1, row=row_index, sticky=\"nsew\",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(0,consts.DEFAUT_CONTRL_PAD_X))\r\n \r\n row_index += 1\r\n ttk.Label(sizer_frame,text=_('Debug server arguments:')).grid(column=0, row=row_index, sticky=\"nsew\",padx=(0,consts.DEFAUT_CONTRL_PAD_X),pady=(consts.DEFAUT_CONTRL_PAD_Y,0))\r\n self.debug_arguments_var = 
tk.StringVar(value='runserver 127.0.0.1:${SERVER_PORT} --noreload')\r\n self.debug_arguments_entry = ttk.Entry(sizer_frame,textvariable=self.debug_arguments_var)\r\n self.debug_arguments_entry.grid(column=1, row=row_index, sticky=\"nsew\",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(0,consts.DEFAUT_CONTRL_PAD_X))\r\n \r\n row_index += 1\r\n ttk.Label(sizer_frame,text=_('Run server arguments:')).grid(column=0, row=row_index, sticky=\"nsew\",padx=(0,consts.DEFAUT_CONTRL_PAD_X),pady=(consts.DEFAUT_CONTRL_PAD_Y,0))\r\n self.run_arguments_var = tk.StringVar(value='runserver 0.0.0.0:8000')\r\n self.run_arguments_entry = ttk.Entry(sizer_frame,textvariable=self.run_arguments_var)\r\n self.run_arguments_entry.grid(column=1, row=row_index, sticky=\"nsew\",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(0,consts.DEFAUT_CONTRL_PAD_X))\r\n \r\n def validatePortInput(self,contents):\r\n if not contents.isdigit():\r\n self.port_entry.bell()\r\n return False\r\n return True\r\n \r\n \r\n def OnOK(self,optionsDialog=None):\r\n if self.port_var.get() >= 1 and self.port_var.get() <= 65535:\r\n doc = self.GetCurrentProject()\r\n startup_file = doc.GetModel().StartupFile\r\n doc.SaveDebugRunConfiguration(self.debug_arguments_var.get(),self.run_arguments_var.get())\r\n utils.profile_set(doc.GetFileKey(startup_file) + \"/WebDefaultPort\",self.port_var.get())\r\n return True\r\n messagebox.showerror(_('Error'),_('Invalid port'))\r\n return False\r\n \r\n def GetCurrentProject(self):\r\n if self.current_project is None:\r\n prev_page = self.master.master.GetPrev()\r\n self.current_project = prev_page.new_project_doc\r\n return self.current_project\r\n\r\n\r\nclass DjangoInformationPage(projectwizard.BitmapTitledContainerWizardPage):\r\n \"\"\"Wizard page that collects the Django project information.\"\"\"\r\n def __init__(self, parent,**kwargs):\r\n \"\"\"Initialises the wizard page.\"\"\"\r\n projectwizard.BitmapTitledContainerWizardPage.__init__(self, parent,(\"Django Project Wizard\"),_(\"Django Project Information\\nPlease Set Information of Django Project\"),\"python_logo.png\")\r\n self.can_finish = True\r\n \r\n def CreateContent(self,content_frame,**kwargs):\r\n self.information_panel = DjangoInformationPanel(content_frame,None,None,**{'is_wizard':True})\r\n self.information_panel.grid(column=0, row=1, sticky=\"nsew\")\r\n\r\n def Finish(self):\r\n interpreter = self.GetInterpreter()\r\n if interpreter is None or not CheckDjango(interpreter,parent=self):\r\n return False\r\n \r\n\r\n \r\n django_tool_path = GetDjangoToolPath(interpreter)\r\n projName = self.GetPrev().name_var.get().strip()\r\n args = \"startproject %s\"%projName\r\n work_dir = self.GetWorkDir()\r\n p = utils.create_process(django_tool_path,args,cwd=work_dir)\r\n p.wait()\r\n \r\n project_path = self.GetProjectPath()\r\n fullProjectPath = os.path.join(project_path, strutils.MakeNameEndInExtension(projName, consts.PROJECT_EXTENSION))\r\n if not self.GetPrev().SaveDjangoProject(fullProjectPath):\r\n return False\r\n \r\n startup_path = os.path.join(project_path,'manage.py')\r\n app_path = os.path.join(project_path,projName)\r\n settings_path = os.path.join(app_path,'settings.py')\r\n urls_path = os.path.join(app_path,'urls.py')\r\n wsgi_path = os.path.join(app_path,'wsgi.py')\r\n init_path = os.path.join(app_path,'__init__.py')\r\n view = 
GetApp().MainFrame.GetProjectView().GetView()\r\n doc = view.GetDocument()\r\n doc.GetCommandProcessor().Submit(command.ProjectAddFilesCommand(doc,[urls_path,settings_path,wsgi_path,init_path],projName))\r\n doc.GetCommandProcessor().Submit(command.ProjectAddFilesCommand(doc,[startup_path],None))\r\n view.SetProjectStartupFile()\r\n \r\n if not self.information_panel.OnOK():\r\n return False\r\n default_app = self.information_panel.default_app_var.get().strip()\r\n if default_app != \"\":\r\n doc.NewApp(default_app)\r\n return True\r\n\r\n def GetInterpreter(self):\r\n prev_page = self.GetPrev()\r\n interpreter_name = prev_page.GetNewPojectConfiguration().Interpreter\r\n interpreter = interpretermanager.InterpreterManager().GetInterpreterByName(interpreter_name)\r\n return interpreter\r\n\r\n def GetProjectPath(self):\r\n prev_page = self.GetPrev()\r\n project_path = prev_page.GetProjectLocation()\r\n return os.path.join(project_path,prev_page.name_var.get().strip())\r\n \r\n\r\n def GetWorkDir(self):\r\n prev_page = self.GetPrev()\r\n project_path = prev_page.GetProjectLocation()\r\n return project_path\r\n","sub_path":"plugins/Django/djangokit/django.py","file_name":"django.py","file_ext":"py","file_size_in_byte":18189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"162093022","text":"\"\"\"@package docstring\nSegment the image based on the cell counter grid \n\nTODO: how to pass roi's and imageQuality? via getter or (result) signal?\nNo median blurring, but bilateral?\n\"\"\"\n#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom PySide2.QtCore import *\nimport cv2\nimport inspect\nimport traceback\nfrom lib.manipulator import Manipulator\nimport matplotlib.pyplot as plt\n\n## @author Jeroen Veen\nclass ImageSegmenter(Manipulator):\n \"\"\"Image segmenter\n \\param image\n \\return image\n \"\"\"\n \n def __init__(self, *args, **kwargs):\n \"\"\"The constructor.\"\"\"\n super().__init__(\"image segmenter\")\n\n # ksize - Median Blur aperture linear size;\n # must be odd and greater than 1, for example: 3, 5, 7 ...\n self.ksize = kwargs['ksize'] if 'ksize' in kwargs else 0\n\n # Findgrid parameter as a fraction of the image size\n self.sizeFrac = kwargs['sizeFrac'] if 'sizeFrac' in kwargs else 0.005\n\n # Plotting\n self.plot = kwargs['plot'] if 'plot' in kwargs else False\n\n # Debug plot\n self.debugPlot = kwargs['debugPlot'] if 'debugPlot' in kwargs else False\n\n if self.debugPlot:\n self.fig, (self.ax1, self.ax2) = plt.subplots(2,1)\n self.graph1 = None\n self.graph2 = None\n self.ax1.grid(True)\n self.ax2.grid(True)\n plt.show(block=False)\n \n def __del__(self):\n \"\"\"The deconstructor.\"\"\"\n None \n \n def start(self, Image):\n \"\"\"Image processing function.\"\"\" \n try:\n self.startTimer() \n self.image = Image\n self.ROIs = None\n self.imageQuality = None\n\n # Blur the image, beware this is very slow\n self.image = self.image if self.ksize < 1 else cv2.medianBlur(self.image, self.ksize)\n\n # Find grid pattern along row and column averages\n row_av = cv2.reduce(self.image, 0, cv2.REDUCE_AVG, dtype=cv2.CV_32S).flatten('F')\n row_seg_list, row_mask, smooth_row_av = find1DGrid(row_av, int(self.sizeFrac*row_av.size))\n col_av = cv2.reduce(self.image, 1, cv2.REDUCE_AVG, dtype=cv2.CV_32S).flatten('F')\n col_seg_list, col_mask, smooth_col_av = find1DGrid(col_av, int(self.sizeFrac*col_av.size))\n\n # Create ROI list and annotate image\n list_width = len(row_seg_list)\n list_length = len(col_seg_list)\n 
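#one ROI per (row, column) cell of the detected grid\n 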
self.ROIs = np.zeros([list_width*list_length,4], dtype=np.uint16)\n self.ROI_total_area = 0\n for i, x in enumerate(row_seg_list):\n for j, y in enumerate(col_seg_list):\n # ROI: (left,top,width,height)\n self.ROIs[i+j*list_width] = [x[0],y[0],x[1],y[1]]\n cv2.rectangle(self.image, (x[0],y[0]), (x[0]+x[1],y[0]+y[1]), (0, 255, 0), 2)\n self.ROI_total_area += x[1]*y[1]\n\n # Compute metrics from grid pattern\n # Rationale: parameterize edge histogram by variance to amplitude (0-bin) ratio \n col_stuff = np.diff(smooth_col_av[~col_mask]) # slice masked areas\n col_stuff = col_stuff[25:-25] # slice edge effects\n row_stuff = np.diff(smooth_row_av[~row_mask]) # slice masked areas\n row_stuff = row_stuff[25:-25] # slice edge effects\n self.imageQuality = np.sqrt( np.var(col_stuff) # / col_stuff[np.abs(col_stuff) < .5].size\n + np.var(row_stuff) ) # / row_stuff[np.abs(row_stuff) < .5].size )\n # Rationale: sharp edges result in ROI increase\n self.imageQuality *= (self.ROI_total_area/np.prod(self.image.shape[0:2])) \n \n # Plot curves\n if self.debugPlot:\n col_hist, bin_edges = np.histogram(col_stuff, bins=np.arange(-5,5,.1), density=True)\n \n # Draw grid lines\n self.ax1.clear()\n self.graph1 = self.ax1.plot(row_stuff)[0] # (col_hist)[0]\n self.ax2.clear()\n self.graph2 = self.ax2.plot(col_stuff)[0] # smooth_col_av)[0]\n\n # We need to draw *and* flush\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n### This way of plotting is probably faster, but right now can't get it to work with clearing as well \n## if (self.graph1 is None):\n## self.graph1 = self.ax1.plot(smooth_row_av)[0]\n## self.graph2 = self.ax2.plot(smooth_col_av)[0]\n## else: \n## self.graph1.set_image(np.arange(smooth_row_av.shape[1]), smooth_row_av)\n## self.graph2.set_image(np.arange(smooth_col_av.shape[1]), smooth_col_av)\n## # Need both of these in order to rescale\n## self.ax1.relim()\n## self.ax1.autoscale_view()\n## self.ax2.relim()\n## self.ax2.autoscale_view()\n\n # Finalize\n self.stopTimer()\n self.signals.finished.emit() \n\n except Exception as err:\n exc = traceback.format_exception(type(err), err, err.__traceback__, chain=False)\n self.signals.error.emit(exc)\n self.signals.message.emit('E: {} exception: {}'.format(self.name, err))\n\n return self.image\n\ndef moving_average(x, N=5):\n if N > 1 and (N & 1) == 1:\n x = np.pad(x, pad_width=(N // 2, N // 2),\n mode='constant') # Assuming N is odd\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)\n else:\n raise ValueError(\"Moving average size must be odd and greater than 1.\")\n\n\ndef find1DGrid(data, N): \n if N <= 1:\n raise ValueError('findGrid parameter <= 1')\n if (N & 1) != 1: # enforce N to be odd\n N += 1\n gridSmoothKsize = N\n gridMinSegmentLength = 10*N\n \n # High-pass filter, to suppress uneven illumination\n data = np.abs(data - moving_average(data, int(3*N)))\n data[:N] = 0 # cut off MA artifacts\n data[-N:] = 0 # cut off MA artifacts, why not -(N-1)/2?? 
??\n smooth_data = moving_average(data, gridSmoothKsize)\n smooth_data = smooth_data - np.mean(smooth_data)\n mask_data = np.zeros(data.shape, dtype='bool') # mask grid lines\n mask_data[np.where(smooth_data < 0)[0]] = True\n \n # Now filter mask_data based on segment length and suppress too short segments\n prev_x = False\n segmentLength = 0\n segmentList = []\n for index, x in enumerate(mask_data):\n if x: # segment\n segmentLength += 1\n elif x != prev_x: # falling edge\n if segmentLength < gridMinSegmentLength: # suppress short segments\n mask_data[index - segmentLength: index] = False\n # print(diff(data[index - segmentLength:index]))\n else:\n segmentList.append((index - segmentLength, segmentLength)) # Save segment start and length\n segmentLength = 0 # reset counter\n prev_x = x\n\n return (segmentList, mask_data, smooth_data)\n\n\n \n","sub_path":"lib/imageSegmenter.py","file_name":"imageSegmenter.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"378875529","text":"# Laryan Guaico Valdés\n# Logistic Regression\n\n\nimport warnings\nfrom sklearn import datasets\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import LogisticRegression\n\n\n# load the iris dataset into the variable\niris = datasets.load_iris()\n# assign the input and output variables\nX = iris.data[:, [2, 3]]\ny = iris.target\n\n# global variables\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35, random_state=0)\nX_train_std = []\nX_test_std = []\n\n# Function that prints the first 5 rows of the dataset\ndef imprime1():\n print('----Pétalo----')\n print('[Longitud | Ancho]')\n for i in range(0, 5):\n print(iris.data[i, [2, 3]])\n\n\nimprime1()\n\n\ndef imprime2():\n\n global X_train, X_test, y_train, y_test\n global X_train_std, X_test_std\n # Function that prints the first 5 standardized samples after a HOLD OUT split with 65% training and the rest for testing\n\n sc = StandardScaler()\n sc.fit(X_train)\n X_train_std = sc.transform(X_train)\n X_test_std = sc.transform(X_test)\n print('\\nTrain Data')\n for i in range(1, 6):\n print(X_train_std[i])\n\n\nimprime2()\n\n\n# function that defines the sigmoid\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\n\n# function that builds the input range and plots the sigmoid\ndef go_sigmoid():\n z = np.arange(-7, 7, 0.1)\n phi_z = sigmoid(z)\n plt.plot(z, phi_z)\n plt.axvline(0.0, color='k')\n plt.axhspan(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')\n plt.axhline(y=0.5, ls='dotted', color='k')\n plt.yticks([0.0, 0.5, 1.0])\n plt.ylim(-0.1, 1.1)\n plt.xlabel('z')\n plt.ylabel('$\\phi (z)$')\n plt.show()\n\n\ngo_sigmoid()\n\n\n# train a perceptron model to use as a classifier for drawing the decision\n# regions and seeing how well it separates the different samples\n#\nppn = Perceptron(max_iter=40, eta0=0.1, random_state=0) # max_iter (called n_iter in very old scikit-learn)\nppn.fit(X_train_std, y_train)\n# make the prediction\ny_pred = ppn.predict(X_test_std)\n\n\ndef versiontuple(v):\n return tuple(map(int, (v.split(\".\"))))\n\n\n# function that shows the decision regions of the perceptron model above\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n markers = ('s', 'x', 'o', '^', 'v',)\n 
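#one marker/colour pair is consumed per class label below\n 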
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n \n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],\n alpha=0.8, c=cmap(idx),\n marker=markers[idx], label=cl)\n\n if test_idx:\n # plot all samples\n if not versiontuple(np.__version__) >= versiontuple('1.9.0'):\n X_test, y_test = X[list(test_idx), :], y[list(test_idx)]\n warnings.warn('Please update to NumPy 1.9.0 or newer')\n else:\n X_test, y_test = X[test_idx, :], y[test_idx]\n\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n alpha=1.0,\n linewidths=1,\n marker='o',\n s=55, label='test set')\n\n\n# using the perceptron classifier, plot the resulting chart for the three\n# flowers and observe their linear decision boundaries\n\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\nplot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.show()\n\n\n# using the LOGISTIC REGRESSION classifier, plot the resulting chart for the\n# three flowers and observe their linear decision boundaries\n\nlr = LogisticRegression(C=1000.0, random_state=0)\nlr.fit(X_train_std, y_train)\nplot_decision_regions(X_combined_std, y_combined, classifier=lr, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.show()\n\n\n# regularization introduces additional information to keep extreme parameter\n# weights under control. Several values of the inverse parameter C are applied\n# and the weight coefficients are plotted: they shrink when C is decreased and\n# grow when C is increased, showing that C is directly related to the inverse\n# regularization parameter: 1/lambda\n\ndef regularizacion():\n weights, params = [], []\n for c in np.arange(-5, 5, dtype=float):\n lr = LogisticRegression(C=10**c, random_state=0)\n lr.fit(X_train_std, y_train)\n weights.append(lr.coef_[1])\n params.append(10**c)\n weights = np.array(weights)\n plt.plot(params, weights[:, 0], label='petal length')\n plt.plot(params, weights[:, 1], linestyle='--', label='petal width')\n plt.ylabel('weight coefficient')\n plt.xlabel('C')\n plt.legend(loc='upper left')\n plt.xscale('log')\n plt.show()\n\n\nregularizacion()\n","sub_path":"Proyecto - Regresión_Logística/RegresionLogistica.py","file_name":"RegresionLogistica.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"606220039","text":"# -*- coding: utf-8 -*-\n\n#import OpenCV\nimport cv2\n\nimport numpy as np\n\n#name of the image file to open\nfname=\"ends.jpeg\"\n\n#binarization threshold\nthreshold=127\n\n#read the image into the object img_color\nimg_color= cv2.imread(fname)\n\n#read the image as grayscale into the object img_gray\nimg_gray = 
cv2.imread(fname,cv2.IMREAD_GRAYSCALE)\n\n# blur img_gray with a 16x16 averaging kernel and assign it to img_blur\nimg_blur = cv2.blur(img_gray,(16,16))\n\n# binarize the object img_blur with the threshold value (127) and assign it to img_binary\nret, img_binary= cv2.threshold(img_blur, threshold, 255, cv2.THRESH_BINARY)\n\n# extract contours from img_binary\ncontours, hierarchy = cv2.findContours(img_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\n# overlay the extracted contours on img_color in red\ncv2.drawContours(img_color, contours, -1, (0,0,255), 2)\n\n# display the number of extracted contours\nl = len(contours)\nprint(l)\n\n# areas of the extracted contours\nAreas = []\nfor i in contours:\n area = cv2.contourArea(i)\n Areas.append(area)\n\nprint(Areas)\n\nm = np.mean(Areas)\ns = np.std(Areas)\nucl = m + s\nlcl = m - s\n\ndiff = 0\nfor j in range(0, l):\n if Areas[j] < lcl:\n diff -= 1\n print([Areas[j], -1])\n elif Areas[j] >= ucl:\n diff += 1\n print([Areas[j], 1])\n else:\n print(Areas[j])\n\nl += diff\n\nprint(l)\n\n# open a separate window (window name \"contours\") and display the object img_color\ncv2.imshow(\"contours\",img_color)\n\n# wait for a key press\ncv2.waitKey(0)\n# close the windows\ncv2.destroyAllWindows()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"623969856","text":"\"\"\"Training Base Generator.\"\"\"\n\nimport networkx as nx\nfrom networkx.readwrite import json_graph as js\nimport random as rdm\nfrom progress_bar import InitBar as ib\nimport math\nimport numpy as np\nimport minerminor.mm_utils as mmu\nimport minerminor.mm_draw as mmd\nimport planarity as pl\n\ndef choice_first_node(graph):\n \"\"\"Pick up a node which can form a cycle and a list of candidates for the second one.\"\"\"\n node = rdm.choice(graph.nodes())\n possible_nodes_iter = nx.non_neighbors(graph, node)\n possible_nodes = []\n for i in possible_nodes_iter:\n possible_nodes.append(i)\n\n if possible_nodes:\n return node, possible_nodes\n else:\n return choice_first_node(graph)\n\n\ndef tree_to_1tree(graph):\n # TODO: update with the nx.non_edges() function ...\n \"\"\"Transform a tree into a 1-tree.\"\"\"\n first_node, possible_nodes = choice_first_node(graph)\n second_node = rdm.choice(possible_nodes)\n graph.add_edge(first_node, second_node)\n\n return graph\n\n\ndef certf_tw2(graph):\n \"\"\"Certificate that a graph has treewidth at most 2.\"\"\"\n finish = True\n G = nx.Graph(graph)\n while finish:\n currt_node = None\n currt_node_sec = None\n dic_degree = nx.degree(G)\n\n if 1 in dic_degree.values():\n currt_node = rdm.choice([i for i in dic_degree if dic_degree[i] == 1])\n currt_node_sec = rdm.choice(list(nx.all_neighbors(G, currt_node)))\n\n elif 2 in dic_degree.values():\n currt_node = rdm.choice([i for i in dic_degree if dic_degree[i] == 2])\n currt_node_sec = rdm.choice(list(nx.all_neighbors(G, currt_node)))\n else:\n finish = False\n\n if finish:\n G_ = nx.contracted_edge(G, (currt_node, currt_node_sec), self_loops=False)\n G = nx.Graph(G_)\n\n return True if len(G) == 1 else False\n\n\n\ndef learning_base_T21T_generation(rank):\n \"\"\"Generate a T21T learning base with nonisomorphic trees for a given rank.\"\"\"\n data_set = nx.nonisomorphic_trees(rank, create=\"graph\")\n tree_set, cycle_set = [], []\n pbar = ib()\n total = nx.number_of_nonisomorphic_trees(rank)\n for count, i in enumerate(data_set):\n pbar(count/total * 100)\n tree_set.append(js.node_link_data(i))\n cycle_set.append(js.node_link_data(tree_to_1tree(i)))\n\n return [tree_set, cycle_set]\n\n\ndef pTree_generation(tree, arr_ptree_rank):\n \"\"\"Build p-trees from a tree.\"\"\"\n # We check the number of available edges to restrict 
max_cycle.\n edges_set = []\n max_ptree_rank = max(arr_ptree_rank) + 1\n for edge in nx.non_edges(tree):\n edges_set.append(edge)\n if len(edges_set) < max_ptree_rank:\n max_ptree_rank = len(edges_set)\n # We create a set of p-trees seeded with a tree.\n ptree_set = []\n current_graph = tree\n for i in range(max_ptree_rank):\n if i in arr_ptree_rank:\n ptree_set.append(nx.Graph(current_graph))\n current_graph = tree_to_1tree(current_graph)\n\n return ptree_set\n\n\ndef pTree_basic_cycle_generation(nb_nodes, arr_ptree_rank, depth_base):\n \"\"\"P-Tree learning base generator with the Euler lemma.\"\"\"\n learning_base = []\n for p_rank in arr_ptree_rank:\n ptree_class = []\n # nb_edges = nb_nodes + p_rank - 1\n pbar = ib()\n print(\"Building the {0}-Tree class: {1} to build\".format(\n p_rank, depth_base))\n tree_set = nx.nonisomorphic_trees(nb_nodes)\n while len(ptree_class) < depth_base:\n try:\n graph = next(tree_set)\n except StopIteration:\n tree_set = nx.nonisomorphic_trees(nb_nodes)\n graph = next(tree_set)\n\n for i in range(p_rank):\n non_edges = [[x, y] for x, y in nx.non_edges(graph)]\n x, y = rdm.choice(non_edges)\n graph.add_edge(x, y)\n\n if not mmu.robust_iso(graph, ptree_class):\n pbar(len(ptree_class)/depth_base*100)\n ptree_class.append(graph)\n\n learning_base.append(ptree_class)\n\n return learning_base\n\n\ndef learning_base_pTree_generation(nb_nodes, arr_ptree_rank, feature_size):\n \"\"\"Generate PTree classes with nonisomorphic trees for a given rank.\"\"\"\n limit_edges = int((math.pow(nb_nodes, 2)-3*nb_nodes+2)/2)\n max_ptree_rank = max(arr_ptree_rank)\n if limit_edges < max_ptree_rank:\n print(\"Careful: the limit on the number of basic cycles is {}\".format(\n limit_edges))\n max_ptree_rank = limit_edges\n learning_base = [[] for i in arr_ptree_rank]\n tree_itr = nx.nonisomorphic_trees(nb_nodes, create=\"graph\")\n pbar = ib()\n print(\"\"\"Creation of {0} features in parallel.\n \"\"\".format(len(arr_ptree_rank)*feature_size))\n for count, tree in enumerate(tree_itr):\n pbar(count/feature_size*100)\n for i, graph in enumerate(pTree_generation(tree, arr_ptree_rank)):\n learning_base[i].append(graph)\n if count >= feature_size:\n break\n\n return learning_base\n\n\ndef learning_base_tw2(nb_nodes, arr_tw_rank, feature_size):\n \"\"\"Generate the TW2 learning base.\"\"\"\n learning_base = [[] for i in arr_tw_rank]\n for count_rank, rank in enumerate(arr_tw_rank):\n print(\"\\nCreation of the TW class: {0}\".format(rank))\n pbar = ib()\n for step in range(feature_size):\n pbar((step/feature_size)*100)\n is_good = True\n while is_good:\n G = nx.complete_graph(rank)\n while len(G) < nb_nodes:\n node_focus = len(G)\n clique = random_clique(G, rank)\n G.add_node(node_focus)\n for i in clique:\n G.add_edge(node_focus, i)\n if not mmu.robust_iso(G, learning_base[count_rank]):\n is_good = False\n # print(\"TW ==> \"+str(nx.chordal_graph_treewidth(G)))\n # mmd.show_graph(G)\n learning_base[count_rank].append(G)\n\n return learning_base\n\n\ndef learning_base_planar_by_minor_agreg(nb_nodes, feature_size, minor):\n \"\"\"Planar learning base built by aggregation around a minor.\"\"\"\n learning_base = [[], []]\n print(\"\\nPlanar class creation\")\n pbar = ib()\n for step in range(feature_size):\n pbar((step/feature_size)*100)\n # Creation of P\n\n # minor creation \n G = nx.Graph(minor)\n edge = rdm.choice(G.edges())\n G.remove_edge(*edge)\n is_good = True\n while is_good:\n G_ = nx.Graph(G)\n G_.add_node(len(G_))\n\n edge = rdm.choice(list(G.edges()))\n G_.add_edge(len(G_) - 1, edge[0])\n 
G_.add_edge(len(G_) - 1, edge[1])\n\n if pl.is_planar(G_):\n G = nx.Graph(G_)\n if len(G) == nb_nodes:\n is_good = False\n learning_base[0].append(G)\n\n G = nx.Graph(minor)\n is_good = True\n while is_good:\n G_ = nx.Graph(G)\n G_.add_node(len(G_))\n\n edge = rdm.choice(list(G.edges()))\n G_.add_edge(len(G_) - 1, edge[0])\n G_.add_edge(len(G_) - 1, edge[1])\n\n if not pl.is_planar(G_):\n G = nx.Graph(G_)\n if len(G) == nb_nodes:\n is_good = False\n learning_base[1].append(G)\n\n return learning_base\n\n\ndef learning_base_planar(nb_nodes, arr_planar_rank, feature_size):\n \"\"\"Generate the planar base.\"\"\"\n learning_base = [[] for i in arr_planar_rank]\n print(\"\\nPlanar class creation...\")\n pbar = ib()\n for step in range(feature_size):\n pbar((step/feature_size)*100)\n is_good = True\n while is_good:\n G, H = generate_planar_deg(nb_nodes, step % 2)\n if not mmu.robust_iso(G, learning_base[0]) and not mmu.robust_iso(H, learning_base[1]):\n is_good = False\n learning_base[0].append(G)\n learning_base[1].append(H)\n\n return learning_base\n\n\ndef learning_base_rdm(nb_nodes, _, feature_size):\n \"\"\"Generate the 0-P/k-P test base.\"\"\"\n learning_base = [[], []]\n print(\"\\nTest base creation... 0-P/k-P\")\n learning_base[0] = rdm.sample(list(nx.nonisomorphic_trees(nb_nodes)), feature_size)\n # mmd.show_graph(learning_base[0][0])\n learning_base[1] = agreg_tree(rdm.sample(list(nx.nonisomorphic_trees(nb_nodes)), feature_size), nb_nodes, feature_size)\n\n return learning_base\n\n\ndef learning_base_rdm_tw2(nb_nodes, _, feature_size):\n \"\"\"Generate a random TW2 base.\"\"\"\n learning_base = [[], []]\n tmp = 0\n tree_noniso_list = list(nx.nonisomorphic_trees(nb_nodes))\n print(\"\\nRandom TW2 base creation...\")\n pbar = ib()\n while len(learning_base[0]) < feature_size or len(learning_base[1]) < feature_size:\n graph = agreg_tree([rdm.choice(tree_noniso_list)], nb_nodes, feature_size)[0]\n # print(len(tree_noniso_list))\n if certf_tw2(graph):\n if len(learning_base[0]) < feature_size:\n learning_base[0].append(graph)\n else:\n if len(learning_base[1]) < feature_size:\n learning_base[1].append(graph)\n pbar(((len(learning_base[0])+len(learning_base[1]))/(feature_size*2))*100)\n # tmp = 1 if tmp == 0 else 0\n # print(len(learning_base[0]), len(learning_base[1]), tmp)\n return learning_base\n # mmd.show_graph(graph)\n\n\ndef agreg_tree(arr, nb_nodes, feature_size):\n \"\"\"Randomly add edges to each tree.\"\"\"\n min_ = nb_nodes - 1\n max_ = int((nb_nodes * min_) / 2)\n\n for count, tree in enumerate(arr):\n rdm_edges_int = rdm.randint(0, max_ - len(tree.edges()))\n for i in range(rdm_edges_int):\n # import ipdb; ipdb.set_trace()\n edge = rdm.choice(nx.complement(tree).edges())\n tree.add_edge(*edge)\n\n return arr\n\n\ndef generate_planar_deg(nb_nodes, side_start):\n \"\"\"Private method for planar generation.\"\"\"\n if side_start % 2 == 0:\n G = rdm.choice(list(nx.nonisomorphic_trees(nb_nodes)))\n edges_action = G.add_edge\n set_choice = nx.non_edges\n else:\n G = nx.complete_graph(nb_nodes)\n edges_action = G.remove_edge\n set_choice = nx.edges\n is_good = True\n while is_good:\n H = G.copy()\n edges_choice = rdm.choice(list(set_choice(G)))\n edges_action(edges_choice[0], edges_choice[1])\n if (side_start % 2 == 0 and not pl.is_planar(G)) or (side_start % 2 == 1 and pl.is_planar(G)):\n is_good = False\n\n if side_start % 2 == 0:\n return H, G\n else:\n return G, H\n\n\ndef random_clique(G, clique_rank):\n \"\"\"Return a random clique of G.\"\"\"\n arr_clique = [i for i in nx.enumerate_all_cliques(G) if len(i) == clique_rank]\n 
return rdm.choice(arr_clique)\n\n\ndef random_path(G, path_size):\n \"\"\"Return a random path of length path_size.\"\"\"\n res = []\n arr_nodes = np.arange(0, len(G))\n focus_node = rdm.choice(arr_nodes)\n res.append(focus_node)\n for i in range(path_size-1):\n arr_neig = [i for i in G.neighbors(focus_node) if i not in res]\n focus_node = rdm.choice(arr_neig)\n res.append(focus_node)\n\n return res\n","sub_path":"working_env/Scripts/minerminor/mm_generator.py","file_name":"mm_generator.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"93201068","text":"from distutils.core import setup\nimport os, re\nfrom os.path import join as pjoin\n\n\ndef find_packages(path):\n ret = []\n for root, dirs, files in os.walk(path):\n if '__init__.py' in files:\n ret.append(re.sub('^[^A-Za-z0-9_]+', '', root.replace('/', '.')))\n return ret\n\nreadme = open('README.md').read()\n\nrequirements = [\n 'numpy',\n 'torch >= 0.4.0',\n 'torchvision',\n 'easydict',\n 'massedit',\n 'tensorboard-pytorch',\n 'tensorflow-tensorboard',\n]\n\n\nsetup(\n name='objdect',\n version='0.0.3.2',\n author='duinodu',\n author_email='472365351duino@gmail.com',\n url='https://github.com/duinodu',\n license='MIT',\n description='Unified tool for object detection',\n long_description=readme,\n\n packages=find_packages('objdect'),\n scripts=['bin/objdect-new'],\n package_data={'': ['_template/*', \n '_template/model/*',\n '_template/model/layers/*',\n '_template/model/utils/*']},\n\n zip_safe=True,\n install_requires=requirements,\n)\n","sub_path":"pypi_install_script/objdect-0.0.3.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"442988790","text":"#!/usr/bin/env python3\n\n\"\"\"\n\tFile name: fixed_point.py\n\tPython Version: 3.6\n\n\t\tFixed point iteration solver.\n\n\t\tMethod:\n\n\t\t\t1.) Convert function g(x) to root finding form. \n\t\t\t\tWhere f(x*) = 0 is the root finding form of the fixed point problem function g(x*) = x*.\n\n\t\t\t\t\t\t\t\t\tf(x*) = g(x*) - x* = 0.\n\n\t\t\t2.) Compute the coefficients of the Newton interpolating polynomial of f(x)\n\t\t\t\tso the derivative, f'(x), is easy to compute.\n\n\t\t\t\t\t\t\t\t\ty = Ffun(x) - where x is the selected nodes\n\t\t\t\t\t\t\t\t\tc = newton_interp.coeffients(x,y)\n\t\t\t\t\t\t\t\t\tc_prime = np.polyder(c)\n\t\t\t\t\t\t\t\t\tJfun = lambda xi: np.polyval(c_prime, xi)\n\n\t\t\t\t\tNote: f(x) = Ffun(x), f'(x) = Jfun(x) in order to match Newton's method syntax.\n\n\t\t\t3.) Use Ffun, Jfun and Newton's method to find x*, which hopefully is close to Gfun(x*).\n\n\tL.J. Brown\n\tMath5315 @ SMU\n\tFall 2018\n\"\"\"\n\n__filename__ = \"fixed_point.py\"\n__author__ = \"L.J. Brown\"\n\n# internal libraries\nimport logging\n\n# external libraries\nimport numpy as np\n\n# mylib libraries\nfrom newton_interp import *\nfrom newton2 import *\n\n# initialize logger\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef root_finding_form(Gfun, n=10):\n\t\"\"\"\n\t\tConvert the fixed point problem function to a form for the previously defined Newton method;\n\t\treturns Ffun, Jfun for use in Newton's method:\n\n\t\t\tuse:\n\t\t\t\tFfun, Jfun = root_finding_form(Gfun)\n\t\t\t\tx_target = newton(Ffun, Jfun, x, maxit, Srtol, Satol, Rrtol, Ratol, output)\n\n\t\t1.) 
Convert function g(x) to root finding form, 'f(x*) = g(x*) - x* = 0', to find Ffun for Newton's method.\n\t\t2.) Construct a Newton interpolating polynomial and find its derivative to find Jfun for Newton's method.\n\n\t\t:param Gfun: fixed point problem function.\n\t\t:param n: 'int' number of nodes used when constructing the Newton interpolating polynomial, default 10.\n\t\t:returns: Ffun, Jfun to be used as parameters in Newton's method defined in the file 'newton.py'.\n\t\"\"\"\n\n\t#\n\t# \tConvert to root finding problem\n\t#\n\n\t# function to convert to root finding problem given g(x). 'g(x*) = x*' -> 'f(x*) = 0'\n\troot_finding_conversion = lambda Gfun: lambda x: Gfun(x) - x\n\n\t# convert\n\tFfun = root_finding_conversion(Gfun)\n\n\t#\n\t# \tSelect nodes for constructing the Newton interpolating polynomial. \n\t#\n\t# \t\tNote: Currently linearly spaced evaluations of f(x).\n\t# \t\tTODO: Use Chebyshev nodes.\n\t#\n\n\t# compute x and y data points\n\tx = np.linspace(-1,1,n)\n\ty = Ffun(x)\n\n\t# compute coefficients of interpolating polynomial\n\tc = coeffients(x,y)\n\n\t# compute coefficients of the first derivative of p with coefficients c\n\tc_prime = np.polyder(c)\n\n\t# construct Jfun lambda function for the derivative of the interpolating polynomial\n\t# to use in Newton's method\n\tJfun = lambda xi: np.polyval(c_prime, xi)\n\n\treturn Ffun, Jfun\n\n\ndef fixed_point(Gfun, x, maxit, rtol, atol, output=True):\n\n\t# convert Gfun to parameters useful in Newton's method,\n\t# newton(Ffun, Jfun, x, maxit, rtol, atol, output)\n\tFfun, Jfun = root_finding_form(Gfun)\n\n\t# find and return x*\n\treturn newton2(Ffun, Jfun, x, maxit, rtol, atol, output=output)\n\n\n# testing\nif __name__ == \"__main__\":\n\n\tSHOW_OUTPUT = False\n\n\t# log SHOW_OUTPUT value\n\tlogger.info(\"\\n\\nSHOW_OUTPUT set to %s.\\n\\n\" % SHOW_OUTPUT)\n\n\t# test functions:\n\n\t# use: \n\t# Gfun_a = test_functions['Gfun_a']\n\t# Ffun_a = root_finding_conversion(Gfun_a)\n\n\tfixed_point_functions = {\n\n\t\t'Gfun_a' : lambda x: (x**2)/4 - x/2 - 1,\n\t\t'Gfun_b' : lambda x: np.cos(x),\n\t\t'Gfun_c' : lambda x: (x/3 + x)/2,\n\t\t'Gfun_d' : lambda x: np.cosh(x)/x - np.arctan(x)\n\t}\n\n\t# initial guesses:\n\n\tinitial_guesses = {\n\t\t'Gfun_a' : 2,\n\t\t'Gfun_b' : 2,\n\t\t'Gfun_c' : 2,\n\t\t'Gfun_d' : 2\n\t}\n\n\t# For all problems use an absolute solution tolerance of 10−5, \n\t# a relative solution tolerance of 10−10, \n\t# and allow a maximum of 100 iterations.\n\tmaxit = 100\n\tatol = 10**(-5)\n\trtol = 10**(-10)\n\n\t# interp_representation_error\n\tinterp_representation_error = lambda Gfun, x_final: abs(Gfun(x_final) - x_final)\n\n\t# run trials\n\tfor Gfun_name, Gfun in fixed_point_functions.items():\n\n\t\tGfun = fixed_point_functions[Gfun_name]\n\t\tx = initial_guesses[Gfun_name]\n\n\t\t# convert Gfun to parameters useful in Newton's method,\n\t\t# newton(Ffun, Jfun, x, maxit, rtol, atol, output)\n\t\tFfun, Jfun = root_finding_form(Gfun)\n\n\t\t# find x*\n\t\tx_target = newton2(Ffun, Jfun, x, maxit, rtol, atol, output=SHOW_OUTPUT)\n\n\t\t# display interp_representation_error at the computed target\n\t\terror = interp_representation_error(Gfun, x_target)\n\t\tlogger.info(\"\\n\\nFinal fixed point target error using the Newton interpolating polynomial in place of Ffun.\\n \\\n\t\t\t\\n\\tFunction name: %s, \\n\\n\\t\\t |Gfun(x) - x| = %s.\\n\" % (Gfun_name, 
error))\n\n\n","sub_path":"hw2_v2/fixed_point.py","file_name":"fixed_point.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"115504247","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload, load_only\n\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.xml import XML_CSP\n\n\n@view_config(\n route_name=\"rss.updates\",\n renderer=\"rss/updates.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_updates(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"latest_releases\": latest_releases}\n\n\n@view_config(\n route_name=\"rss.packages\",\n renderer=\"rss/packages.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_packages(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n newest_projects = (\n request.db.query(Project)\n .options(load_only(\"created\", \"normalized_name\"))\n .options(joinedload(Project.releases, innerjoin=True)\n .load_only(\"summary\"))\n .order_by(Project.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"newest_projects\": newest_projects}\n","sub_path":"warehouse/rss/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"370199640","text":"from decimal import Decimal\nfrom flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileRequired\nfrom wtforms import TextField, DecimalField, SelectField\nfrom wtforms.validators import InputRequired, NumberRange, ValidationError\nfrom wtforms.widgets import html_params, Select, HTMLString\nfrom flask_wtf import Form\nfrom flask_babel import lazy_gettext as _\nfrom my_app import db\n\n\nclass Product(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(255))\n price = db.Column(db.Float)\n category_id = db.Column(db.Integer, db.ForeignKey('category.id'))\n category = db.relationship(\n 'Category', backref=db.backref('products', lazy='dynamic')\n )\n image_path = db.Column(db.String(255))\n user_timezone = db.Column(db.String(255))\n\n def __init__(self, name, price, category, image_path, user_timezone=''):\n self.name = name\n self.price = price\n self.category = category\n self.image_path = image_path\n self.user_timezone = user_timezone\n\n def __repr__(self):\n return 
'<Product %d>' % self.id\n\n\nclass Category(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100))\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return '<Category %d>' % self.id\n\n\nclass NameForm(FlaskForm):\n name = TextField(_('Name'), validators=[InputRequired()])\n\n\nclass CustomCategoryInput(Select):\n\n def __call__(self, field, **kwargs):\n kwargs.setdefault('id', field.id)\n html = []\n for val, label, selected in field.iter_choices():\n html.append(\n '<input type=\"radio\" %s> %s' % (\n html_params(\n name=field.name, value=val, checked=selected, **kwargs\n ), label\n )\n )\n return HTMLString(' '.join(html))\n\n\nclass CategoryField(SelectField):\n widget = CustomCategoryInput()\n\n def iter_choices(self):\n categories = [(c.id, c.name) for c in Category.query.all()]\n for value, label in categories:\n yield (value, label, self.coerce(value) == self.data)\n\n def pre_validate(self, form):\n for v, _ in [(c.id, c.name) for c in Category.query.all()]:\n if self.data == v:\n break\n else:\n raise ValueError(self.gettext('Not a valid choice'))\n\n\nclass ProductForm(NameForm):\n price = DecimalField(_('Price'), validators=[\n InputRequired(), NumberRange(min=Decimal('0.0'))\n ])\n category = CategoryField(\n _('Category'), validators=[InputRequired()], coerce=int\n )\n image = FileField(_('Product Image'), validators=[FileRequired()])\n\n\ndef check_duplicate_category(case_sensitive=True):\n def _check_duplicate(form, field):\n if case_sensitive:\n res = Category.query.filter(\n Category.name.like('%' + field.data + '%')\n ).first()\n else:\n res = Category.query.filter(\n Category.name.ilike('%' + field.data + '%')\n ).first()\n if res:\n raise ValidationError(\n 'Category named %s already exists' % field.data\n )\n return _check_duplicate\n\n\nclass CategoryForm(NameForm):\n name = TextField(_('Name'), validators=[\n InputRequired(), check_duplicate_category()\n ])\n","sub_path":"Chapter11/my_app/catalog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"432726582","text":"import os\nimport pandas as pd\n\nfrom ChildProject.projects import ChildProject\nfrom ChildProject.pipelines.samplers import PeriodicSampler\nfrom ChildProject.pipelines.zooniverse import ZooniversePipeline, pad_interval\n\n\ndef test_padding():\n assert pad_interval(300, 800, 500, 1) == (300, 800)\n assert pad_interval(200, 800, 500, 2) == (0, 1000)\n\n assert pad_interval(300, 900, 500, 1) == (100, 1100)\n\n assert pad_interval(2000, 2500, 100, 10) == (1750, 2750)\n\n assert pad_interval(100, 300, 500, 1) == (-50, 450)\n\n\ndef test_extraction():\n os.makedirs(\"output/zooniverse\", exist_ok=True)\n\n project = ChildProject(\"examples/valid_raw_data\")\n project.read()\n\n sampler = PeriodicSampler(project, 500, 500, 250)\n segments = sampler.sample()\n sampler.segments.to_csv(\"output/zooniverse/sampled_segments.csv\")\n\n zooniverse = ZooniversePipeline()\n\n chunks, parameters = zooniverse.extract_chunks(\n path=project.path,\n destination=\"output/zooniverse\",\n keyword=\"test\",\n segments=\"output/zooniverse/sampled_segments.csv\",\n chunks_length=250,\n chunks_min_amount=2,\n spectrogram=True,\n )\n\n chunks = pd.read_csv(chunks)\n\n assert len(chunks) == 2 * len(segments)\n assert all(\n chunks[\"wav\"]\n .apply(lambda f: os.path.exists(os.path.join(\"output/zooniverse/chunks\", f)))\n .tolist()\n )\n assert all(\n chunks[\"mp3\"]\n .apply(lambda f: 
os.path.exists(os.path.join(\"output/zooniverse/chunks\", f)))\n .tolist()\n )\n assert all(\n chunks[\"png\"]\n .apply(lambda f: os.path.exists(os.path.join(\"output/zooniverse/chunks\", f)))\n .tolist()\n )\n\n","sub_path":"tests/test_zooniverse.py","file_name":"test_zooniverse.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"320936139","text":"import os\nimport sys\nimport torch\nimport argparse\nfrom Training import VAETrainer\nfrom Models import VAEModel, VAELossModel\nfrom Dataset import get_stream_vae\nfrom tqdm import tqdm\nfrom torch import nn\nimport numpy as np\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nfrom src import DATA_DIR, MODELS_DIR, LOG_DIR\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Train deep protein folder')\n\tparser.add_argument('-experiment', default='VAETest', help='Experiment name')\n\t\n\tparser.add_argument('-image_model', default='Simple', help='Image prediction model')\n\tparser.add_argument('-dataset_dir', default='', help='Dataset directory')\n\t\t\t\n\tparser.add_argument('-load_epoch', default=None, help='Epoch to load', type=int)\n\t\n\n\targs = parser.parse_args()\n\n\ttorch.cuda.set_device(0)\n\t\n\tEXP_DIR = os.path.join(LOG_DIR, args.experiment)\n\tMDL_DIR = os.path.join(MODELS_DIR, args.experiment)\n\ttry:\n\t\tos.mkdir(EXP_DIR)\n\texcept:\n\t\tpass\n\ttry:\n\t\tos.mkdir(MDL_DIR)\n\texcept:\n\t\tpass\n\n\t\n\timage_model = VAEModel().cuda()\n\tloss_model = VAELossModel().cuda()\n\n\ttrainer = VAETrainer(\timage_model = image_model,\n\t\t\t\t\t\t\tloss_model = loss_model,\n\t\t\t\t\t\t\tlr=0.0)\n\t\t\n\tepoch = 0\n\tif args.load_epoch is None:\n\t\tfor filename in os.listdir(os.path.join(MDL_DIR)):\n\t\t\tif filename.find('epoch')!=-1:\n\t\t\t\tepoch_num = filename[filename.find('epoch') + len('epoch'):filename.rfind('.')]\n\t\t\t\tif int(epoch_num)>epoch:\n\t\t\t\t\tepoch = int(epoch_num)\n\t\ttrainer.load_models(epoch, MDL_DIR)\n\telse:\n\t\ttrainer.load_models(args.load_epoch, MDL_DIR)\n\t\tepoch = args.load_epoch\n\tprint('Loaded from epoch = ', epoch)\n\t\n\tdata_path = os.path.join(DATA_DIR, args.dataset_dir)\n\tif not os.path.exists(data_path):\n\t\traise Exception(\"dataset not found\", data_path)\n\t\n\tstream_valid = get_stream_vae(data_path, 'video_set.dat')\n\t\n\ttrainer.new_log(os.path.join(EXP_DIR,\"test_loss.dat\"), log_dir=os.path.join(EXP_DIR,'video'))\n\tfor data in tqdm(stream_valid):\n\t\ttrainer.predict(data)\n\t\t\n","sub_path":"src/video_vae.py","file_name":"video_vae.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"335551172","text":"\"\"\"\r\n\r\ninput (full summary):\r\nresults/meeting/ami/test/tixier/[MSC parameter id]/ES2004a_tixier.txt\r\n\r\noutput (summaries with varying budgets):\r\n/tmp/takahe/rouge2.0-distribution_tixier_6_13_152/test-summarization/system/ES2004a_tixier-50.txt\r\n\r\noutput (grid search csv):\r\nresults/params_submodularity.csv\r\n\r\noutput (ROUGE score):\r\ntixier_evaluation.csv\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport time\r\nimport csv\r\nimport string\r\nimport re\r\nimport copy\r\nimport gensim\r\nimport utils\r\nimport shutil\r\nimport numpy as np\r\nimport submodularity\r\nimport core_rank as cr\r\nimport multiprocessing\r\nfrom data.meeting import meeting_lists\r\nfrom sklearn.model_selection import ParameterGrid\r\n\r\ndef 
worker(idx, submodularity_param):\r\n # make a copy of the rouge folder for each worker\r\n path_to_rouge_of_worker = '/tmp/takahe/rouge2.0-distribution_' + system_name + '_' + str(corpus_id) + '_' + str(MSC_param_id) + '_' + str(submodularity_param['index']) + '/'\r\n path_to_system_of_worker = path_to_rouge_of_worker + 'test-summarization/system/'\r\n path_to_results_csv_of_worker = path_to_rouge_of_worker + 'results.csv'\r\n if os.path.exists(path_to_rouge_of_worker):\r\n # clean system folder\r\n shutil.rmtree(path_to_system_of_worker)\r\n os.mkdir(path_to_system_of_worker)\r\n # clean existing results.csv\r\n if os.path.exists(path_to_results_csv_of_worker):\r\n os.remove(path_to_results_csv_of_worker)\r\n else:\r\n shutil.copytree(path_to_rouge, path_to_rouge_of_worker)\r\n\r\n # ##########################\r\n # ### LOOP OVER MEETINGS ###\r\n # ##########################\r\n for meeting_id in ids:\r\n # print(\"\\t\\t\\t worker:\", worker_id, \"meeting_id:\", meeting_id)\r\n\r\n for summary_size in summary_size_range:\r\n # print(\"\\t\\t\\t\\tsummary_size:\", summary_size)\r\n\r\n cut = '\\n'.join(submodularity.sentence_extraction_submodularity(\r\n summary_of_meeting[meeting_id],\r\n summary_stemmed_of_meeting[meeting_id],\r\n list(core_rank_scores_of_meeting[meeting_id]),\r\n # normalize and round to 4 decimals to avoid carrying many decimals (to improve efficiency)\r\n np.round(np.array(list(core_rank_scores_of_meeting[meeting_id].values())) / sum(list(core_rank_scores_of_meeting[meeting_id].values())), 4),\r\n to_stem=False,\r\n budget=summary_size,\r\n scaling_factor=submodularity_param['scaling_factor'],\r\n weighted_sum_concepts=True,\r\n negative_terms=None,\r\n lamda=submodularity_param['lamda'],\r\n kmeans_diversity_score=True,\r\n kmeans_clusters_dict=kmeans_clusters_dict_of_meeting[meeting_id]\r\n ))\r\n\r\n cut = ''.join([l for l in cut if l not in string.punctuation])\r\n\r\n f = open(path_to_system_of_worker + meeting_id + '_' + system_name + '-' + str(summary_size) + '.txt', 'w')\r\n f.write(cut)\r\n f.close()\r\n\r\n # #############\r\n # ### ROUGE ###\r\n # #############\r\n # run rouge2.0.jar\r\n os.chdir(path_to_rouge_of_worker)\r\n code = os.system('java -jar rouge2.0.jar > /dev/null')\r\n if code != 0:\r\n raise RuntimeError()\r\n\r\n # read results.csv\r\n with open(path_to_results_csv_of_worker) as f:\r\n results = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]\r\n\r\n scores = {\r\n str(summary_size): {'Avg_Precision': [], 'Avg_Recall': [], 'Avg_F-Score': []}\r\n for summary_size in summary_size_range\r\n }\r\n\r\n for result in results:\r\n summary_size = result['System Name'].split('-')[1].split('.')[0]\r\n scores[summary_size]['Avg_Precision'].append(float(result['Avg_Precision']))\r\n scores[summary_size]['Avg_Recall'].append(float(result['Avg_Recall']))\r\n scores[summary_size]['Avg_F-Score'].append(float(result['Avg_F-Score']))\r\n\r\n # for each summary_size, get the average score over different meetings\r\n for summary_size in summary_size_range:\r\n for key in ['Avg_Precision', 'Avg_Recall', 'Avg_F-Score']:\r\n scores[str(summary_size)][key] = np.mean(scores[str(summary_size)][key])\r\n\r\n # evaluation score corresponding to the current submodularity_param\r\n overall_evaluation_score = np.mean(\r\n [scores[str(summary_size)]['Avg_F-Score'] for summary_size in summary_size_range])\r\n\r\n print(\"\\t\\tsubmodularity_param id:\", submodularity_param['index'])\r\n return overall_evaluation_score, scores\r\n\r\n\r\ndomain = 'meeting' # 
meeting\r\ndataset_id = 'ami' # ami, icsi\r\nlanguage = 'en' # en, fr\r\ndevelopment_or_test = 'test' # development / test\r\n\r\n# #########################\r\n# ### RESOURCES LOADING ###\r\n# #########################\r\nif domain == 'meeting':\r\n path_to_stopwords = 'resources/stopwords/meeting/stopwords.' + language + '.dat'\r\n stopwords = utils.load_stopwords(path_to_stopwords)\r\n\r\n if dataset_id == 'ami':\r\n ids = meeting_lists.ami_development_set \\\r\n if development_or_test == 'development' \\\r\n else meeting_lists.ami_test_set\r\n elif dataset_id == 'icsi':\r\n ids = meeting_lists.icsi_development_set \\\r\n if development_or_test == 'development' \\\r\n else meeting_lists.icsi_test_set\r\n\r\nif language == 'en':\r\n path_to_wv = 'resources/GoogleNews-vectors-negative300.bin.gz'\r\n\r\n# Load Word2Vec (takes approx. 8G RAM)\r\nprint(\"loading GoogleNews...\")\r\nstart = time.time()\r\n# vectors = Word2Vec(size=3e2, min_count=1)\r\n# vectors.build_vocab([item for sublist in lists_of_tokens.values() for item in sublist])\r\n# vectors.intersect_word2vec_format(path_to_wv, binary=True)\r\nwv = gensim.models.KeyedVectors.load_word2vec_format(path_to_wv, binary=True)\r\n# vectors = Word2Vec.load_word2vec_format(path_to_wv, binary=True)\r\nprint(\"finish loading GoogleNews, time_cost = %.2fs\" % (time.time() - start))\r\n\r\n# #############\r\n# ### ROUGE ###\r\n# #############\r\npath_to_rouge = 'rouge2.0-distribution/'\r\n\r\n# clean existing system folder\r\nif os.path.exists(path_to_rouge + 'test-summarization/system/'):\r\n shutil.rmtree(path_to_rouge + 'test-summarization/system/')\r\nos.mkdir(path_to_rouge + 'test-summarization/system/')\r\n\r\n# clean existing results.csv\r\nif os.path.exists(path_to_rouge + 'results.csv'):\r\n os.remove(path_to_rouge + 'results.csv')\r\n\r\n# clean existing rouge folder copy of workers\r\nif os.path.exists('/tmp/takahe/'):\r\n shutil.rmtree('/tmp/takahe/')\r\n\r\n# Run previously copy_ami_icsi_reference.py to copy human written summaries to rouge's reference folder\r\n\r\n# #####################################\r\n# ### COMMUNITY CREATION PARAMETERS ###\r\n# #####################################\r\npath = 'data/' + dataset_id + '_params_create_community.csv'\r\nwith open(path) as f:\r\n corpus_params_dict = {row['index']: {k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)}\r\n\r\n# ######################\r\n# ### MSC PARAMETERS ###\r\n# ######################\r\nsystem_name_list = ['filippova', 'boudin', 'mehdad', 'tixier']\r\nMSC_system_params_dict = {}\r\n\r\nfor system_name in system_name_list:\r\n path = 'results/' + system_name + '_params_MSC_' + development_or_test + '.csv'\r\n with open(path) as f:\r\n MSC_system_params_dict[system_name] = {row['index']: {k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)}\r\n\r\n# ################################\r\n# ### SUBMODULARITY PARAMETERS ###\r\n# ################################\r\nsummary_size_range = range(50, 550, 50)\r\n\r\nparam_grid = {\r\n 'lamda' : np.arange(0, 1.1, 0.1),\r\n 'scaling_factor': np.arange(0, 2.1, 0.1),\r\n}\r\nsubmodularity_params = list(ParameterGrid(param_grid))\r\nfor i in range(len(submodularity_params)):\r\n submodularity_params[i]['index'] = i\r\n\r\n# # save indexed parameter grid\r\n# keys = list(submodularity_params[0])\r\n# with open('results/' + 'params_submodularity.csv', 'w') as output_file:\r\n# dict_writer = csv.DictWriter(output_file, keys)\r\n# dict_writer.writeheader()\r\n# 
dict_writer.writerows(submodularity_params)\r\n\r\n# ######################\r\n# ### EVALUATION CSV ###\r\n# ######################\r\nfor system_name in system_name_list:\r\n with open('results/' + system_name + '_evaluation.csv', \"w\") as f:\r\n f.write('index_step1,index_step2,index_step3,overall_score')\r\n for key in ['Avg_F-Score', 'Avg_Precision', 'Avg_Recall']:\r\n for summary_size in summary_size_range:\r\n f.write(',' + str(summary_size) + '_' + key)\r\n f.write('\\n')\r\n\r\n# ###############################################\r\n# ### LOOP OVER COMMUNITY CREATION PARAMETERS ###\r\n# ###############################################\r\ncorpus_id_range = range(0, 9)\r\n\r\nfor corpus_id in corpus_id_range:\r\n start = time.time()\r\n\r\n print(str(corpus_id_range.index(corpus_id)) + '/' + str(len(corpus_id_range) - 1), \"corpus:\", dataset_id + '_' + str(corpus_id))\r\n\r\n # #########################\r\n # ### LOOP OVER SYSTEMS ###\r\n # #########################\r\n for system_name in system_name_list:\r\n print(system_name)\r\n\r\n # ################################\r\n # ### LOOP OVER MSC PARAMETERS ###\r\n # ################################\r\n for MSC_param_id in range(len(MSC_system_params_dict[system_name])):\r\n print(\"\\tMSC_param_id:\", MSC_param_id)\r\n\r\n # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n # only run on best parameter found in UCD, MSC, BSM steps\r\n if system_name == 'filippova':\r\n if corpus_id == 6 and MSC_param_id == 3: # ami\r\n # if corpus_id == 5 and param_id == 4: # icsi\r\n submodularity_params_single = [submodularity_params[68]] # ami\r\n # submodularity_params_single = [submodularity_params[21]] # icsi\r\n pass\r\n else:\r\n continue\r\n elif system_name == 'boudin':\r\n if corpus_id == 6 and MSC_param_id == 2: # ami\r\n # if corpus_id == 5 and param_id == 3: # icsi\r\n submodularity_params_single = [submodularity_params[51]] # ami\r\n # submodularity_params_single = [submodularity_params[67]] # icsi\r\n pass\r\n else:\r\n continue\r\n elif system_name == 'mehdad':\r\n if corpus_id == 3 and MSC_param_id == 0: # ami\r\n # if corpus_id == 6 and param_id == 2: # icsi\r\n submodularity_params_single = [submodularity_params[94]] # ami\r\n # submodularity_params_single = [submodularity_params[45]] # icsi\r\n pass\r\n else:\r\n continue\r\n elif system_name == 'tixier':\r\n if corpus_id == 6 and MSC_param_id == 13: # ami\r\n # if corpus_id == 4 and param_id == 16: # icsi\r\n submodularity_params_single = [submodularity_params[152]] # ami\r\n # submodularity_params_single = [submodularity_params[0]] # icsi\r\n pass\r\n else:\r\n continue\r\n # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\n # remove_stopwords = True if MSC_system_params_dict[system_name][str(MSC_param_id)]['remove_stopwords'] == 'True' else False\r\n remove_stopwords = True\r\n # pos_filtering = True if MSC_system_params_dict[system_name][str(MSC_param_id)]['pos_filtering'] == 'True' else False\r\n pos_filtering = False\r\n # stemming = True if MSC_system_params_dict[system_name][str(MSC_param_id)]['stemming'] == 'True' else False\r\n stemming = True\r\n # cr_w = int(MSC_system_params_dict[system_name][str(MSC_param_id)]['cr_w'])\r\n cr_w = 12\r\n # cr_weighted = True if MSC_system_params_dict[system_name][str(MSC_param_id)]['cr_weighted'] == 'True' else False\r\n cr_weighted = True\r\n # cr_overspanning = True if MSC_system_params_dict[system_name][str(MSC_param_id)]['cr_overspanning'] == 'True' else False\r\n cr_overspanning = 
True\r\n\r\n # #######################################################\r\n # ### RESULTS LOADING, CORERANK AND KMEANS CLUSTERING ###\r\n # #######################################################\r\n summary_of_meeting = {}\r\n summary_stemmed_of_meeting = {}\r\n core_rank_scores_of_meeting = {}\r\n kmeans_clusters_dict_of_meeting = {}\r\n\r\n for meeting_id in ids:\r\n path = 'data/utterance/' + domain + '/manual/' + dataset_id + '_' + str(corpus_id) + '/' +\\\r\n meeting_id + '_utterances.txt'\r\n with open(path, 'r+') as f:\r\n utterances = f.read().splitlines()\r\n\r\n # get CoreRank scores dict\r\n lists_of_terms = []\r\n for sentence in utterances:\r\n lists_of_terms.append(\r\n utils.clean_text(\r\n copy.copy(sentence), stopwords=stopwords, remove_stopwords=remove_stopwords,\r\n pos_filtering=pos_filtering, stemming=stemming,\r\n lower_case=True\r\n # lower_case for CoreRank\r\n )\r\n )\r\n core_rank_scores_of_meeting[meeting_id] = cr.get_core_rank_scores(lists_of_terms, window_size=cr_w, overspanning=cr_overspanning, weighted=cr_weighted)\r\n\r\n # Kmeans clustering\r\n terms = list(set([item for sublist in lists_of_terms for item in sublist]))\r\n kmeans_clusters_dict, X = utils.cluster_words(terms, wv, num_cluster=60)\r\n kmeans_clusters_dict_of_meeting[meeting_id] = kmeans_clusters_dict\r\n # optimal_k_clusters(X, range(0, X.shape[0], 10)[1:], meeting_id, system_name[i])\r\n\r\n path = 'results/' + domain + '/' + dataset_id + '_' + str(corpus_id) + '/' + development_or_test + '/' \\\r\n + system_name + '/' + str(MSC_param_id) + '/' + meeting_id + '_' + system_name + '.txt'\r\n with open(path, 'r+') as f:\r\n summary = f.read().splitlines()\r\n\r\n summary = [re.sub(' +', ' ', sentence).strip().lower().split(' ') for sentence in summary]\r\n summary_of_meeting[meeting_id] = summary\r\n\r\n summary_stemmed = []\r\n for sentence in summary:\r\n summary_stemmed.append(\r\n utils.clean_text(\r\n ' '.join(sentence), stopwords=stopwords, remove_stopwords=False,\r\n pos_filtering=False, stemming=stemming,\r\n lower_case=True\r\n )\r\n )\r\n summary_stemmed_of_meeting[meeting_id] = summary_stemmed\r\n\r\n # #################################################\r\n # ### MULTIPROCESSING OVER SUBMODULARITY PARAMS ###\r\n # #################################################\r\n start_submodularity = time.time()\r\n pool = multiprocessing.Pool()\r\n pool_results = []\r\n\r\n for idx, submodularity_param in enumerate(submodularity_params_single):\r\n pool_results.append(\r\n pool.apply_async(worker, args=(idx, submodularity_param))\r\n )\r\n\r\n pool.close()\r\n pool.join()\r\n print(\"time_cost = %.2fs\" % (time.time() - start_submodularity))\r\n\r\n # get results from pool\r\n overall_evaluation_scores_of_submodularity_params = []\r\n scores_of_submodularity_params = []\r\n for pool_result in pool_results:\r\n overall_evaluation_score, scores = pool_result.get()\r\n overall_evaluation_scores_of_submodularity_params.append(overall_evaluation_score)\r\n scores_of_submodularity_params.append(scores)\r\n\r\n # ---- Output all ----\r\n with open(system_name + '_evaluation.csv', \"a\") as f:\r\n for idx, submodularity_param in enumerate(submodularity_params_single):\r\n f.write(\r\n str(corpus_id) + ',' +\r\n str(MSC_param_id) + ',' +\r\n str(submodularity_param['index']) + ',' +\r\n str(overall_evaluation_scores_of_submodularity_params[idx])\r\n )\r\n\r\n for key in ['Avg_F-Score', 'Avg_Precision', 'Avg_Recall']:\r\n for summary_size in summary_size_range:\r\n f.write(',')\r\n 
f.write(str(scores_of_submodularity_params[idx][str(summary_size)][key]))\r\n f.write('\\n')\r\n\r\n # ---- output the best ----\r\n # # select the best submodularity_param based on the best overall_evaluation_score\r\n # index = utils.max_index(overall_evaluation_scores_of_submodularity_params)\r\n # best_submodularity_param = submodularity_params[index]\r\n # best_overall_evaluation_score = overall_evaluation_scores_of_submodularity_params[index]\r\n # best_scores = scores_of_submodularity_params[index]\r\n #\r\n # # keep final param tuning results\r\n # index_of_community_creation_param = corpus_id\r\n # index_of_MSC_param = MSC_param_id\r\n # index_of_submodularity_param = index\r\n #\r\n # with open(system_name + '_evaluation.csv', \"a\") as f:\r\n # f.write(\r\n # str(index_of_community_creation_param) + ',' +\r\n # str(index_of_MSC_param) + ',' +\r\n # str(index_of_submodularity_param) + ',' +\r\n # str(best_overall_evaluation_score)\r\n # )\r\n #\r\n # for key in ['Avg_F-Score', 'Avg_Precision', 'Avg_Recall']:\r\n # for summary_size in summary_size_range:\r\n # f.write(',')\r\n # f.write(str(best_scores[str(summary_size)][key]))\r\n # f.write('\\n')\r\n\r\n print(\"time_cost = %.2fs\" % (time.time() - start))\r\n\r\n\r\n","sub_path":"budgeted_submodular_maximization_single.py","file_name":"budgeted_submodular_maximization_single.py","file_ext":"py","file_size_in_byte":18528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"496065683","text":"def input_info():\n \"\"\"\n Input info aboat triangle\n \"\"\"\n a, b, c, tch = map(float, input(\"Input info: \").split())\n # search for similar sides of triangle\n if a == b:\n return a, c, tch\n if a == c:\n return a, b, tch\n if c == b:\n return b, a, tch\n\n\ndef res(a, c, tch):\n \"\"\"\n Search for side of square\n \"\"\"\n res = c * (a * a - c * c / 4) ** 0.5 / ((a * a - c * c / 4) ** 0.5 + c )\n return res\n\n\ndef write_file(res):\n \"\"\"\n Write result to file\n \"\"\"\n with open(\"result.txt\", \"w\") as file:\n file.write(str(res))\n\n\ndef main_program():\n a, b, tch = input_info()\n result = res(a, b, tch)\n print(result)\n write_file(result)\n\n\nmain_program()\n","sub_path":"n1.py","file_name":"n1.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"562009110","text":"from classes.Robots import BiscointRobot\nimport unittest\nfrom unittest.mock import Mock\n\nBiscointRobot = Mock()\n\ndef fake_get_offer(op, amount, is_quote):\n return {'offerId': 'EtRb8i2YTzDBWDHiQ', 'base': 'BTC', 'quote': 'BRL', 'op': 'buy', 'isQuote': True, 'baseAmount': '0.00014887', 'quoteAmount': '50.00', 'efPrice': '335863.51', 'createdAt': '2021-10-16T20:15:00.109Z', 'expiresAt': '2021-10-16T20:15:15.109Z', 'apiKeyId': 'moeGJ9tQ6TRGkJN8c'}\n\nclass TestRobot(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestRobot, self).__init__(*args, **kwargs)\n self.robot = BiscointRobot()\n self.robot.get_offer.side_effect = fake_get_offer\n \n\n def test_robot_get_offer(self):\n offer = self.robot.get_offer(op='buy', amount='50',is_quote=True)\n self.assertIsNotNone(offer)\n self.assertIn('offerId', offer)\n self.assertIn('base', offer)\n self.assertIn('quote', offer)\n self.assertIn('baseAmount', offer)\n self.assertIn('quoteAmount', offer)\n\n\n\nif __name__ == '__main__':\n 
unittest.main()\n\n\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"114889808","text":"class FieldValidator():\n\n @classmethod\n def validate_digit(self, field: str, field_name: str):\n while not field.isdigit():\n field = input(f'field \\'{field_name}\\' must be digits only. New input:')\n\n field = int(field)\n return field\n\n","sub_path":"utils/field_validator.py","file_name":"field_validator.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"268246099","text":"import time\n\nfrom TestData.Data import Testdata\nfrom Utilities.BaseClass import BaseClass\nfrom Pages.Warehouse_Asset_List import Warehouse_List\n\n\nclass Test_Thirteen(BaseClass):\n def test_asset_list(self):\n log = self.getlogger() # For log file\n Order = Warehouse_List(self.driver) # Call the page class\n log.info(\"Login into system\")\n Order.login()\n log.info(\"Click on the menu link\")\n Order.get_side_menu()\n log.info(\"Click on the menu list\")\n # First case\n Order.get_menu_list()\n log.info(\"Click on the asset button\")\n Order.get_asset_button()\n log.info(\"Click on the Find Option button\")\n Order.get_find_option()\n log.info(\"Click on the asset button\")\n Order.get_asset_id()\n time.sleep(3)\n log.info(\"Pass the value and click on find button\")\n Order.get_asset_find_button()\n Create_title = Order.get_page_title()\n assert Create_title == Testdata.asset_title\n # Second case\n Order.get_xls()\n Order.get_csv()\n time.sleep(3)\n","sub_path":"Tests/test_13_WH_Asset_List.py","file_name":"test_13_WH_Asset_List.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}